Skip to content

Instantly share code, notes, and snippets.

@silvasean
Last active March 11, 2021 02:58
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save silvasean/8ee109f2289cf721c679d0623a48bfca to your computer and use it in GitHub Desktop.
silero-stt-model.mlir
# Model from https://pytorch.org/hub/snakers4_silero-models_stt/
pt_util --import ~/tmp/silero-stt-model.pt --exported-name forward | npcomp-opt -torch-globalize-pipeline -symbol-dce
115 call @__torch__.torch.nn.functional.dropout$3(
82 call @__torch__.torch.nn.functional.relu$2(
42 call @__torch__.torch.nn.functional.linear$6(
32 call @__torch__.torch.nn.functional.multi_head_attention_forward$7(
32 call @__torch__.torch.nn.functional.layer_norm$10(
12 call @__torch__.torch.nn.functional.batch_norm$4(
6 call @__torch__.torch.nn.functional._pad$0(
3 call @__torch__.torch.nn.functional._pad_circular$1(
2 call @__torch__.torch.nn.functional.softmax$8(
1 call @__torch__.torch.nn.functional._verify_batch_size$5(
1 call @__torch__.torch.nn.functional._get_softmax_dim$9(
1 call @__torch__.torch.functional.stft$11(
1 call @softmax.forward(
1 call @quant.forward(
1 call @fc.forward(
1 call @encoder.forward(
1 call @encoder.9.skip_add.add(
1 call @encoder.9.skip_add.activation_post_process.forward(
1 call @encoder.9.layers.forward(
1 call @encoder.9.layers.9.forward(
1 call @encoder.9.layers.9.1.forward(
1 call @encoder.9.layers.9.0.forward(
1 call @encoder.9.layers.8.forward(
1 call @encoder.9.layers.7.forward(
1 call @encoder.9.layers.6.forward(
1 call @encoder.9.layers.5.forward(
1 call @encoder.9.layers.5.1.forward(
1 call @encoder.9.layers.5.0.forward(
1 call @encoder.9.layers.4.swish.sigmoid.forward(
1 call @encoder.9.layers.4.swish.forward(
1 call @encoder.9.layers.4.sigmoid.forward(
1 call @encoder.9.layers.4._se_reduce.forward(
1 call @encoder.9.layers.4._se_expand.forward(
1 call @encoder.9.layers.4.forward(
1 call @encoder.9.layers.4.f_add.mul(
1 call @encoder.9.layers.4.f_add.activation_post_process.forward(
1 call @encoder.9.layers.3.forward(
1 call @encoder.9.layers.2.forward(
1 call @encoder.9.layers.1.forward(
1 call @encoder.9.layers.17.forward(
1 call @encoder.9.layers.16.forward(
1 call @encoder.9.layers.15.forward(
1 call @encoder.9.layers.14.forward(
1 call @encoder.9.layers.14.1.forward(
1 call @encoder.9.layers.14.0.forward(
1 call @encoder.9.layers.13.swish.sigmoid.forward(
1 call @encoder.9.layers.13.swish.forward(
1 call @encoder.9.layers.13.sigmoid.forward(
1 call @encoder.9.layers.13._se_reduce.forward(
1 call @encoder.9.layers.13._se_expand.forward(
1 call @encoder.9.layers.13.forward(
1 call @encoder.9.layers.13.f_add.mul(
1 call @encoder.9.layers.13.f_add.activation_post_process.forward(
1 call @encoder.9.layers.12.forward(
1 call @encoder.9.layers.11.forward(
1 call @encoder.9.layers.10.forward(
1 call @encoder.9.layers.0.forward(
1 call @encoder.9.layers.0.1.forward(
1 call @encoder.9.layers.0.0.forward(
1 call @encoder.9.forward(
1 call @encoder.8.skip_add.add(
1 call @encoder.8.skip_add.activation_post_process.forward(
1 call @encoder.8.layers.forward(
1 call @encoder.8.layers.9.forward(
1 call @encoder.8.layers.9.1.forward(
1 call @encoder.8.layers.9.0.forward(
1 call @encoder.8.layers.8.forward(
1 call @encoder.8.layers.7.forward(
1 call @encoder.8.layers.6.forward(
1 call @encoder.8.layers.5.forward(
1 call @encoder.8.layers.5.1.forward(
1 call @encoder.8.layers.5.0.forward(
1 call @encoder.8.layers.4.swish.sigmoid.forward(
1 call @encoder.8.layers.4.swish.forward(
1 call @encoder.8.layers.4.sigmoid.forward(
1 call @encoder.8.layers.4._se_reduce.forward(
1 call @encoder.8.layers.4._se_expand.forward(
1 call @encoder.8.layers.4.forward(
1 call @encoder.8.layers.4.f_add.mul(
1 call @encoder.8.layers.4.f_add.activation_post_process.forward(
1 call @encoder.8.layers.3.forward(
1 call @encoder.8.layers.2.forward(
1 call @encoder.8.layers.1.forward(
1 call @encoder.8.layers.17.forward(
1 call @encoder.8.layers.16.forward(
1 call @encoder.8.layers.15.forward(
1 call @encoder.8.layers.14.forward(
1 call @encoder.8.layers.14.1.forward(
1 call @encoder.8.layers.14.0.forward(
1 call @encoder.8.layers.13.swish.sigmoid.forward(
1 call @encoder.8.layers.13.swish.forward(
1 call @encoder.8.layers.13.sigmoid.forward(
1 call @encoder.8.layers.13._se_reduce.forward(
1 call @encoder.8.layers.13._se_expand.forward(
1 call @encoder.8.layers.13.forward(
1 call @encoder.8.layers.13.f_add.mul(
1 call @encoder.8.layers.13.f_add.activation_post_process.forward(
1 call @encoder.8.layers.12.forward(
1 call @encoder.8.layers.11.forward(
1 call @encoder.8.layers.10.forward(
1 call @encoder.8.layers.0.forward(
1 call @encoder.8.layers.0.1.forward(
1 call @encoder.8.layers.0.0.forward(
1 call @encoder.8.forward(
1 call @encoder.7.skip_add.add(
1 call @encoder.7.skip_add.activation_post_process.forward(
1 call @encoder.7.layers.forward(
1 call @encoder.7.layers.7.forward(
1 call @encoder.7.layers.6.forward(
1 call @encoder.7.layers.5.forward(
1 call @encoder.7.layers.4.forward(
1 call @encoder.7.layers.4.1.forward(
1 call @encoder.7.layers.4.0.forward(
1 call @encoder.7.layers.3.forward(
1 call @encoder.7.layers.2.forward(
1 call @encoder.7.layers.1.forward(
1 call @encoder.7.layers.0.forward(
1 call @encoder.7.layers.0.1.forward(
1 call @encoder.7.layers.0.0.forward(
1 call @encoder.7.forward(
1 call @encoder.6.skip_add.add(
1 call @encoder.6.skip_add.activation_post_process.forward(
1 call @encoder.6.layers.forward(
1 call @encoder.6.layers.9.forward(
1 call @encoder.6.layers.9.1.forward(
1 call @encoder.6.layers.9.0.forward(
1 call @encoder.6.layers.8.forward(
1 call @encoder.6.layers.7.forward(
1 call @encoder.6.layers.6.forward(
1 call @encoder.6.layers.5.forward(
1 call @encoder.6.layers.5.1.forward(
1 call @encoder.6.layers.5.0.forward(
1 call @encoder.6.layers.4.swish.sigmoid.forward(
1 call @encoder.6.layers.4.swish.forward(
1 call @encoder.6.layers.4.sigmoid.forward(
1 call @encoder.6.layers.4._se_reduce.forward(
1 call @encoder.6.layers.4._se_expand.forward(
1 call @encoder.6.layers.4.forward(
1 call @encoder.6.layers.4.f_add.mul(
1 call @encoder.6.layers.4.f_add.activation_post_process.forward(
1 call @encoder.6.layers.3.forward(
1 call @encoder.6.layers.2.forward(
1 call @encoder.6.layers.1.forward(
1 call @encoder.6.layers.17.forward(
1 call @encoder.6.layers.16.forward(
1 call @encoder.6.layers.15.forward(
1 call @encoder.6.layers.14.forward(
1 call @encoder.6.layers.14.1.forward(
1 call @encoder.6.layers.14.0.forward(
1 call @encoder.6.layers.13.swish.sigmoid.forward(
1 call @encoder.6.layers.13.swish.forward(
1 call @encoder.6.layers.13.sigmoid.forward(
1 call @encoder.6.layers.13._se_reduce.forward(
1 call @encoder.6.layers.13._se_expand.forward(
1 call @encoder.6.layers.13.forward(
1 call @encoder.6.layers.13.f_add.mul(
1 call @encoder.6.layers.13.f_add.activation_post_process.forward(
1 call @encoder.6.layers.12.forward(
1 call @encoder.6.layers.11.forward(
1 call @encoder.6.layers.10.forward(
1 call @encoder.6.layers.0.forward(
1 call @encoder.6.layers.0.1.forward(
1 call @encoder.6.layers.0.0.forward(
1 call @encoder.6.forward(
1 call @encoder.5.skip_add.add(
1 call @encoder.5.skip_add.activation_post_process.forward(
1 call @encoder.5.layers.forward(
1 call @encoder.5.layers.9.forward(
1 call @encoder.5.layers.9.1.forward(
1 call @encoder.5.layers.9.0.forward(
1 call @encoder.5.layers.8.forward(
1 call @encoder.5.layers.7.forward(
1 call @encoder.5.layers.6.forward(
1 call @encoder.5.layers.5.forward(
1 call @encoder.5.layers.5.1.forward(
1 call @encoder.5.layers.5.0.forward(
1 call @encoder.5.layers.4.swish.sigmoid.forward(
1 call @encoder.5.layers.4.swish.forward(
1 call @encoder.5.layers.4.sigmoid.forward(
1 call @encoder.5.layers.4._se_reduce.forward(
1 call @encoder.5.layers.4._se_expand.forward(
1 call @encoder.5.layers.4.forward(
1 call @encoder.5.layers.4.f_add.mul(
1 call @encoder.5.layers.4.f_add.activation_post_process.forward(
1 call @encoder.5.layers.3.forward(
1 call @encoder.5.layers.2.forward(
1 call @encoder.5.layers.1.forward(
1 call @encoder.5.layers.17.forward(
1 call @encoder.5.layers.16.forward(
1 call @encoder.5.layers.15.forward(
1 call @encoder.5.layers.14.forward(
1 call @encoder.5.layers.14.1.forward(
1 call @encoder.5.layers.14.0.forward(
1 call @encoder.5.layers.13.swish.sigmoid.forward(
1 call @encoder.5.layers.13.swish.forward(
1 call @encoder.5.layers.13.sigmoid.forward(
1 call @encoder.5.layers.13._se_reduce.forward(
1 call @encoder.5.layers.13._se_expand.forward(
1 call @encoder.5.layers.13.forward(
1 call @encoder.5.layers.13.f_add.mul(
1 call @encoder.5.layers.13.f_add.activation_post_process.forward(
1 call @encoder.5.layers.12.forward(
1 call @encoder.5.layers.11.forward(
1 call @encoder.5.layers.10.forward(
1 call @encoder.5.layers.0.forward(
1 call @encoder.5.layers.0.1.forward(
1 call @encoder.5.layers.0.0.forward(
1 call @encoder.5.forward(
1 call @encoder.4.skip_add.add(
1 call @encoder.4.skip_add.activation_post_process.forward(
1 call @encoder.4.layers.forward(
1 call @encoder.4.layers.9.forward(
1 call @encoder.4.layers.9.1.forward(
1 call @encoder.4.layers.9.0.forward(
1 call @encoder.4.layers.8.forward(
1 call @encoder.4.layers.7.forward(
1 call @encoder.4.layers.6.forward(
1 call @encoder.4.layers.5.forward(
1 call @encoder.4.layers.5.1.forward(
1 call @encoder.4.layers.5.0.forward(
1 call @encoder.4.layers.4.swish.sigmoid.forward(
1 call @encoder.4.layers.4.swish.forward(
1 call @encoder.4.layers.4.sigmoid.forward(
1 call @encoder.4.layers.4._se_reduce.forward(
1 call @encoder.4.layers.4._se_expand.forward(
1 call @encoder.4.layers.4.forward(
1 call @encoder.4.layers.4.f_add.mul(
1 call @encoder.4.layers.4.f_add.activation_post_process.forward(
1 call @encoder.4.layers.3.forward(
1 call @encoder.4.layers.2.forward(
1 call @encoder.4.layers.1.forward(
1 call @encoder.4.layers.17.forward(
1 call @encoder.4.layers.16.forward(
1 call @encoder.4.layers.15.forward(
1 call @encoder.4.layers.14.forward(
1 call @encoder.4.layers.14.1.forward(
1 call @encoder.4.layers.14.0.forward(
1 call @encoder.4.layers.13.swish.sigmoid.forward(
1 call @encoder.4.layers.13.swish.forward(
1 call @encoder.4.layers.13.sigmoid.forward(
1 call @encoder.4.layers.13._se_reduce.forward(
1 call @encoder.4.layers.13._se_expand.forward(
1 call @encoder.4.layers.13.forward(
1 call @encoder.4.layers.13.f_add.mul(
1 call @encoder.4.layers.13.f_add.activation_post_process.forward(
1 call @encoder.4.layers.12.forward(
1 call @encoder.4.layers.11.forward(
1 call @encoder.4.layers.10.forward(
1 call @encoder.4.layers.0.forward(
1 call @encoder.4.layers.0.1.forward(
1 call @encoder.4.layers.0.0.forward(
1 call @encoder.4.forward(
1 call @encoder.3.skip_add.add(
1 call @encoder.3.skip_add.activation_post_process.forward(
1 call @encoder.3.layers.forward(
1 call @encoder.3.layers.9.forward(
1 call @encoder.3.layers.9.1.forward(
1 call @encoder.3.layers.9.0.forward(
1 call @encoder.3.layers.8.forward(
1 call @encoder.3.layers.7.forward(
1 call @encoder.3.layers.6.forward(
1 call @encoder.3.layers.5.forward(
1 call @encoder.3.layers.5.1.forward(
1 call @encoder.3.layers.5.0.forward(
1 call @encoder.3.layers.4.swish.sigmoid.forward(
1 call @encoder.3.layers.4.swish.forward(
1 call @encoder.3.layers.4.sigmoid.forward(
1 call @encoder.3.layers.4._se_reduce.forward(
1 call @encoder.3.layers.4._se_expand.forward(
1 call @encoder.3.layers.4.forward(
1 call @encoder.3.layers.4.f_add.mul(
1 call @encoder.3.layers.4.f_add.activation_post_process.forward(
1 call @encoder.3.layers.3.forward(
1 call @encoder.3.layers.2.forward(
1 call @encoder.3.layers.1.forward(
1 call @encoder.3.layers.17.forward(
1 call @encoder.3.layers.16.forward(
1 call @encoder.3.layers.15.forward(
1 call @encoder.3.layers.14.forward(
1 call @encoder.3.layers.14.1.forward(
1 call @encoder.3.layers.14.0.forward(
1 call @encoder.3.layers.13.swish.sigmoid.forward(
1 call @encoder.3.layers.13.swish.forward(
1 call @encoder.3.layers.13.sigmoid.forward(
1 call @encoder.3.layers.13._se_reduce.forward(
1 call @encoder.3.layers.13._se_expand.forward(
1 call @encoder.3.layers.13.forward(
1 call @encoder.3.layers.13.f_add.mul(
1 call @encoder.3.layers.13.f_add.activation_post_process.forward(
1 call @encoder.3.layers.12.forward(
1 call @encoder.3.layers.11.forward(
1 call @encoder.3.layers.10.forward(
1 call @encoder.3.layers.0.forward(
1 call @encoder.3.layers.0.1.forward(
1 call @encoder.3.layers.0.0.forward(
1 call @encoder.3.forward(
1 call @encoder.2.skip_add.add(
1 call @encoder.2.skip_add.activation_post_process.forward(
1 call @encoder.2.layers.forward(
1 call @encoder.2.layers.9.forward(
1 call @encoder.2.layers.9.1.forward(
1 call @encoder.2.layers.9.0.forward(
1 call @encoder.2.layers.8.forward(
1 call @encoder.2.layers.7.forward(
1 call @encoder.2.layers.6.forward(
1 call @encoder.2.layers.5.forward(
1 call @encoder.2.layers.5.1.forward(
1 call @encoder.2.layers.5.0.forward(
1 call @encoder.2.layers.4.swish.sigmoid.forward(
1 call @encoder.2.layers.4.swish.forward(
1 call @encoder.2.layers.4.sigmoid.forward(
1 call @encoder.2.layers.4._se_reduce.forward(
1 call @encoder.2.layers.4._se_expand.forward(
1 call @encoder.2.layers.4.forward(
1 call @encoder.2.layers.4.f_add.mul(
1 call @encoder.2.layers.4.f_add.activation_post_process.forward(
1 call @encoder.2.layers.3.forward(
1 call @encoder.2.layers.2.forward(
1 call @encoder.2.layers.1.forward(
1 call @encoder.2.layers.17.forward(
1 call @encoder.2.layers.16.forward(
1 call @encoder.2.layers.15.forward(
1 call @encoder.2.layers.14.forward(
1 call @encoder.2.layers.14.1.forward(
1 call @encoder.2.layers.14.0.forward(
1 call @encoder.2.layers.13.swish.sigmoid.forward(
1 call @encoder.2.layers.13.swish.forward(
1 call @encoder.2.layers.13.sigmoid.forward(
1 call @encoder.2.layers.13._se_reduce.forward(
1 call @encoder.2.layers.13._se_expand.forward(
1 call @encoder.2.layers.13.forward(
1 call @encoder.2.layers.13.f_add.mul(
1 call @encoder.2.layers.13.f_add.activation_post_process.forward(
1 call @encoder.2.layers.12.forward(
1 call @encoder.2.layers.11.forward(
1 call @encoder.2.layers.10.forward(
1 call @encoder.2.layers.0.forward(
1 call @encoder.2.layers.0.1.forward(
1 call @encoder.2.layers.0.0.forward(
1 call @encoder.2.forward(
1 call @encoder.20.skip_add.add(
1 call @encoder.20.skip_add.activation_post_process.forward(
1 call @encoder.20.layers.forward(
1 call @encoder.20.layers.8.forward(
1 call @encoder.20.layers.7.forward(
1 call @encoder.20.layers.6.forward(
1 call @encoder.20.layers.6._check_input_dim(
1 call @encoder.20.layers.5.forward(
1 call @encoder.20.layers.4.swish.sigmoid.forward(
1 call @encoder.20.layers.4.swish.forward(
1 call @encoder.20.layers.4.sigmoid.forward(
1 call @encoder.20.layers.4._se_reduce.forward(
1 call @encoder.20.layers.4._se_expand.forward(
1 call @encoder.20.layers.4.forward(
1 call @encoder.20.layers.4.f_add.mul(
1 call @encoder.20.layers.4.f_add.activation_post_process.forward(
1 call @encoder.20.layers.3.forward(
1 call @encoder.20.layers.2.forward(
1 call @encoder.20.layers.1.forward(
1 call @encoder.20.layers.1._check_input_dim(
1 call @encoder.20.layers.0.forward(
1 call @encoder.20.forward(
1 call @encoder.1.skip_add.add(
1 call @encoder.1.skip_add.activation_post_process.forward(
1 call @encoder.1.layers.forward(
1 call @encoder.1.layers.9.forward(
1 call @encoder.1.layers.9.1.forward(
1 call @encoder.1.layers.9.0.forward(
1 call @encoder.1.layers.8.forward(
1 call @encoder.1.layers.7.forward(
1 call @encoder.1.layers.6.forward(
1 call @encoder.1.layers.5.forward(
1 call @encoder.1.layers.5.1.forward(
1 call @encoder.1.layers.5.0.forward(
1 call @encoder.1.layers.4.swish.sigmoid.forward(
1 call @encoder.1.layers.4.swish.forward(
1 call @encoder.1.layers.4.sigmoid.forward(
1 call @encoder.1.layers.4._se_reduce.forward(
1 call @encoder.1.layers.4._se_expand.forward(
1 call @encoder.1.layers.4.forward(
1 call @encoder.1.layers.4.f_add.mul(
1 call @encoder.1.layers.4.f_add.activation_post_process.forward(
1 call @encoder.1.layers.3.forward(
1 call @encoder.1.layers.2.forward(
1 call @encoder.1.layers.1.forward(
1 call @encoder.1.layers.17.forward(
1 call @encoder.1.layers.16.forward(
1 call @encoder.1.layers.15.forward(
1 call @encoder.1.layers.14.forward(
1 call @encoder.1.layers.14.1.forward(
1 call @encoder.1.layers.14.0.forward(
1 call @encoder.1.layers.13.swish.sigmoid.forward(
1 call @encoder.1.layers.13.swish.forward(
1 call @encoder.1.layers.13.sigmoid.forward(
1 call @encoder.1.layers.13._se_reduce.forward(
1 call @encoder.1.layers.13._se_expand.forward(
1 call @encoder.1.layers.13.forward(
1 call @encoder.1.layers.13.f_add.mul(
1 call @encoder.1.layers.13.f_add.activation_post_process.forward(
1 call @encoder.1.layers.12.forward(
1 call @encoder.1.layers.11.forward(
1 call @encoder.1.layers.10.forward(
1 call @encoder.1.layers.0.forward(
1 call @encoder.1.layers.0.1.forward(
1 call @encoder.1.layers.0.0.forward(
1 call @encoder.1.forward(
1 call @encoder.19.skip_add.add(
1 call @encoder.19.skip_add.activation_post_process.forward(
1 call @encoder.19.layers.forward(
1 call @encoder.19.layers.8.forward(
1 call @encoder.19.layers.7.forward(
1 call @encoder.19.layers.6.forward(
1 call @encoder.19.layers.6._check_input_dim(
1 call @encoder.19.layers.5.forward(
1 call @encoder.19.layers.4.swish.sigmoid.forward(
1 call @encoder.19.layers.4.swish.forward(
1 call @encoder.19.layers.4.sigmoid.forward(
1 call @encoder.19.layers.4._se_reduce.forward(
1 call @encoder.19.layers.4._se_expand.forward(
1 call @encoder.19.layers.4.forward(
1 call @encoder.19.layers.4.f_add.mul(
1 call @encoder.19.layers.4.f_add.activation_post_process.forward(
1 call @encoder.19.layers.3.forward(
1 call @encoder.19.layers.2.forward(
1 call @encoder.19.layers.1.forward(
1 call @encoder.19.layers.1._check_input_dim(
1 call @encoder.19.layers.0.forward(
1 call @encoder.19.forward(
1 call @encoder.18.skip_add.add(
1 call @encoder.18.skip_add.activation_post_process.forward(
1 call @encoder.18.layers.forward(
1 call @encoder.18.layers.8.forward(
1 call @encoder.18.layers.7.forward(
1 call @encoder.18.layers.6.forward(
1 call @encoder.18.layers.6._check_input_dim(
1 call @encoder.18.layers.5.forward(
1 call @encoder.18.layers.4.swish.sigmoid.forward(
1 call @encoder.18.layers.4.swish.forward(
1 call @encoder.18.layers.4.sigmoid.forward(
1 call @encoder.18.layers.4._se_reduce.forward(
1 call @encoder.18.layers.4._se_expand.forward(
1 call @encoder.18.layers.4.forward(
1 call @encoder.18.layers.4.f_add.mul(
1 call @encoder.18.layers.4.f_add.activation_post_process.forward(
1 call @encoder.18.layers.3.forward(
1 call @encoder.18.layers.2.forward(
1 call @encoder.18.layers.1.forward(
1 call @encoder.18.layers.1._check_input_dim(
1 call @encoder.18.layers.0.forward(
1 call @encoder.18.forward(
1 call @encoder.17.skip_add.add(
1 call @encoder.17.skip_add.activation_post_process.forward(
1 call @encoder.17.layers.forward(
1 call @encoder.17.layers.8.forward(
1 call @encoder.17.layers.7.forward(
1 call @encoder.17.layers.6.forward(
1 call @encoder.17.layers.6._check_input_dim(
1 call @encoder.17.layers.5.forward(
1 call @encoder.17.layers.4.swish.sigmoid.forward(
1 call @encoder.17.layers.4.swish.forward(
1 call @encoder.17.layers.4.sigmoid.forward(
1 call @encoder.17.layers.4._se_reduce.forward(
1 call @encoder.17.layers.4._se_expand.forward(
1 call @encoder.17.layers.4.forward(
1 call @encoder.17.layers.4.f_add.mul(
1 call @encoder.17.layers.4.f_add.activation_post_process.forward(
1 call @encoder.17.layers.3.forward(
1 call @encoder.17.layers.2.forward(
1 call @encoder.17.layers.1.forward(
1 call @encoder.17.layers.1._check_input_dim(
1 call @encoder.17.layers.0.forward(
1 call @encoder.17.forward(
1 call @encoder.16.skip_add.add(
1 call @encoder.16.skip_add.activation_post_process.forward(
1 call @encoder.16.layers.forward(
1 call @encoder.16.layers.8.forward(
1 call @encoder.16.layers.7.forward(
1 call @encoder.16.layers.6.forward(
1 call @encoder.16.layers.6._check_input_dim(
1 call @encoder.16.layers.5.forward(
1 call @encoder.16.layers.4.swish.sigmoid.forward(
1 call @encoder.16.layers.4.swish.forward(
1 call @encoder.16.layers.4.sigmoid.forward(
1 call @encoder.16.layers.4._se_reduce.forward(
1 call @encoder.16.layers.4._se_expand.forward(
1 call @encoder.16.layers.4.forward(
1 call @encoder.16.layers.4.f_add.mul(
1 call @encoder.16.layers.4.f_add.activation_post_process.forward(
1 call @encoder.16.layers.3.forward(
1 call @encoder.16.layers.2.forward(
1 call @encoder.16.layers.1.forward(
1 call @encoder.16.layers.1._check_input_dim(
1 call @encoder.16.layers.0.forward(
1 call @encoder.16.forward(
1 call @encoder.15.skip_add.add(
1 call @encoder.15.skip_add.activation_post_process.forward(
1 call @encoder.15.layers.forward(
1 call @encoder.15.layers.8.forward(
1 call @encoder.15.layers.7.forward(
1 call @encoder.15.layers.6.forward(
1 call @encoder.15.layers.6._check_input_dim(
1 call @encoder.15.layers.5.forward(
1 call @encoder.15.layers.4.swish.sigmoid.forward(
1 call @encoder.15.layers.4.swish.forward(
1 call @encoder.15.layers.4.sigmoid.forward(
1 call @encoder.15.layers.4._se_reduce.forward(
1 call @encoder.15.layers.4._se_expand.forward(
1 call @encoder.15.layers.4.forward(
1 call @encoder.15.layers.4.f_add.mul(
1 call @encoder.15.layers.4.f_add.activation_post_process.forward(
1 call @encoder.15.layers.3.forward(
1 call @encoder.15.layers.2.forward(
1 call @encoder.15.layers.1.forward(
1 call @encoder.15.layers.1._check_input_dim(
1 call @encoder.15.layers.0.forward(
1 call @encoder.15.forward(
1 call @encoder.14.skip_add.add(
1 call @encoder.14.skip_add.activation_post_process.forward(
1 call @encoder.14.layers.forward(
1 call @encoder.14.layers.7.forward(
1 call @encoder.14.layers.6.forward(
1 call @encoder.14.layers.5.forward(
1 call @encoder.14.layers.4.forward(
1 call @encoder.14.layers.4.1.forward(
1 call @encoder.14.layers.4.0.forward(
1 call @encoder.14.layers.3.forward(
1 call @encoder.14.layers.2.forward(
1 call @encoder.14.layers.1.forward(
1 call @encoder.14.layers.0.forward(
1 call @encoder.14.layers.0.1.forward(
1 call @encoder.14.layers.0.0.forward(
1 call @encoder.14.forward(
1 call @encoder.13.skip_add.add(
1 call @encoder.13.skip_add.activation_post_process.forward(
1 call @encoder.13.layers.forward(
1 call @encoder.13.layers.9.forward(
1 call @encoder.13.layers.9.1.forward(
1 call @encoder.13.layers.9.0.forward(
1 call @encoder.13.layers.8.forward(
1 call @encoder.13.layers.7.forward(
1 call @encoder.13.layers.6.forward(
1 call @encoder.13.layers.5.forward(
1 call @encoder.13.layers.5.1.forward(
1 call @encoder.13.layers.5.0.forward(
1 call @encoder.13.layers.4.swish.sigmoid.forward(
1 call @encoder.13.layers.4.swish.forward(
1 call @encoder.13.layers.4.sigmoid.forward(
1 call @encoder.13.layers.4._se_reduce.forward(
1 call @encoder.13.layers.4._se_expand.forward(
1 call @encoder.13.layers.4.forward(
1 call @encoder.13.layers.4.f_add.mul(
1 call @encoder.13.layers.4.f_add.activation_post_process.forward(
1 call @encoder.13.layers.3.forward(
1 call @encoder.13.layers.2.forward(
1 call @encoder.13.layers.1.forward(
1 call @encoder.13.layers.17.forward(
1 call @encoder.13.layers.16.forward(
1 call @encoder.13.layers.15.forward(
1 call @encoder.13.layers.14.forward(
1 call @encoder.13.layers.14.1.forward(
1 call @encoder.13.layers.14.0.forward(
1 call @encoder.13.layers.13.swish.sigmoid.forward(
1 call @encoder.13.layers.13.swish.forward(
1 call @encoder.13.layers.13.sigmoid.forward(
1 call @encoder.13.layers.13._se_reduce.forward(
1 call @encoder.13.layers.13._se_expand.forward(
1 call @encoder.13.layers.13.forward(
1 call @encoder.13.layers.13.f_add.mul(
1 call @encoder.13.layers.13.f_add.activation_post_process.forward(
1 call @encoder.13.layers.12.forward(
1 call @encoder.13.layers.11.forward(
1 call @encoder.13.layers.10.forward(
1 call @encoder.13.layers.0.forward(
1 call @encoder.13.layers.0.1.forward(
1 call @encoder.13.layers.0.0.forward(
1 call @encoder.13.forward(
1 call @encoder.12.skip_add.add(
1 call @encoder.12.skip_add.activation_post_process.forward(
1 call @encoder.12.layers.forward(
1 call @encoder.12.layers.9.forward(
1 call @encoder.12.layers.9.1.forward(
1 call @encoder.12.layers.9.0.forward(
1 call @encoder.12.layers.8.forward(
1 call @encoder.12.layers.7.forward(
1 call @encoder.12.layers.6.forward(
1 call @encoder.12.layers.5.forward(
1 call @encoder.12.layers.5.1.forward(
1 call @encoder.12.layers.5.0.forward(
1 call @encoder.12.layers.4.swish.sigmoid.forward(
1 call @encoder.12.layers.4.swish.forward(
1 call @encoder.12.layers.4.sigmoid.forward(
1 call @encoder.12.layers.4._se_reduce.forward(
1 call @encoder.12.layers.4._se_expand.forward(
1 call @encoder.12.layers.4.forward(
1 call @encoder.12.layers.4.f_add.mul(
1 call @encoder.12.layers.4.f_add.activation_post_process.forward(
1 call @encoder.12.layers.3.forward(
1 call @encoder.12.layers.2.forward(
1 call @encoder.12.layers.1.forward(
1 call @encoder.12.layers.17.forward(
1 call @encoder.12.layers.16.forward(
1 call @encoder.12.layers.15.forward(
1 call @encoder.12.layers.14.forward(
1 call @encoder.12.layers.14.1.forward(
1 call @encoder.12.layers.14.0.forward(
1 call @encoder.12.layers.13.swish.sigmoid.forward(
1 call @encoder.12.layers.13.swish.forward(
1 call @encoder.12.layers.13.sigmoid.forward(
1 call @encoder.12.layers.13._se_reduce.forward(
1 call @encoder.12.layers.13._se_expand.forward(
1 call @encoder.12.layers.13.forward(
1 call @encoder.12.layers.13.f_add.mul(
1 call @encoder.12.layers.13.f_add.activation_post_process.forward(
1 call @encoder.12.layers.12.forward(
1 call @encoder.12.layers.11.forward(
1 call @encoder.12.layers.10.forward(
1 call @encoder.12.layers.0.forward(
1 call @encoder.12.layers.0.1.forward(
1 call @encoder.12.layers.0.0.forward(
1 call @encoder.12.forward(
1 call @encoder.11.skip_add.add(
1 call @encoder.11.skip_add.activation_post_process.forward(
1 call @encoder.11.layers.forward(
1 call @encoder.11.layers.9.forward(
1 call @encoder.11.layers.9.1.forward(
1 call @encoder.11.layers.9.0.forward(
1 call @encoder.11.layers.8.forward(
1 call @encoder.11.layers.7.forward(
1 call @encoder.11.layers.6.forward(
1 call @encoder.11.layers.5.forward(
1 call @encoder.11.layers.5.1.forward(
1 call @encoder.11.layers.5.0.forward(
1 call @encoder.11.layers.4.swish.sigmoid.forward(
1 call @encoder.11.layers.4.swish.forward(
1 call @encoder.11.layers.4.sigmoid.forward(
1 call @encoder.11.layers.4._se_reduce.forward(
1 call @encoder.11.layers.4._se_expand.forward(
1 call @encoder.11.layers.4.forward(
1 call @encoder.11.layers.4.f_add.mul(
1 call @encoder.11.layers.4.f_add.activation_post_process.forward(
1 call @encoder.11.layers.3.forward(
1 call @encoder.11.layers.2.forward(
1 call @encoder.11.layers.1.forward(
1 call @encoder.11.layers.17.forward(
1 call @encoder.11.layers.16.forward(
1 call @encoder.11.layers.15.forward(
1 call @encoder.11.layers.14.forward(
1 call @encoder.11.layers.14.1.forward(
1 call @encoder.11.layers.14.0.forward(
1 call @encoder.11.layers.13.swish.sigmoid.forward(
1 call @encoder.11.layers.13.swish.forward(
1 call @encoder.11.layers.13.sigmoid.forward(
1 call @encoder.11.layers.13._se_reduce.forward(
1 call @encoder.11.layers.13._se_expand.forward(
1 call @encoder.11.layers.13.forward(
1 call @encoder.11.layers.13.f_add.mul(
1 call @encoder.11.layers.13.f_add.activation_post_process.forward(
1 call @encoder.11.layers.12.forward(
1 call @encoder.11.layers.11.forward(
1 call @encoder.11.layers.10.forward(
1 call @encoder.11.layers.0.forward(
1 call @encoder.11.layers.0.1.forward(
1 call @encoder.11.layers.0.0.forward(
1 call @encoder.11.forward(
1 call @encoder.10.skip_add.add(
1 call @encoder.10.skip_add.activation_post_process.forward(
1 call @encoder.10.layers.forward(
1 call @encoder.10.layers.9.forward(
1 call @encoder.10.layers.9.1.forward(
1 call @encoder.10.layers.9.0.forward(
1 call @encoder.10.layers.8.forward(
1 call @encoder.10.layers.7.forward(
1 call @encoder.10.layers.6.forward(
1 call @encoder.10.layers.5.forward(
1 call @encoder.10.layers.5.1.forward(
1 call @encoder.10.layers.5.0.forward(
1 call @encoder.10.layers.4.swish.sigmoid.forward(
1 call @encoder.10.layers.4.swish.forward(
1 call @encoder.10.layers.4.sigmoid.forward(
1 call @encoder.10.layers.4._se_reduce.forward(
1 call @encoder.10.layers.4._se_expand.forward(
1 call @encoder.10.layers.4.forward(
1 call @encoder.10.layers.4.f_add.mul(
1 call @encoder.10.layers.4.f_add.activation_post_process.forward(
1 call @encoder.10.layers.3.forward(
1 call @encoder.10.layers.2.forward(
1 call @encoder.10.layers.1.forward(
1 call @encoder.10.layers.17.forward(
1 call @encoder.10.layers.16.forward(
1 call @encoder.10.layers.15.forward(
1 call @encoder.10.layers.14.forward(
1 call @encoder.10.layers.14.1.forward(
1 call @encoder.10.layers.14.0.forward(
1 call @encoder.10.layers.13.swish.sigmoid.forward(
1 call @encoder.10.layers.13.swish.forward(
1 call @encoder.10.layers.13.sigmoid.forward(
1 call @encoder.10.layers.13._se_reduce.forward(
1 call @encoder.10.layers.13._se_expand.forward(
1 call @encoder.10.layers.13.forward(
1 call @encoder.10.layers.13.f_add.mul(
1 call @encoder.10.layers.13.f_add.activation_post_process.forward(
1 call @encoder.10.layers.12.forward(
1 call @encoder.10.layers.11.forward(
1 call @encoder.10.layers.10.forward(
1 call @encoder.10.layers.0.forward(
1 call @encoder.10.layers.0.1.forward(
1 call @encoder.10.layers.0.0.forward(
1 call @encoder.10.forward(
1 call @encoder.0.skip_add.add(
1 call @encoder.0.skip_add.activation_post_process.forward(
1 call @encoder.0.layers.forward(
1 call @encoder.0.layers.7.forward(
1 call @encoder.0.layers.6.forward(
1 call @encoder.0.layers.5.forward(
1 call @encoder.0.layers.4.forward(
1 call @encoder.0.layers.4.1.forward(
1 call @encoder.0.layers.4.0.forward(
1 call @encoder.0.layers.3.forward(
1 call @encoder.0.layers.2.forward(
1 call @encoder.0.layers.1.forward(
1 call @encoder.0.layers.0.forward(
1 call @encoder.0.layers.0.1.forward(
1 call @encoder.0.layers.0.0.forward(
1 call @encoder.0.forward(
1 call @dequant.forward(
1 call @decoder.layers.9.self_attn.forward(
1 call @decoder.layers.9.norm2.forward(
1 call @decoder.layers.9.norm1.forward(
1 call @decoder.layers.9.linear2.forward(
1 call @decoder.layers.9.linear1.forward(
1 call @decoder.layers.9.forward(
1 call @decoder.layers.9.dropout.forward(
1 call @decoder.layers.9.dropout2.forward(
1 call @decoder.layers.9.dropout1.forward(
1 call @decoder.layers.8.self_attn.forward(
1 call @decoder.layers.8.norm2.forward(
1 call @decoder.layers.8.norm1.forward(
1 call @decoder.layers.8.linear2.forward(
1 call @decoder.layers.8.linear1.forward(
1 call @decoder.layers.8.forward(
1 call @decoder.layers.8.dropout.forward(
1 call @decoder.layers.8.dropout2.forward(
1 call @decoder.layers.8.dropout1.forward(
1 call @decoder.layers.7.self_attn.forward(
1 call @decoder.layers.7.norm2.forward(
1 call @decoder.layers.7.norm1.forward(
1 call @decoder.layers.7.linear2.forward(
1 call @decoder.layers.7.linear1.forward(
1 call @decoder.layers.7.forward(
1 call @decoder.layers.7.dropout.forward(
1 call @decoder.layers.7.dropout2.forward(
1 call @decoder.layers.7.dropout1.forward(
1 call @decoder.layers.6.self_attn.forward(
1 call @decoder.layers.6.norm2.forward(
1 call @decoder.layers.6.norm1.forward(
1 call @decoder.layers.6.linear2.forward(
1 call @decoder.layers.6.linear1.forward(
1 call @decoder.layers.6.forward(
1 call @decoder.layers.6.dropout.forward(
1 call @decoder.layers.6.dropout2.forward(
1 call @decoder.layers.6.dropout1.forward(
1 call @decoder.layers.5.self_attn.forward(
1 call @decoder.layers.5.norm2.forward(
1 call @decoder.layers.5.norm1.forward(
1 call @decoder.layers.5.linear2.forward(
1 call @decoder.layers.5.linear1.forward(
1 call @decoder.layers.5.forward(
1 call @decoder.layers.5.dropout.forward(
1 call @decoder.layers.5.dropout2.forward(
1 call @decoder.layers.5.dropout1.forward(
1 call @decoder.layers.4.self_attn.forward(
1 call @decoder.layers.4.norm2.forward(
1 call @decoder.layers.4.norm1.forward(
1 call @decoder.layers.4.linear2.forward(
1 call @decoder.layers.4.linear1.forward(
1 call @decoder.layers.4.forward(
1 call @decoder.layers.4.dropout.forward(
1 call @decoder.layers.4.dropout2.forward(
1 call @decoder.layers.4.dropout1.forward(
1 call @decoder.layers.3.self_attn.forward(
1 call @decoder.layers.3.norm2.forward(
1 call @decoder.layers.3.norm1.forward(
1 call @decoder.layers.3.linear2.forward(
1 call @decoder.layers.3.linear1.forward(
1 call @decoder.layers.3.forward(
1 call @decoder.layers.3.dropout.forward(
1 call @decoder.layers.3.dropout2.forward(
1 call @decoder.layers.3.dropout1.forward(
1 call @decoder.layers.2.self_attn.forward(
1 call @decoder.layers.2.norm2.forward(
1 call @decoder.layers.2.norm1.forward(
1 call @decoder.layers.2.linear2.forward(
1 call @decoder.layers.2.linear1.forward(
1 call @decoder.layers.2.forward(
1 call @decoder.layers.2.dropout.forward(
1 call @decoder.layers.2.dropout2.forward(
1 call @decoder.layers.2.dropout1.forward(
1 call @decoder.layers.1.self_attn.forward(
1 call @decoder.layers.1.norm2.forward(
1 call @decoder.layers.1.norm1.forward(
1 call @decoder.layers.1.linear2.forward(
1 call @decoder.layers.1.linear1.forward(
1 call @decoder.layers.1.forward(
1 call @decoder.layers.1.dropout.forward(
1 call @decoder.layers.1.dropout2.forward(
1 call @decoder.layers.1.dropout1.forward(
1 call @decoder.layers.15.self_attn.forward(
1 call @decoder.layers.15.norm2.forward(
1 call @decoder.layers.15.norm1.forward(
1 call @decoder.layers.15.linear2.forward(
1 call @decoder.layers.15.linear1.forward(
1 call @decoder.layers.15.forward(
1 call @decoder.layers.15.dropout.forward(
1 call @decoder.layers.15.dropout2.forward(
1 call @decoder.layers.15.dropout1.forward(
1 call @decoder.layers.14.self_attn.forward(
1 call @decoder.layers.14.norm2.forward(
1 call @decoder.layers.14.norm1.forward(
1 call @decoder.layers.14.linear2.forward(
1 call @decoder.layers.14.linear1.forward(
1 call @decoder.layers.14.forward(
1 call @decoder.layers.14.dropout.forward(
1 call @decoder.layers.14.dropout2.forward(
1 call @decoder.layers.14.dropout1.forward(
1 call @decoder.layers.13.self_attn.forward(
1 call @decoder.layers.13.norm2.forward(
1 call @decoder.layers.13.norm1.forward(
1 call @decoder.layers.13.linear2.forward(
1 call @decoder.layers.13.linear1.forward(
1 call @decoder.layers.13.forward(
1 call @decoder.layers.13.dropout.forward(
1 call @decoder.layers.13.dropout2.forward(
1 call @decoder.layers.13.dropout1.forward(
1 call @decoder.layers.12.self_attn.forward(
1 call @decoder.layers.12.norm2.forward(
1 call @decoder.layers.12.norm1.forward(
1 call @decoder.layers.12.linear2.forward(
1 call @decoder.layers.12.linear1.forward(
1 call @decoder.layers.12.forward(
1 call @decoder.layers.12.dropout.forward(
1 call @decoder.layers.12.dropout2.forward(
1 call @decoder.layers.12.dropout1.forward(
1 call @decoder.layers.11.self_attn.forward(
1 call @decoder.layers.11.norm2.forward(
1 call @decoder.layers.11.norm1.forward(
1 call @decoder.layers.11.linear2.forward(
1 call @decoder.layers.11.linear1.forward(
1 call @decoder.layers.11.forward(
1 call @decoder.layers.11.dropout.forward(
1 call @decoder.layers.11.dropout2.forward(
1 call @decoder.layers.11.dropout1.forward(
1 call @decoder.layers.10.self_attn.forward(
1 call @decoder.layers.10.norm2.forward(
1 call @decoder.layers.10.norm1.forward(
1 call @decoder.layers.10.linear2.forward(
1 call @decoder.layers.10.linear1.forward(
1 call @decoder.layers.10.forward(
1 call @decoder.layers.10.dropout.forward(
1 call @decoder.layers.10.dropout2.forward(
1 call @decoder.layers.10.dropout1.forward(
1 call @decoder.layers.0.self_attn.forward(
1 call @decoder.layers.0.norm2.forward(
1 call @decoder.layers.0.norm1.forward(
1 call @decoder.layers.0.linear2.forward(
1 call @decoder.layers.0.linear1.forward(
1 call @decoder.layers.0.forward(
1 call @decoder.layers.0.dropout.forward(
1 call @decoder.layers.0.dropout2.forward(
1 call @decoder.layers.0.dropout1.forward(
1 call @decoder.forward(
1 call @audio_normalize.reflect.forward(
1 call @audio_normalize.forward(
This file has been truncated, but you can view the full file.
module {
torch.global_slot "private" @n_fft : i64 {
%num320_i64 = basicpy.numeric_constant 320 : i64
torch.global_slot.init %num320_i64 : i64
}
torch.global_slot "private" @hop_length : i64 {
%num160_i64 = basicpy.numeric_constant 160 : i64
torch.global_slot.init %num160_i64 : i64
}
torch.global_slot "private" @win_length : i64 {
%num320_i64 = basicpy.numeric_constant 320 : i64
torch.global_slot.init %num320_i64 : i64
}
torch.global_slot "private" @audio_normalize.filter_ : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1x1x161xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1x1x161xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.0.skip : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.0.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x161x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x161x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.0.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.0.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.0.layers.4.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.0.layers.4.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.0.layers.7.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.1.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.1.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.1.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.1.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.1.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.1.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.2.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.2.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.2.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.2.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.2.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.2.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.3.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.3.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.3.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.3.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.3.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.3.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.4.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.4.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.4.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.4.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.4.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.4.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.5.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.5.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.5.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.5.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.5.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.5.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.6.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.6.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.6.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.6.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.6.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.6.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.7.skip : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.7.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.7.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.7.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.7.layers.4.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.7.layers.4.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.7.layers.7.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.8.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.8.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.8.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.8.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.8.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.8.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.9.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.9.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.9.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.9.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.9.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.9.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.10.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.10.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.10.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// --- encoder.10 (continued): remaining parameter/flag slots for this stage ---
// Each slot holds either a tensor parameter (as a type-erased ndarray) or a
// module attribute such as a `training` flag (all false: inference mode).
// NOTE(review): weight payloads were elided by the importer — the opaque
// "0xDEADBEEF" bytes are placeholders; only the tensor shapes are meaningful.
torch.global_slot "private" @encoder.10.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.10.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// NOTE(review): bias slots are declared !torch.optional<ndarray> but are
// initialized with a bare ndarray; presumably torch.global_slot.init accepts
// subtype initialization of optional slots — confirm against the dialect.
torch.global_slot "private" @encoder.10.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// _se_reduce/_se_expand names suggest a squeeze-and-excitation sub-block
// (512 -> 102 -> 512 channels) — inferred from naming; verify in the model.
torch.global_slot "private" @encoder.10.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Pointwise (kernel-size-1) projection conv: 512 -> 512 channels.
torch.global_slot "private" @encoder.10.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.10.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// --- encoder.11: parameter/flag slots (same layout as neighboring stages) ---
// `skip` = true: this stage presumably adds a residual/skip connection —
// inferred from the name; confirm against `encoder.N.skip_add` in the model.
// NOTE(review): weight payloads were elided by the importer — the opaque
// "0xDEADBEEF" bytes are placeholders; only the tensor shapes are meaningful.
// Bias slots are declared !torch.optional<ndarray> but initialized with a
// bare ndarray (apparent subtype initialization of optional slots).
torch.global_slot "private" @encoder.11.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.11.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// Squeeze-and-excitation pair (512 -> 102 -> 512) — inferred from naming.
torch.global_slot "private" @encoder.11.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// Second half of the stage repeats the conv / SE / projection pattern.
torch.global_slot "private" @encoder.11.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.11.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.11.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// --- encoder.12: parameter/flag slots; identical layout to encoder.11 ---
// NOTE(review): weight payloads were elided by the importer — the opaque
// "0xDEADBEEF" bytes are placeholders; only the tensor shapes are meaningful.
// Bias slots are declared !torch.optional<ndarray> but initialized with a
// bare ndarray (apparent subtype initialization of optional slots).
torch.global_slot "private" @encoder.12.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.12.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// Squeeze-and-excitation pair (512 -> 102 -> 512) — inferred from naming.
torch.global_slot "private" @encoder.12.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// Second half of the stage repeats the conv / SE / projection pattern.
torch.global_slot "private" @encoder.12.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.12.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.12.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// --- encoder.13: parameter/flag slots; identical layout to encoder.11/12 ---
// NOTE(review): weight payloads were elided by the importer — the opaque
// "0xDEADBEEF" bytes are placeholders; only the tensor shapes are meaningful.
// Bias slots are declared !torch.optional<ndarray> but initialized with a
// bare ndarray (apparent subtype initialization of optional slots).
torch.global_slot "private" @encoder.13.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.13.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// Squeeze-and-excitation pair (512 -> 102 -> 512) — inferred from naming.
torch.global_slot "private" @encoder.13.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// Second half of the stage repeats the conv / SE / projection pattern.
torch.global_slot "private" @encoder.13.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.12.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.13.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.13.layers.17.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// --- encoder.14: a shorter stage — no SE sub-block, and `skip` = false ---
// (the only stage in view whose skip flag is false).
// NOTE(review): weight payloads were elided by the importer — the opaque
// "0xDEADBEEF" bytes are placeholders; only the tensor shapes are meaningful.
// Bias slots are declared !torch.optional<ndarray> but initialized with a
// bare ndarray (apparent subtype initialization of optional slots).
torch.global_slot "private" @encoder.14.skip : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.14.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.14.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.14.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// Pointwise (kernel-size-1) conv: 512 -> 512 channels.
torch.global_slot "private" @encoder.14.layers.4.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.14.layers.4.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.14.layers.7.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// --- encoder.15: stage with explicit batch-norm parameter/statistic slots ---
// Unlike encoder.10-13, convs here are unbiased (bias slot holds None) and
// are followed by batch-norm slots (weight/bias/running stats/counter).
// NOTE(review): weight payloads were elided by the importer — the opaque
// "0xDEADBEEF" bytes are placeholders; only the tensor shapes are meaningful.
torch.global_slot "private" @encoder.15.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.15.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Optional bias slot initialized to None (basicpy.singleton): conv has no bias.
torch.global_slot "private" @encoder.15.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
// Batch-norm slots for layers.1 (512 channels).
torch.global_slot "private" @encoder.15.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// The only non-elided tensor constant in view: the batch counter starts at 0.
torch.global_slot "private" @encoder.15.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.15.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// Squeeze-and-excitation pair (512 -> 102 -> 512) — inferred from naming.
// These bias slots are optional-typed but initialized with a bare ndarray.
torch.global_slot "private" @encoder.15.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Pointwise projection conv (512 -> 512), biased, followed by batch-norm.
torch.global_slot "private" @encoder.15.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.15.layers.6.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.15.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// --- encoder.16: same layout as encoder.15 (unbiased conv + batch-norm) ---
// (This stage's slots continue past the visible window.)
// NOTE(review): weight payloads were elided by the importer — the opaque
// "0xDEADBEEF" bytes are placeholders; only the tensor shapes are meaningful.
torch.global_slot "private" @encoder.16.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.16.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Optional bias slot initialized to None (basicpy.singleton): conv has no bias.
torch.global_slot "private" @encoder.16.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
// Batch-norm slots for layers.1 (512 channels).
torch.global_slot "private" @encoder.16.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Non-elided constant: the batch-norm batch counter starts at 0.
torch.global_slot "private" @encoder.16.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.16.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
// Squeeze-and-excitation pair (512 -> 102 -> 512) — inferred from naming.
// These bias slots are optional-typed but initialized with a bare ndarray.
torch.global_slot "private" @encoder.16.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Pointwise projection conv (512 -> 512), biased, followed by batch-norm.
torch.global_slot "private" @encoder.16.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.16.layers.6.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.16.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.17.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.17.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @encoder.17.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.17.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.17.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.17.layers.6.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.17.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.18.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.18.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @encoder.18.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.18.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.18.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.18.layers.6.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.18.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.19.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.19.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @encoder.19.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.19.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.19.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.19.layers.6.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.19.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.20.skip : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @encoder.20.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x64x7xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x64x7xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @encoder.20.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.20.layers.3.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.20.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<102xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<102xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x102x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x102x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant dense<0> : tensor<i64>
%0 = numpy.create_array_from_tensor %cst : (tensor<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @encoder.20.layers.6.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @encoder.20.layers.8.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.0.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.0.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.0.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.0.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.0.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.0.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.0.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.0.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.0.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.0.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.0.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.0.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.0.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.0.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.0.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.1.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.1.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.1.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.1.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.1.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.1.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.1.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.1.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.1.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.1.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.1.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.1.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.1.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.1.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.1.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.2.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.2.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.2.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.2.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.2.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.2.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.2.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.2.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.2.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.2.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.2.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.2.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.2.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.2.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.2.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.3.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.3.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.3.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.3.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.3.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.3.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.3.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.3.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.3.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.3.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.3.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.3.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.3.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.3.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.3.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.4.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.4.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.4.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.4.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.4.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.4.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.4.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.4.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.4.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.4.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.4.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.4.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.4.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.4.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.4.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.5.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.5.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.5.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.5.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.5.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.5.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.5.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.5.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.5.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.5.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.5.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.5.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.5.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.5.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.5.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.6.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.6.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.6.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.6.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.6.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.6.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.6.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.6.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.6.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.6.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.6.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.6.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.6.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.6.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.6.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.7.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.7.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.7.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.7.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.7.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.7.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.7.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.7.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.7.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.7.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.7.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.7.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.7.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.7.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.7.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.8.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.8.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.8.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.8.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.8.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.8.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.8.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.8.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.8.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.8.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.8.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.8.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.8.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.8.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.8.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.9.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.9.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.9.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.9.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.9.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.9.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.9.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.9.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.9.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.9.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.9.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.9.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.9.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.9.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.9.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.10.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.10.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.10.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.10.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.10.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.10.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.10.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.10.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.10.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.10.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.10.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.10.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.10.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.10.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.10.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.11.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.11.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.11.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.11.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.11.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.11.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.11.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.11.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.11.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.11.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.11.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.11.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.11.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.11.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.11.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.12.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.12.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.12.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.12.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.12.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.12.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.12.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.12.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.12.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.12.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.12.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.12.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.12.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.12.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.12.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.13.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.13.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.13.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.13.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.13.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.13.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.13.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.13.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.13.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.13.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.13.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.13.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.13.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.13.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.13.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.14.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.14.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.14.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.14.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.14.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.14.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.14.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.14.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.14.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.14.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.14.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.14.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.14.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.14.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.14.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.15.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.self_attn.q_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.15.self_attn.k_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.15.self_attn.v_proj_weight : !basicpy.NoneType {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.15.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<1536xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<1536xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.self_attn.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.15.self_attn.embed_dim : i64 {
%num512_i64 = basicpy.numeric_constant 512 : i64
torch.global_slot.init %num512_i64 : i64
}
torch.global_slot "private" @decoder.layers.15.self_attn._qkv_same_embed_dim : !basicpy.BoolType {
%bool_true = basicpy.bool_constant true
torch.global_slot.init %bool_true : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.15.self_attn.num_heads : i64 {
%num2_i64 = basicpy.numeric_constant 2 : i64
torch.global_slot.init %num2_i64 : i64
}
torch.global_slot "private" @decoder.layers.15.self_attn.dropout : f64 {
%num = basicpy.numeric_constant 1.000000e-01 : f64
torch.global_slot.init %num : f64
}
torch.global_slot "private" @decoder.layers.15.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.15.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%0 = basicpy.singleton : !basicpy.NoneType
torch.global_slot.init %0 : !basicpy.NoneType
}
torch.global_slot "private" @decoder.layers.15.self_attn.add_zero_attn : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.15.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.dropout.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.15.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512x512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512x512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<512xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<512xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @decoder.layers.15.dropout1.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @decoder.layers.15.dropout2.training : !basicpy.BoolType {
%bool_false = basicpy.bool_constant false
torch.global_slot.init %bool_false : !basicpy.BoolType
}
torch.global_slot "private" @fc.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<999x512x1xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<999x512x1xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
torch.global_slot "private" @fc.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<999xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<999xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @audio_normalize.reflect.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bytes = basicpy.bytes_constant "reflect"
%c80_i64 = constant 80 : i64
%cst = constant 0.000000e+00 : f64
%0 = basicpy.build_list %c80_i64, %c80_i64 : (i64, i64) -> !basicpy.ListType
%1 = call @__torch__.torch.nn.functional._pad$0(%arg0, %0, %bytes, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BytesType, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @__torch__.torch.nn.functional._pad$0(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !basicpy.ListType, %arg2: !basicpy.BytesType, %arg3: f64) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bytes = basicpy.bytes_constant "circular"
%bytes_0 = basicpy.bytes_constant "replicate"
%bytes_1 = basicpy.bytes_constant "reflect"
%bytes_2 = basicpy.bytes_constant "constant"
%bytes_3 = basicpy.bytes_constant "Exception"
%c2_i64 = constant 2 : i64
%c0_i64 = constant 0 : i64
%c3_i64 = constant 3 : i64
%c4_i64 = constant 4 : i64
%c5_i64 = constant 5 : i64
%c6_i64 = constant 6 : i64
%0 = torch.prim.Uninitialized : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.kernel_call "aten::len" %arg1 : (!basicpy.ListType) -> i64 {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%2 = torch.kernel_call "aten::remainder" %1, %c2_i64 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%3 = torch.kernel_call "aten::eq" %2, %c0_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
scf.if %4 {
} else {
torch.prim.RaiseException %bytes_3
}
%5 = torch.kernel_call "aten::len" %arg1 : (!basicpy.ListType) -> i64 {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%6 = torch.kernel_call "aten::floordiv" %5, %c2_i64 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%7 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%8 = torch.kernel_call "aten::le" %6, %7 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%9 = basicpy.bool_cast %8 : !basicpy.BoolType -> i1
scf.if %9 {
} else {
torch.prim.RaiseException %bytes_3
}
%10 = torch.kernel_call "aten::eq" %arg2, %bytes_2 : (!basicpy.BytesType, !basicpy.BytesType) -> !basicpy.BoolType {sigArgTypes = ["str", "str"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%11 = basicpy.bool_cast %10 : !basicpy.BoolType -> i1
%12 = scf.if %11 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%13 = torch.kernel_call "aten::constant_pad_nd" %arg0, %arg1, %arg3 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, f64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %13 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
%13 = torch.kernel_call "aten::eq" %arg3, %c0_i64 : (f64, i64) -> !basicpy.BoolType {sigArgTypes = ["float", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%14 = basicpy.bool_cast %13 : !basicpy.BoolType -> i1
scf.if %14 {
} else {
torch.prim.RaiseException %bytes_3
}
%15 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%16 = torch.kernel_call "aten::eq" %15, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%17 = basicpy.bool_cast %16 : !basicpy.BoolType -> i1
%18 = scf.if %17 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%19 = torch.kernel_call "aten::len" %arg1 : (!basicpy.ListType) -> i64 {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%20 = torch.kernel_call "aten::eq" %19, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%21 = basicpy.bool_cast %20 : !basicpy.BoolType -> i1
scf.if %21 {
} else {
torch.prim.RaiseException %bytes_3
}
%22 = torch.kernel_call "aten::eq" %arg2, %bytes_1 : (!basicpy.BytesType, !basicpy.BytesType) -> !basicpy.BoolType {sigArgTypes = ["str", "str"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%23 = basicpy.bool_cast %22 : !basicpy.BoolType -> i1
%24 = scf.if %23 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%25 = torch.kernel_call "aten::reflection_pad1d" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %25 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
%25 = torch.kernel_call "aten::eq" %arg2, %bytes_0 : (!basicpy.BytesType, !basicpy.BytesType) -> !basicpy.BoolType {sigArgTypes = ["str", "str"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%26 = basicpy.bool_cast %25 : !basicpy.BoolType -> i1
%27 = scf.if %26 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%28 = torch.kernel_call "aten::replication_pad1d" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %28 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
%28 = torch.kernel_call "aten::eq" %arg2, %bytes : (!basicpy.BytesType, !basicpy.BytesType) -> !basicpy.BoolType {sigArgTypes = ["str", "str"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%29 = basicpy.bool_cast %28 : !basicpy.BoolType -> i1
%30 = scf.if %29 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%31 = call @__torch__.torch.nn.functional._pad_circular$1(%arg0, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %31 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
torch.prim.RaiseException %bytes_3
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %30 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %27 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %24 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
%19 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%20 = torch.kernel_call "aten::eq" %19, %c4_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%21 = basicpy.bool_cast %20 : !basicpy.BoolType -> i1
%22 = scf.if %21 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%23 = torch.kernel_call "aten::len" %arg1 : (!basicpy.ListType) -> i64 {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%24 = torch.kernel_call "aten::eq" %23, %c4_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%25 = basicpy.bool_cast %24 : !basicpy.BoolType -> i1
scf.if %25 {
} else {
torch.prim.RaiseException %bytes_3
}
%26 = torch.kernel_call "aten::eq" %arg2, %bytes_1 : (!basicpy.BytesType, !basicpy.BytesType) -> !basicpy.BoolType {sigArgTypes = ["str", "str"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%27 = basicpy.bool_cast %26 : !basicpy.BoolType -> i1
%28 = scf.if %27 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%29 = torch.kernel_call "aten::reflection_pad2d" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %29 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
%29 = torch.kernel_call "aten::eq" %arg2, %bytes_0 : (!basicpy.BytesType, !basicpy.BytesType) -> !basicpy.BoolType {sigArgTypes = ["str", "str"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%30 = basicpy.bool_cast %29 : !basicpy.BoolType -> i1
%31 = scf.if %30 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%32 = torch.kernel_call "aten::replication_pad2d" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %32 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
%32 = torch.kernel_call "aten::eq" %arg2, %bytes : (!basicpy.BytesType, !basicpy.BytesType) -> !basicpy.BoolType {sigArgTypes = ["str", "str"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%33 = basicpy.bool_cast %32 : !basicpy.BoolType -> i1
%34 = scf.if %33 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%35 = call @__torch__.torch.nn.functional._pad_circular$1(%arg0, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %35 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
torch.prim.RaiseException %bytes_3
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %34 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %31 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %28 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
%23 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%24 = torch.kernel_call "aten::eq" %23, %c5_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%25 = basicpy.bool_cast %24 : !basicpy.BoolType -> i1
%26 = scf.if %25 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%27 = torch.kernel_call "aten::len" %arg1 : (!basicpy.ListType) -> i64 {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%28 = torch.kernel_call "aten::eq" %27, %c6_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%29 = basicpy.bool_cast %28 : !basicpy.BoolType -> i1
scf.if %29 {
} else {
torch.prim.RaiseException %bytes_3
}
%30 = torch.kernel_call "aten::eq" %arg2, %bytes_1 : (!basicpy.BytesType, !basicpy.BytesType) -> !basicpy.BoolType {sigArgTypes = ["str", "str"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%31 = basicpy.bool_cast %30 : !basicpy.BoolType -> i1
%32 = scf.if %31 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
torch.prim.RaiseException %bytes_3
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
%33 = torch.kernel_call "aten::eq" %arg2, %bytes_0 : (!basicpy.BytesType, !basicpy.BytesType) -> !basicpy.BoolType {sigArgTypes = ["str", "str"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%34 = basicpy.bool_cast %33 : !basicpy.BoolType -> i1
%35 = scf.if %34 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%36 = torch.kernel_call "aten::replication_pad3d" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %36 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
%36 = torch.kernel_call "aten::eq" %arg2, %bytes : (!basicpy.BytesType, !basicpy.BytesType) -> !basicpy.BoolType {sigArgTypes = ["str", "str"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%37 = basicpy.bool_cast %36 : !basicpy.BoolType -> i1
%38 = scf.if %37 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%39 = call @__torch__.torch.nn.functional._pad_circular$1(%arg0, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %39 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
torch.prim.RaiseException %bytes_3
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %38 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %35 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %32 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
torch.prim.RaiseException %bytes_3
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %26 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %22 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %18 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %12 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @__torch__.torch.nn.functional._pad_circular$1(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c0_i64 = constant 0 : i64
%c9223372036854775807_i64 = constant 9223372036854775807 : i64
%c1_i64 = constant 1 : i64
%c2_i64 = constant 2 : i64
%c-1_i64 = constant -1 : i64
%c-2_i64 = constant -2 : i64
%c3_i64 = constant 3 : i64
%c-3_i64 = constant -3 : i64
%c-4_i64 = constant -4 : i64
%c4_i64 = constant 4 : i64
%c-5_i64 = constant -5 : i64
%c-6_i64 = constant -6 : i64
%0 = torch.kernel_call "aten::slice" %arg0, %c0_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = torch.kernel_call "aten::slice" %0, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%2 = torch.kernel_call "aten::__getitem__" %arg1, %c-1_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%3 = torch.kernel_call "aten::slice" %1, %c2_i64, %c0_i64, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = basicpy.build_list %arg0, %3 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType
%5 = torch.kernel_call "aten::cat" %4, %c2_i64 : (!basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%6 = torch.kernel_call "aten::slice" %5, %c0_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%7 = torch.kernel_call "aten::slice" %6, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%8 = torch.kernel_call "aten::__getitem__" %arg1, %c-1_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%9 = torch.kernel_call "aten::__getitem__" %arg1, %c-2_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%10 = torch.kernel_call "aten::add" %8, %9 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%11 = torch.kernel_call "aten::neg" %10 : (i64) -> i64 {sigArgTypes = ["int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%12 = torch.kernel_call "aten::__getitem__" %arg1, %c-1_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%13 = torch.kernel_call "aten::neg" %12 : (i64) -> i64 {sigArgTypes = ["int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%14 = torch.kernel_call "aten::slice" %7, %c2_i64, %11, %13, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%15 = basicpy.build_list %14, %5 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType
%16 = torch.kernel_call "aten::cat" %15, %c2_i64 : (!basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%17 = torch.kernel_call "aten::len" %arg1 : (!basicpy.ListType) -> i64 {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%18 = torch.kernel_call "aten::gt" %17, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%19 = basicpy.bool_cast %18 : !basicpy.BoolType -> i1
%20 = scf.if %19 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%25 = torch.kernel_call "aten::slice" %16, %c0_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%26 = torch.kernel_call "aten::slice" %25, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%27 = torch.kernel_call "aten::slice" %26, %c2_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%28 = torch.kernel_call "aten::__getitem__" %arg1, %c-3_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%29 = torch.kernel_call "aten::slice" %27, %c3_i64, %c0_i64, %28, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%30 = basicpy.build_list %16, %29 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType
%31 = torch.kernel_call "aten::cat" %30, %c3_i64 : (!basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%32 = torch.kernel_call "aten::slice" %31, %c0_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%33 = torch.kernel_call "aten::slice" %32, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%34 = torch.kernel_call "aten::slice" %33, %c2_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%35 = torch.kernel_call "aten::__getitem__" %arg1, %c-3_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%36 = torch.kernel_call "aten::__getitem__" %arg1, %c-4_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%37 = torch.kernel_call "aten::add" %35, %36 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%38 = torch.kernel_call "aten::neg" %37 : (i64) -> i64 {sigArgTypes = ["int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%39 = torch.kernel_call "aten::__getitem__" %arg1, %c-3_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%40 = torch.kernel_call "aten::neg" %39 : (i64) -> i64 {sigArgTypes = ["int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%41 = torch.kernel_call "aten::slice" %34, %c3_i64, %38, %40, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%42 = basicpy.build_list %41, %31 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType
%43 = torch.kernel_call "aten::cat" %42, %c3_i64 : (!basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %43 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %16 : !numpy.ndarray<*:!numpy.any_dtype>
}
%21 = torch.kernel_call "aten::len" %arg1 : (!basicpy.ListType) -> i64 {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%22 = torch.kernel_call "aten::gt" %21, %c4_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%23 = basicpy.bool_cast %22 : !basicpy.BoolType -> i1
%24 = scf.if %23 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%25 = torch.kernel_call "aten::slice" %20, %c0_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%26 = torch.kernel_call "aten::slice" %25, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%27 = torch.kernel_call "aten::slice" %26, %c2_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%28 = torch.kernel_call "aten::slice" %27, %c3_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%29 = torch.kernel_call "aten::__getitem__" %arg1, %c-5_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%30 = torch.kernel_call "aten::slice" %28, %c4_i64, %c0_i64, %29, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%31 = basicpy.build_list %20, %30 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType
%32 = torch.kernel_call "aten::cat" %31, %c4_i64 : (!basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%33 = torch.kernel_call "aten::slice" %32, %c0_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%34 = torch.kernel_call "aten::slice" %33, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%35 = torch.kernel_call "aten::slice" %34, %c2_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%36 = torch.kernel_call "aten::slice" %35, %c3_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%37 = torch.kernel_call "aten::__getitem__" %arg1, %c-5_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%38 = torch.kernel_call "aten::__getitem__" %arg1, %c-6_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%39 = torch.kernel_call "aten::add" %37, %38 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%40 = torch.kernel_call "aten::neg" %39 : (i64) -> i64 {sigArgTypes = ["int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%41 = torch.kernel_call "aten::__getitem__" %arg1, %c-5_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%42 = torch.kernel_call "aten::neg" %41 : (i64) -> i64 {sigArgTypes = ["int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%43 = torch.kernel_call "aten::slice" %36, %c4_i64, %40, %42, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%44 = basicpy.build_list %43, %32 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType
%45 = torch.kernel_call "aten::cat" %44, %c4_i64 : (!basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %45 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %20 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %24 : !numpy.ndarray<*:!numpy.any_dtype>
}
  // Audio normalization: y = log1p(x * 2^20); a 2-D input is unsqueezed to
  // 3-D (N=1), then a smoothed mean (reflect-pad + conv1d with the stored
  // @audio_normalize.filter_ moving-average kernel, mean over the last dim)
  // is subtracted from the signal via add(x, neg(mean)).
  func private @audio_normalize.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
    %bool_true = basicpy.bool_constant true
    %c1048576_i64 = constant 1048576 : i64
    %c2_i64 = constant 2 : i64
    %c0_i64 = constant 0 : i64
    %c1_i64 = constant 1 : i64
    %c9223372036854775807_i64 = constant 9223372036854775807 : i64
    %c-1_i64 = constant -1 : i64
    %0 = basicpy.singleton : !basicpy.NoneType
    // Scale by 2^20 before log1p (dynamic-range compression of the waveform).
    %1 = torch.kernel_call "aten::mul" %arg0, %c1048576_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    %2 = torch.kernel_call "aten::log1p" %1 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    %3 = torch.kernel_call "aten::size" %2 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
    %4 = torch.kernel_call "aten::len" %3 : (!basicpy.ListType) -> i64 {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %5 = torch.kernel_call "aten::eq" %4, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
    %6 = basicpy.bool_cast %5 : !basicpy.BoolType -> i1
    // A rank-2 input gains a leading batch dim (unsqueeze at dim 0); the
    // trailing slices are full-range no-ops produced by the tracer.
    %7 = scf.if %6 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
      %20 = torch.kernel_call "aten::unsqueeze" %2, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
      %21 = torch.kernel_call "aten::slice" %20, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
      %22 = torch.kernel_call "aten::slice" %21, %c2_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
      scf.yield %22 : !numpy.ndarray<*:!numpy.any_dtype>
    } else {
      scf.yield %2 : !numpy.ndarray<*:!numpy.any_dtype>
    }
    // Mean over dim 1 with keepdim=true, then reflect-pad before filtering.
    %8 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
    %9 = torch.kernel_call "aten::mean" %7, %8, %bool_true, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    %10 = call @audio_normalize.reflect.forward(%9) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    // conv1d(stride=1, padding=0, dilation=1, groups=1) with the stored
    // smoothing filter; no bias (None).
    %11 = torch.global_slot.get @audio_normalize.filter_ : !numpy.ndarray<*:!numpy.any_dtype>
    %12 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
    %13 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
    %14 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
    %15 = torch.kernel_call "aten::conv1d" %10, %11, %0, %12, %13, %14, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.NoneType, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    // Reduce over the last dim (keepdim=true) and subtract: out = x + (-mean).
    %16 = basicpy.build_list %c-1_i64 : (i64) -> !basicpy.ListType
    %17 = torch.kernel_call "aten::mean" %15, %16, %bool_true, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    %18 = torch.kernel_call "aten::neg" %17 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    %19 = torch.kernel_call "aten::add" %7, %18, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    return %19 : !numpy.ndarray<*:!numpy.any_dtype>
  }
  // First conv of encoder block 0: aten::conv1d with stride=2, padding=3,
  // dilation=1, groups=1; weight/bias come from module global slots.
  func private @encoder.0.layers.0.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
    %c2_i64 = constant 2 : i64
    %c3_i64 = constant 3 : i64
    %c1_i64 = constant 1 : i64
    %0 = torch.global_slot.get @encoder.0.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
    %1 = torch.global_slot.get @encoder.0.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %2 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
    %3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
    %4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
    %5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    return %5 : !numpy.ndarray<*:!numpy.any_dtype>
  }
  // torch.nn.functional.relu: %arg1 is the `inplace` flag — dispatches to the
  // mutating aten::relu_ when true, the pure aten::relu otherwise.
  func private @__torch__.torch.nn.functional.relu$2(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype> {
    %0 = basicpy.bool_cast %arg1 : !basicpy.BoolType -> i1
    %1 = scf.if %0 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
      %2 = torch.kernel_call "aten::relu_" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = true, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
      scf.yield %2 : !numpy.ndarray<*:!numpy.any_dtype>
    } else {
      %2 = torch.kernel_call "aten::relu" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
      scf.yield %2 : !numpy.ndarray<*:!numpy.any_dtype>
    }
    return %1 : !numpy.ndarray<*:!numpy.any_dtype>
  }
  // Sequential container: conv (layers.0.0) followed by layers.0.1
  // (presumably an activation/norm submodule — not visible here).
  func private @encoder.0.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
    %0 = call @encoder.0.layers.0.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %1 = call @encoder.0.layers.0.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    return %1 : !numpy.ndarray<*:!numpy.any_dtype>
  }
  // torch.nn.functional.dropout: %arg1 = p, %arg2 = training, %arg3 = inplace.
  // Raises if p < 0.0 or p > 1.0, then dispatches to the mutating
  // aten::dropout_ when inplace, the pure aten::dropout otherwise.
  func private @__torch__.torch.nn.functional.dropout$3(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: f64, %arg2: !basicpy.BoolType, %arg3: !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype> {
    %bytes = basicpy.bytes_constant "Exception"
    %bool_true = basicpy.bool_constant true
    %cst = constant 0.000000e+00 : f64
    %cst_0 = constant 1.000000e+00 : f64
    // Short-circuit OR: (p < 0.0) or (p > 1.0).
    %0 = torch.kernel_call "aten::lt" %arg1, %cst : (f64, f64) -> !basicpy.BoolType {sigArgTypes = ["float", "float"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
    %1 = basicpy.bool_cast %0 : !basicpy.BoolType -> i1
    %2 = scf.if %1 -> (!basicpy.BoolType) {
      scf.yield %bool_true : !basicpy.BoolType
    } else {
      %6 = torch.kernel_call "aten::gt" %arg1, %cst_0 : (f64, f64) -> !basicpy.BoolType {sigArgTypes = ["float", "float"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
      scf.yield %6 : !basicpy.BoolType
    }
    %3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
    scf.if %3 {
      torch.prim.RaiseException %bytes
    } else {
    }
    %4 = basicpy.bool_cast %arg3 : !basicpy.BoolType -> i1
    %5 = scf.if %4 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
      %6 = torch.kernel_call "aten::dropout_" %arg0, %arg1, %arg2 : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "float", "bool"], sigIsMutable = true, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
      scf.yield %6 : !numpy.ndarray<*:!numpy.any_dtype>
    } else {
      %6 = torch.kernel_call "aten::dropout" %arg0, %arg1, %arg2 : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "float", "bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
      scf.yield %6 : !numpy.ndarray<*:!numpy.any_dtype>
    }
    return %5 : !numpy.ndarray<*:!numpy.any_dtype>
  }
  // Sequential container for encoder block 0: chains sublayers 0 through 7.
  func private @encoder.0.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
    %0 = call @encoder.0.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %1 = call @encoder.0.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %2 = call @encoder.0.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %3 = call @encoder.0.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %4 = call @encoder.0.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %5 = call @encoder.0.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %6 = call @encoder.0.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %7 = call @encoder.0.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    return %7 : !numpy.ndarray<*:!numpy.any_dtype>
  }
  // Encoder block 0: runs the layer stack, then optionally applies a residual
  // connection (skip_add of output and input) when the @encoder.0.skip flag
  // (a module attribute stored in a global slot) is true.
  func private @encoder.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
    %0 = call @encoder.0.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %1 = torch.global_slot.get @encoder.0.skip : !basicpy.BoolType
    %2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
    %3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
      %4 = call @encoder.0.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
      scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
    } else {
      scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
    }
    return %3 : !numpy.ndarray<*:!numpy.any_dtype>
  }
  // torch.nn.functional.batch_norm: %arg1/%arg2 = running_mean/var,
  // %arg3/%arg4 = weight/bias, %arg5 = training, %arg6 = momentum,
  // %arg7 = eps. In training mode the input's sizes are validated first;
  // the trailing bool_true is aten::batch_norm's cudnn_enabled flag.
  func private @__torch__.torch.nn.functional.batch_norm$4(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg5: !basicpy.BoolType, %arg6: f64, %arg7: f64) -> !numpy.ndarray<*:!numpy.any_dtype> {
    %bool_true = basicpy.bool_constant true
    %0 = basicpy.bool_cast %arg5 : !basicpy.BoolType -> i1
    scf.if %0 {
      %2 = torch.kernel_call "aten::size" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
      %3 = call @__torch__.torch.nn.functional._verify_batch_size$5(%2) : (!basicpy.ListType) -> !basicpy.NoneType
    } else {
    }
    %1 = torch.kernel_call "aten::batch_norm" %arg0, %arg3, %arg4, %arg1, %arg2, %arg5, %arg6, %arg7, %bool_true : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor?", "Tensor?", "Tensor?", "Tensor?", "bool", "float", "float", "bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    return %1 : !numpy.ndarray<*:!numpy.any_dtype>
  }
  // torch.nn.functional._verify_batch_size: computes
  // size[0] * size[2] * ... * size[rank-1] (every dim except the channel
  // dim 1) and raises when the product equals 1 — batch norm in training
  // mode needs more than one value per channel.
  func private @__torch__.torch.nn.functional._verify_batch_size$5(%arg0: !basicpy.ListType) -> !basicpy.NoneType {
    %bytes = basicpy.bytes_constant "Exception"
    %bool_true = basicpy.bool_constant true
    %c0_i64 = constant 0 : i64
    %c2_i64 = constant 2 : i64
    %c1_i64 = constant 1 : i64
    %0 = basicpy.singleton : !basicpy.NoneType
    // Accumulator starts at size[0]; loop runs len(size) - 2 iterations.
    %1 = torch.kernel_call "aten::__getitem__" %arg0, %c0_i64 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
    %2 = torch.kernel_call "aten::len" %arg0 : (!basicpy.ListType) -> i64 {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %3 = torch.kernel_call "aten::sub" %2, %c2_i64 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %4 = torch.prim.Loop %3, %bool_true, init(%1) {
    ^bb0(%arg1: i64, %arg2: i64):  // no predecessors
      // Iteration i multiplies the running product by size[i + 2].
      %7 = torch.kernel_call "aten::add" %arg1, %c2_i64 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
      %8 = torch.kernel_call "aten::__getitem__" %arg0, %7 : (!basicpy.ListType, i64) -> i64 {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
      %9 = torch.kernel_call "aten::mul" %arg2, %8 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
      torch.prim.Loop.condition %bool_true iter(%9) : !basicpy.BoolType, (i64)
    } : (i64, !basicpy.BoolType, i64) -> i64
    %5 = torch.kernel_call "aten::eq" %4, %c1_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
    %6 = basicpy.bool_cast %5 : !basicpy.BoolType -> i1
    scf.if %6 {
      torch.prim.RaiseException %bytes
    } else {
    }
    return %0 : !basicpy.NoneType
  }
  // Top-level encoder: a sequential chain of 21 sub-blocks (encoder.0
  // through encoder.20), each feeding the next.
  func private @encoder.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
    %0 = call @encoder.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %1 = call @encoder.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %2 = call @encoder.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %3 = call @encoder.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %4 = call @encoder.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %5 = call @encoder.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %6 = call @encoder.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %7 = call @encoder.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %8 = call @encoder.8.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %9 = call @encoder.9.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %10 = call @encoder.10.forward(%9) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %11 = call @encoder.11.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %12 = call @encoder.12.forward(%11) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %13 = call @encoder.13.forward(%12) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %14 = call @encoder.14.forward(%13) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %15 = call @encoder.15.forward(%14) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %16 = call @encoder.16.forward(%15) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %17 = call @encoder.17.forward(%16) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %18 = call @encoder.18.forward(%17) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %19 = call @encoder.19.forward(%18) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    %20 = call @encoder.20.forward(%19) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
    return %20 : !numpy.ndarray<*:!numpy.any_dtype>
  }
  // torch.nn.functional.linear: %arg0 = input, %arg1 = weight,
  // %arg2 = optional bias. For a 2-D input with bias the whole op fuses into
  // a single aten::addmm(bias, input, weight.t()); otherwise it is
  // matmul(input, weight.t()) followed by an in-place bias add when a bias
  // is present.
  func private @__torch__.torch.nn.functional.linear$6(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
    %bool_false = basicpy.bool_constant false
    %c2_i64 = constant 2 : i64
    %c1_i64 = constant 1 : i64
    %0 = basicpy.singleton : !basicpy.NoneType
    // Short-circuit AND: input.dim() == 2 and bias is not None.
    %1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %2 = torch.kernel_call "aten::eq" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
    %3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
    %4 = scf.if %3 -> (!basicpy.BoolType) {
      %7 = torch.kernel_call "aten::__isnot__" %arg2, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
      scf.yield %7 : !basicpy.BoolType
    } else {
      scf.yield %bool_false : !basicpy.BoolType
    }
    %5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
    %6 = scf.if %5 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
      // Fused path: addmm(bias, input, weight.t(), beta=1, alpha=1).
      %7 = torch.prim.unchecked_cast %arg2 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
      %8 = torch.kernel_call "aten::t" %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
      %9 = torch.kernel_call "aten::addmm" %7, %arg0, %8, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
      scf.yield %9 : !numpy.ndarray<*:!numpy.any_dtype>
    } else {
      // General path: matmul, then add_ the bias in place if one was given.
      %7 = torch.kernel_call "aten::t" %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
      %8 = torch.kernel_call "aten::matmul" %arg0, %7 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
      %9 = torch.kernel_call "aten::__isnot__" %arg2, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
      %10 = basicpy.bool_cast %9 : !basicpy.BoolType -> i1
      %11 = scf.if %10 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
        %12 = torch.prim.unchecked_cast %arg2 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
        %13 = torch.kernel_call "aten::add_" %8, %12, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = true, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
        scf.yield %13 : !numpy.ndarray<*:!numpy.any_dtype>
      } else {
        scf.yield %8 : !numpy.ndarray<*:!numpy.any_dtype>
      }
      scf.yield %11 : !numpy.ndarray<*:!numpy.any_dtype>
    }
    return %6 : !numpy.ndarray<*:!numpy.any_dtype>
  }
func private @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: i64, %arg4: i64, %arg5: !numpy.ndarray<*:!numpy.any_dtype>, %arg6: !numpy.ndarray<*:!numpy.any_dtype>, %arg7: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg8: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg9: !basicpy.BoolType, %arg10: f64, %arg11: !numpy.ndarray<*:!numpy.any_dtype>, %arg12: !numpy.ndarray<*:!numpy.any_dtype>, %arg13: !basicpy.BoolType, %arg14: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg15: !basicpy.BoolType, %arg16: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg17: !basicpy.BoolType, %arg18: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg19: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg20: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg21: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg22: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%cst = constant 0xFFF0000000000000 : f64
%bytes = basicpy.bytes_constant "constant"
%bool_true = basicpy.bool_constant true
%bool_false = basicpy.bool_constant false
%bytes_0 = basicpy.bytes_constant "Exception"
%bytes_1 = basicpy.bytes_constant "Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
%bytes_2 = basicpy.bytes_constant "Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
%c0_i64 = constant 0 : i64
%c1_i64 = constant 1 : i64
%cst_3 = constant -5.000000e-01 : f64
%c3_i64 = constant 3 : i64
%c-1_i64 = constant -1 : i64
%c9223372036854775807_i64 = constant 9223372036854775807 : i64
%c2_i64 = constant 2 : i64
%c6_i64 = constant 6 : i64
%c7_i64 = constant 7 : i64
%c5_i64 = constant 5 : i64
%c11_i64 = constant 11 : i64
%cst_4 = constant 0.000000e+00 : f64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.prim.Uninitialized : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = torch.prim.Uninitialized : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = torch.kernel_call "aten::size" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
%4:3 = torch.prim.ListUnpack %3 : !basicpy.ListType -> i64, i64, i64
%5 = torch.kernel_call "aten::eq" %4#2, %arg3 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%6 = basicpy.bool_cast %5 : !basicpy.BoolType -> i1
scf.if %6 {
} else {
torch.prim.RaiseException %bytes_0
}
%7 = torch.kernel_call "aten::size" %arg1, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%8 = torch.kernel_call "aten::size" %arg2, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%9 = torch.kernel_call "aten::eq" %7, %8 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%10 = basicpy.bool_cast %9 : !basicpy.BoolType -> i1
%11 = scf.if %10 -> (!basicpy.BoolType) {
%96 = torch.kernel_call "aten::size" %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%97 = torch.kernel_call "aten::size" %arg2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%98 = torch.kernel_call "aten::eq" %96, %97 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %98 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%12 = basicpy.bool_cast %11 : !basicpy.BoolType -> i1
scf.if %12 {
} else {
torch.prim.RaiseException %bytes_0
}
%13 = torch.kernel_call "aten::floordiv" %4#2, %arg4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%14 = torch.kernel_call "aten::mul" %13, %arg4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%15 = torch.kernel_call "aten::eq" %14, %4#2 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%16 = basicpy.bool_cast %15 : !basicpy.BoolType -> i1
scf.if %16 {
} else {
torch.prim.RaiseException %bytes_0
}
%17 = torch.kernel_call "aten::Float" %13 : (i64) -> f64 {sigArgTypes = ["Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["float"]}
%18 = torch.kernel_call "aten::pow" %17, %cst_3 : (f64, f64) -> f64 {sigArgTypes = ["float", "float"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["float"]}
%19 = torch.kernel_call "aten::__not__" %arg17 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%20 = basicpy.bool_cast %19 : !basicpy.BoolType -> i1
%21:3 = scf.if %20 -> (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) {
%96 = torch.kernel_call "aten::equal" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.BoolType {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%97 = basicpy.bool_cast %96 : !basicpy.BoolType -> i1
%98 = scf.if %97 -> (!basicpy.BoolType) {
%101 = torch.kernel_call "aten::equal" %arg1, %arg2 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.BoolType {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %101 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%99 = basicpy.bool_cast %98 : !basicpy.BoolType -> i1
%100:3 = scf.if %99 -> (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) {
%101 = torch.derefine %arg6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%102 = call @__torch__.torch.nn.functional.linear$6(%arg0, %arg5, %101) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
%103 = torch.kernel_call "aten::chunk" %102, %c3_i64, %c-1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !basicpy.ListType {sigArgTypes = ["Tensor", "int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor[]"]}
%104:3 = torch.prim.ListUnpack %103 : !basicpy.ListType -> !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %104#0, %104#1, %104#2 : !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>
} else {
%101 = torch.kernel_call "aten::equal" %arg1, %arg2 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.BoolType {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%102 = basicpy.bool_cast %101 : !basicpy.BoolType -> i1
%103:3 = scf.if %102 -> (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) {
%104 = torch.kernel_call "aten::slice" %arg5, %c0_i64, %c0_i64, %4#2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%105 = torch.kernel_call "aten::slice" %104, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%106 = torch.kernel_call "aten::slice" %arg6, %c0_i64, %c0_i64, %4#2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%107 = torch.derefine %106 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%108 = call @__torch__.torch.nn.functional.linear$6(%arg0, %105, %107) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
%109 = torch.kernel_call "aten::slice" %arg5, %c0_i64, %4#2, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%110 = torch.kernel_call "aten::slice" %109, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%111 = torch.kernel_call "aten::slice" %arg6, %c0_i64, %4#2, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%112 = torch.derefine %111 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%113 = call @__torch__.torch.nn.functional.linear$6(%arg1, %110, %112) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
%114 = torch.kernel_call "aten::chunk" %113, %c2_i64, %c-1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !basicpy.ListType {sigArgTypes = ["Tensor", "int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor[]"]}
%115:2 = torch.prim.ListUnpack %114 : !basicpy.ListType -> !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %108, %115#0, %115#1 : !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>
} else {
%104 = torch.kernel_call "aten::slice" %arg5, %c0_i64, %c0_i64, %4#2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%105 = torch.kernel_call "aten::slice" %104, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%106 = torch.kernel_call "aten::slice" %arg6, %c0_i64, %c0_i64, %4#2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%107 = torch.derefine %106 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%108 = call @__torch__.torch.nn.functional.linear$6(%arg0, %105, %107) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
%109 = torch.kernel_call "aten::mul" %4#2, %c2_i64 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%110 = torch.kernel_call "aten::slice" %arg5, %c0_i64, %4#2, %109, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%111 = torch.kernel_call "aten::slice" %110, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%112 = torch.kernel_call "aten::slice" %arg6, %c0_i64, %4#2, %109, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%113 = torch.derefine %112 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%114 = call @__torch__.torch.nn.functional.linear$6(%arg1, %111, %113) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
%115 = torch.kernel_call "aten::mul" %4#2, %c2_i64 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%116 = torch.kernel_call "aten::slice" %arg5, %c0_i64, %115, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%117 = torch.kernel_call "aten::slice" %116, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%118 = torch.kernel_call "aten::slice" %arg6, %c0_i64, %115, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%119 = torch.derefine %118 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%120 = call @__torch__.torch.nn.functional.linear$6(%arg2, %117, %119) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %108, %114, %120 : !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %103#0, %103#1, %103#2 : !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %100#0, %100#1, %100#2 : !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>
} else {
%96 = torch.kernel_call "aten::_unwrap_optional" %arg18 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["t?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%97 = torch.kernel_call "aten::size" %96 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
%98:2 = torch.prim.ListUnpack %97 : !basicpy.ListType -> i64, i64
%99 = torch.kernel_call "aten::eq" %98#0, %4#2 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%100 = basicpy.bool_cast %99 : !basicpy.BoolType -> i1
%101 = scf.if %100 -> (!basicpy.BoolType) {
%128 = torch.kernel_call "aten::size" %arg0, %c-1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%129 = torch.kernel_call "aten::eq" %98#1, %128 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %129 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%102 = basicpy.bool_cast %101 : !basicpy.BoolType -> i1
scf.if %102 {
} else {
torch.prim.RaiseException %bytes_0
}
%103 = torch.kernel_call "aten::_unwrap_optional" %arg19 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["t?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%104 = torch.kernel_call "aten::size" %103 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
%105:2 = torch.prim.ListUnpack %104 : !basicpy.ListType -> i64, i64
%106 = torch.kernel_call "aten::eq" %105#0, %4#2 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%107 = basicpy.bool_cast %106 : !basicpy.BoolType -> i1
%108 = scf.if %107 -> (!basicpy.BoolType) {
%128 = torch.kernel_call "aten::size" %arg1, %c-1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%129 = torch.kernel_call "aten::eq" %105#1, %128 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %129 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%109 = basicpy.bool_cast %108 : !basicpy.BoolType -> i1
scf.if %109 {
} else {
torch.prim.RaiseException %bytes_0
}
%110 = torch.kernel_call "aten::_unwrap_optional" %arg20 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["t?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t"]}
%111 = torch.kernel_call "aten::size" %110 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
%112:2 = torch.prim.ListUnpack %111 : !basicpy.ListType -> i64, i64
%113 = torch.kernel_call "aten::eq" %112#0, %4#2 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%114 = basicpy.bool_cast %113 : !basicpy.BoolType -> i1
%115 = scf.if %114 -> (!basicpy.BoolType) {
%128 = torch.kernel_call "aten::size" %arg2, %c-1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%129 = torch.kernel_call "aten::eq" %112#1, %128 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %129 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%116 = basicpy.bool_cast %115 : !basicpy.BoolType -> i1
scf.if %116 {
} else {
torch.prim.RaiseException %bytes_0
}
%117 = torch.kernel_call "aten::slice" %arg6, %c0_i64, %c0_i64, %4#2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%118 = torch.derefine %117 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%119 = call @__torch__.torch.nn.functional.linear$6(%arg0, %96, %118) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
%120 = torch.kernel_call "aten::mul" %4#2, %c2_i64 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%121 = torch.kernel_call "aten::slice" %arg6, %c0_i64, %4#2, %120, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%122 = torch.derefine %121 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%123 = call @__torch__.torch.nn.functional.linear$6(%arg1, %103, %122) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
%124 = torch.kernel_call "aten::mul" %4#2, %c2_i64 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%125 = torch.kernel_call "aten::slice" %arg6, %c0_i64, %124, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%126 = torch.derefine %125 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%127 = call @__torch__.torch.nn.functional.linear$6(%arg2, %110, %126) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %119, %123, %127 : !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>
}
%22 = torch.kernel_call "aten::mul" %21#0, %18 : (!numpy.ndarray<*:!numpy.any_dtype>, f64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%23 = torch.kernel_call "aten::__isnot__" %arg16, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%24 = basicpy.bool_cast %23 : !basicpy.BoolType -> i1
%25 = scf.if %24 -> (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%96 = torch.prim.unchecked_cast %arg16 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%97 = torch.prim.dtype %96 : !numpy.ndarray<*:!numpy.any_dtype> -> i64
%98 = torch.kernel_call "aten::eq" %97, %c6_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%99 = basicpy.bool_cast %98 : !basicpy.BoolType -> i1
%100 = scf.if %99 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
%117 = torch.prim.dtype %96 : !numpy.ndarray<*:!numpy.any_dtype> -> i64
%118 = torch.kernel_call "aten::eq" %117, %c7_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %118 : !basicpy.BoolType
}
%101 = basicpy.bool_cast %100 : !basicpy.BoolType -> i1
%102 = scf.if %101 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
%117 = torch.prim.dtype %96 : !numpy.ndarray<*:!numpy.any_dtype> -> i64
%118 = torch.kernel_call "aten::eq" %117, %c5_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %118 : !basicpy.BoolType
}
%103 = basicpy.bool_cast %102 : !basicpy.BoolType -> i1
%104 = scf.if %103 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
%117 = torch.prim.dtype %96 : !numpy.ndarray<*:!numpy.any_dtype> -> i64
%118 = torch.kernel_call "aten::eq" %117, %c0_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %118 : !basicpy.BoolType
}
%105 = basicpy.bool_cast %104 : !basicpy.BoolType -> i1
%106 = scf.if %105 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
%117 = torch.prim.dtype %96 : !numpy.ndarray<*:!numpy.any_dtype> -> i64
%118 = torch.kernel_call "aten::eq" %117, %c11_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %118 : !basicpy.BoolType
}
%107 = basicpy.bool_cast %106 : !basicpy.BoolType -> i1
scf.if %107 {
} else {
torch.prim.RaiseException %bytes_0
}
%108 = torch.prim.dtype %96 : !numpy.ndarray<*:!numpy.any_dtype> -> i64
%109 = torch.kernel_call "aten::eq" %108, %c0_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%110 = basicpy.bool_cast %109 : !basicpy.BoolType -> i1
%111 = scf.if %110 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
torch.kernel_call "aten::warn" %bytes_2, %c2_i64 : (!basicpy.BytesType, i64) -> () {sigArgTypes = ["str", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = []}
%117 = torch.kernel_call "aten::to" %96, %c11_i64, %bool_false, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, !basicpy.BoolType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "bool", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %117 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %96 : !numpy.ndarray<*:!numpy.any_dtype>
}
%112 = torch.kernel_call "aten::dim" %111 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%113 = torch.kernel_call "aten::eq" %112, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%114 = basicpy.bool_cast %113 : !basicpy.BoolType -> i1
%115 = scf.if %114 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%117 = torch.kernel_call "aten::unsqueeze" %111, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%118 = torch.kernel_call "aten::size" %117 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
%119 = torch.kernel_call "aten::list" %118 : (!basicpy.ListType) -> !basicpy.ListType {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
%120 = torch.kernel_call "aten::size" %arg0, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%121 = torch.kernel_call "aten::size" %arg1, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%122 = basicpy.build_list %c1_i64, %120, %121 : (i64, i64, i64) -> !basicpy.ListType
%123 = torch.kernel_call "aten::ne" %119, %122 : (!basicpy.ListType, !basicpy.ListType) -> !basicpy.BoolType {sigArgTypes = ["int[]", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%124 = basicpy.bool_cast %123 : !basicpy.BoolType -> i1
scf.if %124 {
torch.prim.RaiseException %bytes_0
} else {
}
scf.yield %117 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
%117 = torch.kernel_call "aten::dim" %111 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%118 = torch.kernel_call "aten::eq" %117, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%119 = basicpy.bool_cast %118 : !basicpy.BoolType -> i1
scf.if %119 {
%120 = torch.kernel_call "aten::size" %111 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
%121 = torch.kernel_call "aten::list" %120 : (!basicpy.ListType) -> !basicpy.ListType {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
%122 = torch.kernel_call "aten::mul" %4#1, %arg4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%123 = torch.kernel_call "aten::size" %arg0, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%124 = torch.kernel_call "aten::size" %arg1, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%125 = basicpy.build_list %122, %123, %124 : (i64, i64, i64) -> !basicpy.ListType
%126 = torch.kernel_call "aten::ne" %121, %125 : (!basicpy.ListType, !basicpy.ListType) -> !basicpy.BoolType {sigArgTypes = ["int[]", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%127 = basicpy.bool_cast %126 : !basicpy.BoolType -> i1
scf.if %127 {
torch.prim.RaiseException %bytes_0
} else {
}
} else {
torch.prim.RaiseException %bytes_0
}
scf.yield %111 : !numpy.ndarray<*:!numpy.any_dtype>
}
%116 = torch.derefine %115 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
scf.yield %116 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
scf.yield %arg16 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
%26 = torch.kernel_call "aten::__isnot__" %arg14, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%27 = basicpy.bool_cast %26 : !basicpy.BoolType -> i1
%28:2 = scf.if %27 -> (!basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%96 = torch.prim.unchecked_cast %arg14 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%97 = torch.prim.dtype %96 : !numpy.ndarray<*:!numpy.any_dtype> -> i64
%98 = torch.kernel_call "aten::eq" %97, %c0_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%99 = torch.derefine %96 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
scf.yield %98, %99 : !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
scf.yield %bool_false, %arg14 : !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
%29 = basicpy.bool_cast %28#0 : !basicpy.BoolType -> i1
%30 = scf.if %29 -> (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%96 = torch.prim.unchecked_cast %28#1 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
torch.kernel_call "aten::warn" %bytes_1, %c2_i64 : (!basicpy.BytesType, i64) -> () {sigArgTypes = ["str", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = []}
%97 = torch.kernel_call "aten::to" %96, %c11_i64, %bool_false, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, !basicpy.BoolType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "bool", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%98 = torch.derefine %97 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
scf.yield %98 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
scf.yield %28#1 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
%31 = torch.kernel_call "aten::__isnot__" %arg7, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%32 = basicpy.bool_cast %31 : !basicpy.BoolType -> i1
%33:2 = scf.if %32 -> (!basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%96 = torch.prim.unchecked_cast %arg7 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%97 = torch.kernel_call "aten::__isnot__" %arg8, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%98 = torch.derefine %96 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
scf.yield %97, %98 : !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
scf.yield %bool_false, %arg7 : !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
%34 = basicpy.bool_cast %33#0 : !basicpy.BoolType -> i1
%35:6 = scf.if %34 -> (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%96 = torch.prim.unchecked_cast %33#1 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%97 = torch.prim.unchecked_cast %arg8 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%98 = torch.kernel_call "aten::__is__" %arg21, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%99 = basicpy.bool_cast %98 : !basicpy.BoolType -> i1
%100:2 = scf.if %99 -> (!basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%103 = torch.kernel_call "aten::__is__" %arg22, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %103, %arg21 : !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
%103 = torch.prim.unchecked_cast %arg21 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%104 = torch.derefine %103 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
scf.yield %bool_false, %104 : !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
%101 = basicpy.bool_cast %100#0 : !basicpy.BoolType -> i1
%102:6 = scf.if %101 -> (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%103 = basicpy.build_list %c1_i64, %4#1, %c1_i64 : (i64, i64, i64) -> !basicpy.ListType
%104 = torch.kernel_call "aten::repeat" %96, %103 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%105 = basicpy.build_list %21#1, %104 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType
%106 = torch.kernel_call "aten::cat" %105, %c0_i64 : (!basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%107 = basicpy.build_list %c1_i64, %4#1, %c1_i64 : (i64, i64, i64) -> !basicpy.ListType
%108 = torch.kernel_call "aten::repeat" %97, %107 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%109 = basicpy.build_list %21#2, %108 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType
%110 = torch.kernel_call "aten::cat" %109, %c0_i64 : (!basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%111 = torch.kernel_call "aten::__isnot__" %25, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%112 = basicpy.bool_cast %111 : !basicpy.BoolType -> i1
%113 = scf.if %112 -> (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%117 = torch.prim.unchecked_cast %25 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%118 = basicpy.build_list %c0_i64, %c1_i64 : (i64, i64) -> !basicpy.ListType
%119 = call @__torch__.torch.nn.functional._pad$0(%117, %118, %bytes, %cst_4) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BytesType, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
%120 = torch.derefine %119 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
scf.yield %120 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
scf.yield %25 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
%114 = torch.kernel_call "aten::__isnot__" %30, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%115 = basicpy.bool_cast %114 : !basicpy.BoolType -> i1
%116 = scf.if %115 -> (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%117 = torch.prim.unchecked_cast %30 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%118 = basicpy.build_list %c0_i64, %c1_i64 : (i64, i64) -> !basicpy.ListType
%119 = call @__torch__.torch.nn.functional._pad$0(%117, %118, %bytes, %cst_4) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BytesType, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
%120 = torch.derefine %119 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
scf.yield %120 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
scf.yield %30 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
scf.yield %106, %110, %100#1, %arg22, %116, %113 : !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
%103 = torch.kernel_call "aten::__is__" %100#1, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%104 = basicpy.bool_cast %103 : !basicpy.BoolType -> i1
%105 = scf.if %104 -> (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
scf.yield %100#1 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
torch.prim.RaiseException %bytes_0
scf.yield %2 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
%106 = torch.kernel_call "aten::__is__" %arg22, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%107 = basicpy.bool_cast %106 : !basicpy.BoolType -> i1
%108 = scf.if %107 -> (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
scf.yield %arg22 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
torch.prim.RaiseException %bytes_0
scf.yield %1 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
scf.yield %21#1, %21#2, %105, %108, %30, %25 : !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
scf.yield %102#0, %102#1, %102#2, %102#3, %102#4, %102#5 : !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
%96 = torch.kernel_call "aten::__is__" %33#1, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%97 = basicpy.bool_cast %96 : !basicpy.BoolType -> i1
scf.if %97 {
} else {
torch.prim.RaiseException %bytes_0
}
%98 = torch.kernel_call "aten::__is__" %arg8, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%99 = basicpy.bool_cast %98 : !basicpy.BoolType -> i1
scf.if %99 {
} else {
torch.prim.RaiseException %bytes_0
}
scf.yield %21#1, %21#2, %arg21, %arg22, %30, %25 : !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
%36 = torch.kernel_call "aten::contiguous" %22, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%37 = torch.kernel_call "aten::mul" %4#1, %arg4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%38 = basicpy.build_list %4#0, %37, %13 : (i64, i64, i64) -> !basicpy.ListType
%39 = torch.kernel_call "aten::view" %36, %38 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%40 = torch.kernel_call "aten::transpose" %39, %c0_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%41 = torch.kernel_call "aten::contiguous" %35#0, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%42 = torch.kernel_call "aten::mul" %4#1, %arg4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%43 = basicpy.build_list %c-1_i64, %42, %13 : (i64, i64, i64) -> !basicpy.ListType
%44 = torch.kernel_call "aten::view" %41, %43 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%45 = torch.kernel_call "aten::transpose" %44, %c0_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%46 = torch.kernel_call "aten::contiguous" %35#1, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%47 = torch.kernel_call "aten::mul" %4#1, %arg4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%48 = basicpy.build_list %c-1_i64, %47, %13 : (i64, i64, i64) -> !basicpy.ListType
%49 = torch.kernel_call "aten::view" %46, %48 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%50 = torch.kernel_call "aten::transpose" %49, %c0_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%51 = torch.kernel_call "aten::__isnot__" %35#2, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%52 = basicpy.bool_cast %51 : !basicpy.BoolType -> i1
%53 = scf.if %52 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%96 = torch.prim.unchecked_cast %35#2 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%97 = torch.kernel_call "aten::size" %96, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%98 = torch.kernel_call "aten::mul" %4#1, %arg4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%99 = torch.kernel_call "aten::eq" %97, %98 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%100 = basicpy.bool_cast %99 : !basicpy.BoolType -> i1
scf.if %100 {
} else {
torch.prim.RaiseException %bytes_0
}
%101 = torch.kernel_call "aten::size" %96, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%102 = torch.kernel_call "aten::eq" %101, %13 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%103 = basicpy.bool_cast %102 : !basicpy.BoolType -> i1
scf.if %103 {
} else {
torch.prim.RaiseException %bytes_0
}
scf.yield %96 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %45 : !numpy.ndarray<*:!numpy.any_dtype>
}
%54 = torch.kernel_call "aten::__isnot__" %35#3, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%55 = basicpy.bool_cast %54 : !basicpy.BoolType -> i1
%56 = scf.if %55 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%96 = torch.prim.unchecked_cast %35#3 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%97 = torch.kernel_call "aten::size" %96, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%98 = torch.kernel_call "aten::mul" %4#1, %arg4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%99 = torch.kernel_call "aten::eq" %97, %98 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%100 = basicpy.bool_cast %99 : !basicpy.BoolType -> i1
scf.if %100 {
} else {
torch.prim.RaiseException %bytes_0
}
%101 = torch.kernel_call "aten::size" %96, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%102 = torch.kernel_call "aten::eq" %101, %13 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%103 = basicpy.bool_cast %102 : !basicpy.BoolType -> i1
scf.if %103 {
} else {
torch.prim.RaiseException %bytes_0
}
scf.yield %96 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %50 : !numpy.ndarray<*:!numpy.any_dtype>
}
%57 = torch.kernel_call "aten::size" %53, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%58 = torch.kernel_call "aten::__isnot__" %35#4, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%59 = basicpy.bool_cast %58 : !basicpy.BoolType -> i1
%60 = scf.if %59 -> (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%96 = torch.prim.unchecked_cast %35#4 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%97 = torch.kernel_call "aten::size" %96, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%98 = torch.kernel_call "aten::eq" %97, %4#1 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%99 = basicpy.bool_cast %98 : !basicpy.BoolType -> i1
scf.if %99 {
} else {
torch.prim.RaiseException %bytes_0
}
%100 = torch.kernel_call "aten::size" %96, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%101 = torch.kernel_call "aten::eq" %100, %57 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%102 = basicpy.bool_cast %101 : !basicpy.BoolType -> i1
scf.if %102 {
} else {
torch.prim.RaiseException %bytes_0
}
%103 = torch.derefine %96 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
scf.yield %103 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
scf.yield %35#4 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
%61 = basicpy.bool_cast %arg9 : !basicpy.BoolType -> i1
%62:5 = scf.if %61 -> (!numpy.ndarray<*:!numpy.any_dtype>, i64, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !numpy.ndarray<*:!numpy.any_dtype>) {
%96 = torch.kernel_call "aten::add" %57, %c1_i64 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%97 = torch.kernel_call "aten::size" %53, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%98 = torch.kernel_call "aten::size" %53 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
%99 = torch.kernel_call "aten::slice" %98, %c2_i64, %c9223372036854775807_i64, %c1_i64 : (!basicpy.ListType, i64, i64, i64) -> !basicpy.ListType {sigArgTypes = ["t[]", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
%100 = basicpy.build_list %97, %c1_i64 : (i64, i64) -> !basicpy.ListType
%101 = torch.kernel_call "aten::add" %100, %99 : (!basicpy.ListType, !basicpy.ListType) -> !basicpy.ListType {sigArgTypes = ["t[]", "t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
%102 = torch.prim.dtype %53 : !numpy.ndarray<*:!numpy.any_dtype> -> i64
%103 = torch.prim.device %53 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.Device
%104 = torch.kernel_call "aten::zeros" %101, %102, %0, %103, %0 : (!basicpy.ListType, i64, !basicpy.NoneType, !torch.Device, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["int[]", "int?", "int?", "Device?", "bool?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%105 = basicpy.build_list %53, %104 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType
%106 = torch.kernel_call "aten::cat" %105, %c1_i64 : (!basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%107 = torch.kernel_call "aten::size" %56, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> i64 {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%108 = torch.kernel_call "aten::size" %56 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
%109 = torch.kernel_call "aten::slice" %108, %c2_i64, %c9223372036854775807_i64, %c1_i64 : (!basicpy.ListType, i64, i64, i64) -> !basicpy.ListType {sigArgTypes = ["t[]", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
%110 = basicpy.build_list %107, %c1_i64 : (i64, i64) -> !basicpy.ListType
%111 = torch.kernel_call "aten::add" %110, %109 : (!basicpy.ListType, !basicpy.ListType) -> !basicpy.ListType {sigArgTypes = ["t[]", "t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
%112 = torch.prim.dtype %56 : !numpy.ndarray<*:!numpy.any_dtype> -> i64
%113 = torch.prim.device %56 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.Device
%114 = torch.kernel_call "aten::zeros" %111, %112, %0, %113, %0 : (!basicpy.ListType, i64, !basicpy.NoneType, !torch.Device, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["int[]", "int?", "int?", "Device?", "bool?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%115 = basicpy.build_list %56, %114 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType
%116 = torch.kernel_call "aten::cat" %115, %c1_i64 : (!basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%117 = torch.kernel_call "aten::__isnot__" %35#5, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%118 = basicpy.bool_cast %117 : !basicpy.BoolType -> i1
%119 = scf.if %118 -> (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%123 = torch.prim.unchecked_cast %35#5 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%124 = basicpy.build_list %c0_i64, %c1_i64 : (i64, i64) -> !basicpy.ListType
%125 = call @__torch__.torch.nn.functional._pad$0(%123, %124, %bytes, %cst_4) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BytesType, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
%126 = torch.derefine %125 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
scf.yield %126 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
scf.yield %35#5 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
%120 = torch.kernel_call "aten::__isnot__" %60, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%121 = basicpy.bool_cast %120 : !basicpy.BoolType -> i1
%122 = scf.if %121 -> (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) {
%123 = torch.prim.unchecked_cast %60 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%124 = basicpy.build_list %c0_i64, %c1_i64 : (i64, i64) -> !basicpy.ListType
%125 = call @__torch__.torch.nn.functional._pad$0(%123, %124, %bytes, %cst_4) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BytesType, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
%126 = torch.derefine %125 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
scf.yield %126 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
} else {
scf.yield %60 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
}
scf.yield %106, %96, %119, %122, %116 : !numpy.ndarray<*:!numpy.any_dtype>, i64, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %53, %57, %35#5, %60, %56 : !numpy.ndarray<*:!numpy.any_dtype>, i64, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !numpy.ndarray<*:!numpy.any_dtype>
}
%63 = torch.kernel_call "aten::transpose" %62#0, %c1_i64, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%64 = torch.kernel_call "aten::bmm" %40, %63 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%65 = torch.kernel_call "aten::size" %64 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
%66 = torch.kernel_call "aten::list" %65 : (!basicpy.ListType) -> !basicpy.ListType {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
%67 = torch.kernel_call "aten::mul" %4#1, %arg4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%68 = basicpy.build_list %67, %4#0, %62#1 : (i64, i64, i64) -> !basicpy.ListType
%69 = torch.kernel_call "aten::eq" %66, %68 : (!basicpy.ListType, !basicpy.ListType) -> !basicpy.BoolType {sigArgTypes = ["int[]", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%70 = basicpy.bool_cast %69 : !basicpy.BoolType -> i1
scf.if %70 {
} else {
torch.prim.RaiseException %bytes_0
}
%71 = torch.kernel_call "aten::__isnot__" %62#2, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%72 = basicpy.bool_cast %71 : !basicpy.BoolType -> i1
%73 = scf.if %72 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%96 = torch.prim.unchecked_cast %62#2 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%97 = torch.prim.dtype %96 : !numpy.ndarray<*:!numpy.any_dtype> -> i64
%98 = torch.kernel_call "aten::eq" %97, %c11_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%99 = basicpy.bool_cast %98 : !basicpy.BoolType -> i1
%100 = scf.if %99 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%101 = torch.kernel_call "aten::masked_fill_" %64, %96, %cst : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, f64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = true, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %64 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
%101 = torch.kernel_call "aten::add_" %64, %96, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = true, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %101 : !numpy.ndarray<*:!numpy.any_dtype>
}
scf.yield %100 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %64 : !numpy.ndarray<*:!numpy.any_dtype>
}
%74 = torch.kernel_call "aten::__isnot__" %62#3, %0 : (!torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%75 = basicpy.bool_cast %74 : !basicpy.BoolType -> i1
%76 = scf.if %75 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%96 = torch.prim.unchecked_cast %62#3 : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>> -> !numpy.ndarray<*:!numpy.any_dtype>
%97 = basicpy.build_list %4#1, %arg4, %4#0, %62#1 : (i64, i64, i64, i64) -> !basicpy.ListType
%98 = torch.kernel_call "aten::view" %73, %97 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%99 = torch.kernel_call "aten::unsqueeze" %96, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%100 = torch.kernel_call "aten::unsqueeze" %99, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%101 = torch.kernel_call "aten::masked_fill" %98, %100, %cst : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, f64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%102 = torch.kernel_call "aten::mul" %4#1, %arg4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%103 = basicpy.build_list %102, %4#0, %62#1 : (i64, i64, i64) -> !basicpy.ListType
%104 = torch.kernel_call "aten::view" %101, %103 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
scf.yield %104 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %73 : !numpy.ndarray<*:!numpy.any_dtype>
}
%77 = torch.derefine %c-1_i64 : i64 -> !torch.optional<i64>
%78 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<i64>
%79 = call @__torch__.torch.nn.functional.softmax$8(%76, %77, %c3_i64, %78) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<i64>, i64, !torch.optional<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
%80 = call @__torch__.torch.nn.functional.dropout$3(%79, %arg10, %arg13, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%81 = torch.kernel_call "aten::bmm" %80, %62#4 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%82 = torch.kernel_call "aten::size" %81 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
%83 = torch.kernel_call "aten::list" %82 : (!basicpy.ListType) -> !basicpy.ListType {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
%84 = torch.kernel_call "aten::mul" %4#1, %arg4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%85 = basicpy.build_list %84, %4#0, %13 : (i64, i64, i64) -> !basicpy.ListType
%86 = torch.kernel_call "aten::eq" %83, %85 : (!basicpy.ListType, !basicpy.ListType) -> !basicpy.BoolType {sigArgTypes = ["int[]", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%87 = basicpy.bool_cast %86 : !basicpy.BoolType -> i1
scf.if %87 {
} else {
torch.prim.RaiseException %bytes_0
}
%88 = torch.kernel_call "aten::transpose" %81, %c0_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%89 = torch.kernel_call "aten::contiguous" %88, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%90 = basicpy.build_list %4#0, %4#1, %4#2 : (i64, i64, i64) -> !basicpy.ListType
%91 = torch.kernel_call "aten::view" %89, %90 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%92 = torch.derefine %arg12 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%93 = call @__torch__.torch.nn.functional.linear$6(%91, %arg11, %92) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
%94 = basicpy.bool_cast %arg15 : !basicpy.BoolType -> i1
%95 = scf.if %94 -> (!basicpy.TupleType) {
%96 = basicpy.build_list %4#1, %arg4, %4#0, %62#1 : (i64, i64, i64, i64) -> !basicpy.ListType
%97 = torch.kernel_call "aten::view" %80, %96 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%98 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%99 = torch.kernel_call "aten::sum" %97, %98, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%100 = torch.kernel_call "aten::div" %99, %arg4 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%101 = basicpy.build_tuple %93, %100 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.TupleType
scf.yield %101 : !basicpy.TupleType
} else {
%96 = basicpy.build_tuple %93, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.NoneType) -> !basicpy.TupleType
scf.yield %96 : !basicpy.TupleType
}
return %95 : !basicpy.TupleType
}
// Imported torch.nn.functional.softmax(input, dim, _stacklevel, dtype).
// %arg1 (dim) == None takes the deprecated implicit-dim path, resolving the
// dim from the input rank via _get_softmax_dim$9 (which warns at stack level
// %arg2). %arg3 is the optional output dtype forwarded to aten::softmax.
func private @__torch__.torch.nn.functional.softmax$8(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<i64>, %arg2: i64, %arg3: !torch.optional<i64>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bytes = basicpy.bytes_constant "softmax"
  %0 = basicpy.singleton : !basicpy.NoneType
  // dim is None -> infer it from rank; otherwise unwrap the optional.
  %1 = torch.kernel_call "aten::__is__" %arg1, %0 : (!torch.optional<i64>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
  %2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
  %3 = scf.if %2 -> (i64) {
    %7 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %8 = call @__torch__.torch.nn.functional._get_softmax_dim$9(%bytes, %7, %arg2) : (!basicpy.BytesType, i64, i64) -> i64
    scf.yield %8 : i64
  } else {
    %7 = torch.prim.unchecked_cast %arg1 : !torch.optional<i64> -> i64
    scf.yield %7 : i64
  }
  // dtype is None -> call aten::softmax without a dtype; otherwise pass it.
  %4 = torch.kernel_call "aten::__is__" %arg3, %0 : (!torch.optional<i64>, !basicpy.NoneType) -> !basicpy.BoolType {sigArgTypes = ["t1", "t2"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
  %5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
  %6 = scf.if %5 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
    %7 = torch.kernel_call "aten::softmax" %arg0, %3, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    scf.yield %7 : !numpy.ndarray<*:!numpy.any_dtype>
  } else {
    %7 = torch.prim.unchecked_cast %arg3 : !torch.optional<i64> -> i64
    %8 = torch.kernel_call "aten::softmax" %arg0, %3, %7 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    scf.yield %8 : !numpy.ndarray<*:!numpy.any_dtype>
  }
  return %6 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Imported torch.nn.functional._get_softmax_dim(name, ndim, stacklevel).
// Emits the implicit-dim deprecation warning (formatted with %arg0, e.g.
// "softmax") via aten::warn at stack level %arg2, then picks the default
// dim: 0 when ndim (%arg1) is 0, 1, or 3; otherwise 1.
func private @__torch__.torch.nn.functional._get_softmax_dim$9(%arg0: !basicpy.BytesType, %arg1: i64, %arg2: i64) -> i64 {
  %bool_true = basicpy.bool_constant true
  %bytes = basicpy.bytes_constant "Implicit dimension choice for {} has been deprecated. Change the call to include dim=X as an argument."
  %c0_i64 = constant 0 : i64
  %c1_i64 = constant 1 : i64
  %c3_i64 = constant 3 : i64
  %0 = torch.kernel_call "aten::format" %bytes, %arg0 : (!basicpy.BytesType, !basicpy.BytesType) -> !basicpy.BytesType {sigArgTypes = ["str"], sigIsMutable = false, sigIsVararg = true, sigIsVarret = false, sigRetTypes = ["str"]}
  torch.kernel_call "aten::warn" %0, %arg2 : (!basicpy.BytesType, i64) -> () {sigArgTypes = ["str", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = []}
  // Short-circuit OR chain: ndim == 0 or ndim == 1 ...
  %1 = torch.kernel_call "aten::eq" %arg1, %c0_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
  %2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
  %3 = scf.if %2 -> (!basicpy.BoolType) {
    scf.yield %bool_true : !basicpy.BoolType
  } else {
    %8 = torch.kernel_call "aten::eq" %arg1, %c1_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
    scf.yield %8 : !basicpy.BoolType
  }
  // ... or ndim == 3.
  %4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
  %5 = scf.if %4 -> (!basicpy.BoolType) {
    scf.yield %bool_true : !basicpy.BoolType
  } else {
    %8 = torch.kernel_call "aten::eq" %arg1, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
    scf.yield %8 : !basicpy.BoolType
  }
  %6 = basicpy.bool_cast %5 : !basicpy.BoolType -> i1
  %7 = scf.if %6 -> (i64) {
    scf.yield %c0_i64 : i64
  } else {
    scf.yield %c1_i64 : i64
  }
  return %7 : i64
}
// Imported torch.nn.functional.layer_norm(input, normalized_shape, weight,
// bias, eps). Thin wrapper over aten::layer_norm with the trailing
// cudnn_enable flag hard-wired to true.
func private @__torch__.torch.nn.functional.layer_norm$10(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !basicpy.ListType, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: f64) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_true = basicpy.bool_constant true
  %0 = torch.kernel_call "aten::layer_norm" %arg0, %arg1, %arg2, %arg3, %arg4, %bool_true : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "Tensor?", "Tensor?", "float", "bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Decoder stack: chains 16 identical-signature layers sequentially.
// %arg1 and %arg2 are optional mask tensors threaded unchanged into every
// layer (at the top-level call site both are None; which is the attention
// mask vs. the key-padding mask depends on the layer signature — see
// decoder.layers.N.forward).
func private @decoder.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = call @decoder.layers.0.forward(%arg0, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %1 = call @decoder.layers.1.forward(%0, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %2 = call @decoder.layers.2.forward(%1, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %3 = call @decoder.layers.3.forward(%2, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %4 = call @decoder.layers.4.forward(%3, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %5 = call @decoder.layers.5.forward(%4, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %6 = call @decoder.layers.6.forward(%5, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %7 = call @decoder.layers.7.forward(%6, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %8 = call @decoder.layers.8.forward(%7, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %9 = call @decoder.layers.9.forward(%8, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %10 = call @decoder.layers.10.forward(%9, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %11 = call @decoder.layers.11.forward(%10, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %12 = call @decoder.layers.12.forward(%11, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %13 = call @decoder.layers.13.forward(%12, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %14 = call @decoder.layers.14.forward(%13, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %15 = call @decoder.layers.15.forward(%14, %arg1, %arg2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %15 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Final projection: aten::conv1d over the input using the fc.weight /
// fc.bias global slots, with stride [1], padding [0], dilation [1],
// groups = 1. (Kernel size is determined by fc.weight, which is not
// visible here — presumably a 1x1 conv acting as a per-frame linear
// layer; confirm against the model definition.)
func private @fc.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c1_i64 = constant 1 : i64
  %c0_i64 = constant 0 : i64
  %0 = torch.global_slot.get @fc.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @fc.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
  %4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity pass-through: the quantization stub imported as a no-op
// (the model is not actually quantized in this export).
func private @quant.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity pass-through: the dequantization stub imported as a no-op
// (mirrors quant.forward).
func private @dequant.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// nn.Softmax(dim=2) module: delegates to functional softmax$8 with dim = 2,
// _stacklevel = 5, and no output dtype (None).
func private @softmax.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c2_i64 = constant 2 : i64
  %c5_i64 = constant 5 : i64
  %0 = basicpy.singleton : !basicpy.NoneType
  %1 = torch.derefine %c2_i64 : i64 -> !torch.optional<i64>
  %2 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<i64>
  %3 = call @__torch__.torch.nn.functional.softmax$8(%arg0, %1, %c5_i64, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<i64>, i64, !torch.optional<i64>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Top-level model entry point (silero STT forward pass):
//   raw audio -> STFT magnitude spectrogram -> normalize -> encoder ->
//   decoder (transformer stack) -> conv1d projection -> softmax over dim 2.
// NOTE(review): %arg1 (a bool) is never read in this body — likely a
// traced-but-unused flag from the original Python signature.
func @forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %bytes = basicpy.bytes_constant "reflect"
  %bool_true = basicpy.bool_constant true
  %c0_i64 = constant 0 : i64
  %c9223372036854775807_i64 = constant 9223372036854775807 : i64
  %c1_i64 = constant 1 : i64
  %c2_i64 = constant 2 : i64
  %c3_i64 = constant 3 : i64
  %0 = basicpy.singleton : !basicpy.NoneType
  %1 = torch.global_slot.get @n_fft : i64
  %2 = torch.global_slot.get @hop_length : i64
  %3 = torch.global_slot.get @win_length : i64
  %4 = torch.global_slot.get @n_fft : i64
  // Hann window of length n_fft, matching the input's dtype and device.
  %5 = torch.prim.dtype %arg0 : !numpy.ndarray<*:!numpy.any_dtype> -> i64
  %6 = torch.prim.device %arg0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.Device
  %7 = torch.kernel_call "aten::hann_window" %4, %5, %0, %6, %0 : (i64, i64, !basicpy.NoneType, !torch.Device, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["int", "int?", "int?", "Device?", "bool?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %8 = torch.derefine %2 : i64 -> !torch.optional<i64>
  %9 = torch.derefine %3 : i64 -> !torch.optional<i64>
  %10 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  // STFT: center=true, pad_mode="reflect", normalized=false, onesided=true.
  %11 = call @__torch__.torch.functional.stft$11(%arg0, %1, %8, %9, %10, %bool_true, %bytes, %bool_false, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, i64, !torch.optional<i64>, !torch.optional<i64>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !basicpy.BytesType, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Select index 0 of the last dim: the real component of the STFT output.
  %12 = torch.kernel_call "aten::slice" %11, %c0_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %13 = torch.kernel_call "aten::slice" %12, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %14 = torch.kernel_call "aten::slice" %13, %c2_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %15 = torch.kernel_call "aten::select" %14, %c3_i64, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  // Select index 1 of the last dim: the imaginary component.
  %16 = torch.kernel_call "aten::slice" %11, %c0_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %17 = torch.kernel_call "aten::slice" %16, %c1_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %18 = torch.kernel_call "aten::slice" %17, %c2_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %19 = torch.kernel_call "aten::select" %18, %c3_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  // Magnitude spectrogram: sqrt(re^2 + im^2).
  %20 = torch.kernel_call "aten::pow" %15, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %21 = torch.kernel_call "aten::pow" %19, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %22 = torch.kernel_call "aten::add" %20, %21, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %23 = torch.kernel_call "aten::sqrt" %22 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %24 = call @audio_normalize.forward(%23) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %25 = call @quant.forward(%24) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %26 = call @encoder.forward(%25) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %27 = call @dequant.forward(%26) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Permute [2, 0, 1] before the decoder, then [1, 2, 0] back afterwards
  // (presumably batch-first <-> sequence-first layout for the transformer
  // stack — TODO confirm against the encoder's output layout).
  %28 = basicpy.build_list %c2_i64, %c0_i64, %c1_i64 : (i64, i64, i64) -> !basicpy.ListType
  %29 = torch.kernel_call "aten::permute" %27, %28 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %30 = torch.kernel_call "aten::contiguous" %29, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %31 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %32 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  // Decoder runs with both optional masks set to None.
  %33 = call @decoder.forward(%30, %31, %32) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %34 = basicpy.build_list %c1_i64, %c2_i64, %c0_i64 : (i64, i64, i64) -> !basicpy.ListType
  %35 = torch.kernel_call "aten::permute" %33, %34 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %36 = torch.kernel_call "aten::contiguous" %35, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %37 = call @fc.forward(%36) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %38 = torch.kernel_call "aten::transpose" %37, %c1_i64, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %39 = torch.kernel_call "aten::contiguous" %38, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %40 = call @softmax.forward(%39) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %40 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Imported torch.functional.stft(input, n_fft, hop_length, win_length,
// window, center, pad_mode, normalized, onesided).
// When %arg5 (center) is true: reshape the input up to rank 3 by prepending
// singleton dims, pad n_fft//2 samples on each side of the last dim using
// pad mode %arg6 (the f64 constant is the pad value for constant mode), then
// view back to the original rank before calling aten::stft. The trailing
// None is return_complex.
func private @__torch__.torch.functional.stft$11(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: i64, %arg2: !torch.optional<i64>, %arg3: !torch.optional<i64>, %arg4: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg5: !basicpy.BoolType, %arg6: !basicpy.BytesType, %arg7: !basicpy.BoolType, %arg8: !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c1_i64 = constant 1 : i64
  %c3_i64 = constant 3 : i64
  %c2_i64 = constant 2 : i64
  %cst = constant 0.000000e+00 : f64
  %c9223372036854775807_i64 = constant 9223372036854775807 : i64
  %0 = basicpy.singleton : !basicpy.NoneType
  %1 = basicpy.bool_cast %arg5 : !basicpy.BoolType -> i1
  %2 = scf.if %1 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
    // extended_shape = [1] * (3 - input.dim()) + list(input.size())
    %4 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %5 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
    %6 = torch.kernel_call "aten::sub" %c3_i64, %4 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %7 = torch.kernel_call "aten::mul" %5, %6 : (!basicpy.ListType, i64) -> !basicpy.ListType {sigArgTypes = ["t[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
    %8 = torch.kernel_call "aten::size" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
    %9 = torch.kernel_call "aten::list" %8 : (!basicpy.ListType) -> !basicpy.ListType {sigArgTypes = ["t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
    %10 = torch.kernel_call "aten::add" %7, %9 : (!basicpy.ListType, !basicpy.ListType) -> !basicpy.ListType {sigArgTypes = ["t[]", "t[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
    // Pad n_fft // 2 on both sides of the signal dim.
    %11 = torch.kernel_call "aten::floordiv" %arg1, %c2_i64 : (i64, i64) -> i64 {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %12 = torch.kernel_call "aten::view" %arg0, %10 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    %13 = basicpy.build_list %11, %11 : (i64, i64) -> !basicpy.ListType
    %14 = call @__torch__.torch.nn.functional._pad$0(%12, %13, %arg6, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BytesType, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
    // Restore the original rank: keep the last input.dim() sizes.
    %15 = torch.kernel_call "aten::size" %14 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.ListType {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int[]"]}
    %16 = torch.kernel_call "aten::neg" %4 : (i64) -> i64 {sigArgTypes = ["int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %17 = torch.kernel_call "aten::slice" %15, %16, %c9223372036854775807_i64, %c1_i64 : (!basicpy.ListType, i64, i64, i64) -> !basicpy.ListType {sigArgTypes = ["t[]", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["t[]"]}
    %18 = torch.kernel_call "aten::view" %14, %17 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    scf.yield %18 : !numpy.ndarray<*:!numpy.any_dtype>
  } else {
    scf.yield %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
  }
  %3 = torch.kernel_call "aten::stft" %2, %arg1, %arg2, %arg3, %arg4, %arg7, %arg8, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, !torch.optional<i64>, !torch.optional<i64>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "Tensor?", "bool", "bool?", "bool?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Transformer layer 0: self-attention block followed by a feed-forward
// block, each with dropout + residual add + layer norm:
//   x = norm1(x + dropout1(self_attn(x, x, x, %arg2, need_weights, %arg1)[0]))
//   x = norm2(x + dropout2(linear2(dropout(relu(linear1(x))))))
// NOTE(review): based on the nn.TransformerEncoderLayer calling convention,
// %arg2 is presumably the key-padding mask and %arg1 the attention mask —
// confirm against decoder.layers.0.self_attn.forward's signature.
func private @decoder.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %bool_true = basicpy.bool_constant true
  %c0_i64 = constant 0 : i64
  %c1_i64 = constant 1 : i64
  // Self-attention returns (output, weights); only element 0 is used.
  %0 = call @decoder.layers.0.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
  %1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
  %2 = call @decoder.layers.0.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Residual connection around the attention block, then norm1.
  %3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %4 = call @decoder.layers.0.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Feed-forward block: linear1 -> relu -> dropout -> linear2 -> dropout2.
  %5 = call @decoder.layers.0.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  %7 = call @decoder.layers.0.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %8 = call @decoder.layers.0.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %9 = call @decoder.layers.0.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Residual connection around the feed-forward block, then norm2.
  %10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %11 = call @decoder.layers.0.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.1.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.1.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.1.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.1.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.1.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.1.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.1.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.1.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.1.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.2.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.2.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.2.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.2.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.2.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.2.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.2.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.2.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.2.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.3.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.3.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.3.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.3.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.3.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.3.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.3.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.3.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.3.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.4.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.4.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.4.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.4.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.4.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.4.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.4.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.4.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.4.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.5.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.5.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.5.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.5.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.5.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.5.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.5.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.5.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.5.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.6.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.6.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.6.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.6.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.6.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.6.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.6.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.6.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.6.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.7.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.7.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.7.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.7.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.7.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.7.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.7.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.7.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.7.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.8.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.8.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.8.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.8.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.8.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.8.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.8.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.8.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.8.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.9.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.9.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.9.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.9.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.9.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.9.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.9.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.9.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.9.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.9.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.10.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.10.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.10.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.10.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.10.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.10.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.10.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.10.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.10.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.10.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.11.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.11.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.11.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.11.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.11.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.11.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.11.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.11.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.11.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.11.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.12.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.12.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.12.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.12.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.12.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.12.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.12.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.12.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.12.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.12.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.13.forward -- one post-norm Transformer block (generated IR,
// code byte-identical; comments only). Dataflow:
//   y   = norm1(%arg0 + dropout1(self_attn(%arg0, %arg0, %arg0)[0]))
//   out = norm2(y + dropout2(linear2(dropout(relu(linear1(y))))))
// %arg1/%arg2: optional mask tensors forwarded to self_attn in the order
// (q, k, v, %arg2, need_weights, %arg1); presumably %arg2 = key_padding_mask
// and %arg1 = attn_mask (nn.MultiheadAttention convention) -- TODO confirm
// against the exporting Python module.
func private @decoder.layers.13.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false // inplace flag for relu (False)
%bool_true = basicpy.bool_constant true // need_weights flag for self_attn
%c0_i64 = constant 0 : i64 // tuple index 0: attention output
%c1_i64 = constant 1 : i64 // alpha=1 for aten::add (plain residual sum)
// Self-attention with query = key = value = %arg0; returns a tuple.
%0 = call @decoder.layers.13.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
// Keep only tuple element 0 (attention output); any other element is unused.
%1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @decoder.layers.13.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// First residual connection: input + attention branch.
%3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @decoder.layers.13.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Feed-forward branch: linear1 -> relu -> dropout -> linear2 -> dropout2.
%5 = call @decoder.layers.13.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @decoder.layers.13.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @decoder.layers.13.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @decoder.layers.13.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Second residual connection: normed attention output + feed-forward branch.
%10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%11 = call @decoder.layers.13.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Decoder layer 14: self-attention sub-block + feed-forward sub-block, each
// followed by a residual add and layer norm (post-norm transformer layer).
// %arg1/%arg2 are optional masks forwarded to self_attn — by call position
// presumably attn_mask (%arg1) and key_padding_mask (%arg2); TODO confirm
// against nn.TransformerEncoderLayer.
func private @decoder.layers.14.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %bool_true = basicpy.bool_constant true
  %c0_i64 = constant 0 : i64
  %c1_i64 = constant 1 : i64
  // Self-attention with query = key = value = %arg0; returns a tuple.
  %0 = call @decoder.layers.14.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
  // Keep only tuple element 0 (the attention output tensor).
  %1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
  %2 = call @decoder.layers.14.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Residual: x + dropout(attn(x)) (alpha = 1), then the first layer norm.
  %3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %4 = call @decoder.layers.14.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Feed-forward: linear1 -> ReLU (inplace=false) -> dropout -> linear2.
  %5 = call @decoder.layers.14.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  %7 = call @decoder.layers.14.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %8 = call @decoder.layers.14.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %9 = call @decoder.layers.14.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Residual around the feed-forward block, then the second layer norm.
  %10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %11 = call @decoder.layers.14.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Decoder layer 15: identical structure to layer 14 — self-attention with
// residual + norm1, then linear1/ReLU/dropout/linear2 feed-forward with
// residual + norm2. %arg1/%arg2 are optional masks (positions suggest
// attn_mask and key_padding_mask respectively — TODO confirm).
func private @decoder.layers.15.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg2: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %bool_true = basicpy.bool_constant true
  %c0_i64 = constant 0 : i64
  %c1_i64 = constant 1 : i64
  // Self-attention with query = key = value = %arg0; returns a tuple.
  %0 = call @decoder.layers.15.self_attn.forward(%arg0, %arg0, %arg0, %arg2, %bool_true, %arg1) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
  // Tuple element 0 is the attention output tensor.
  %1 = torch.prim.TupleIndex %0, %c0_i64 : !basicpy.TupleType, i64 -> !numpy.ndarray<*:!numpy.any_dtype>
  %2 = call @decoder.layers.15.dropout1.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Residual add (alpha = 1) followed by the first layer norm.
  %3 = torch.kernel_call "aten::add" %arg0, %2, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %4 = call @decoder.layers.15.norm1.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Feed-forward: linear1 -> ReLU (inplace=false) -> dropout -> linear2.
  %5 = call @decoder.layers.15.linear1.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %6 = call @__torch__.torch.nn.functional.relu$2(%5, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  %7 = call @decoder.layers.15.dropout.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %8 = call @decoder.layers.15.linear2.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %9 = call @decoder.layers.15.dropout2.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Residual around the feed-forward block, then the second layer norm.
  %10 = torch.kernel_call "aten::add" %4, %9, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %11 = call @decoder.layers.15.norm2.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %11 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Multi-head self-attention for decoder layer 15.
// Args mirror nn.MultiheadAttention.forward by position: (query, key, value,
// %arg3, %arg4, %arg5) — presumably key_padding_mask, need_weights, attn_mask;
// TODO confirm. Returns the TupleType produced by
// multi_head_attention_forward (output tensor at index 0).
func private @decoder.layers.15.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
  %bool_false = basicpy.bool_constant false
  %bool_true = basicpy.bool_constant true
  %0 = basicpy.singleton : !basicpy.NoneType
  // Branch on `not _qkv_same_embed_dim`, matching the Python module's
  // if/else; the 18th call argument differs (true vs. false) between the
  // branches — presumably use_separate_proj_weight.
  %1 = torch.global_slot.get @decoder.layers.15.self_attn._qkv_same_embed_dim : !basicpy.BoolType
  %2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
  %3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
  %4 = scf.if %3 -> (!basicpy.TupleType) {
    // Separate-projection path: q/k/v projection weight slots are read
    // (here typed NoneType) and passed alongside the packed in_proj params.
    %5 = torch.global_slot.get @decoder.layers.15.self_attn.embed_dim : i64
    %6 = torch.global_slot.get @decoder.layers.15.self_attn.num_heads : i64
    %7 = torch.global_slot.get @decoder.layers.15.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
    %8 = torch.global_slot.get @decoder.layers.15.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
    %9 = torch.global_slot.get @decoder.layers.15.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %10 = torch.global_slot.get @decoder.layers.15.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %11 = torch.global_slot.get @decoder.layers.15.self_attn.add_zero_attn : !basicpy.BoolType
    %12 = torch.global_slot.get @decoder.layers.15.self_attn.dropout : f64
    %13 = torch.global_slot.get @decoder.layers.15.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
    %14 = torch.global_slot.get @decoder.layers.15.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
    %15 = torch.global_slot.get @decoder.layers.15.self_attn.training : !basicpy.BoolType
    %16 = torch.global_slot.get @decoder.layers.15.self_attn.q_proj_weight : !basicpy.NoneType
    %17 = torch.global_slot.get @decoder.layers.15.self_attn.k_proj_weight : !basicpy.NoneType
    %18 = torch.global_slot.get @decoder.layers.15.self_attn.v_proj_weight : !basicpy.NoneType
    // derefine widens None to optional<ndarray> for the call signature.
    %19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
    scf.yield %24 : !basicpy.TupleType
  } else {
    // Packed-projection path: same module state, but all five trailing
    // optionals are None and the 18th argument is false.
    %5 = torch.global_slot.get @decoder.layers.15.self_attn.embed_dim : i64
    %6 = torch.global_slot.get @decoder.layers.15.self_attn.num_heads : i64
    %7 = torch.global_slot.get @decoder.layers.15.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
    %8 = torch.global_slot.get @decoder.layers.15.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
    %9 = torch.global_slot.get @decoder.layers.15.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %10 = torch.global_slot.get @decoder.layers.15.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %11 = torch.global_slot.get @decoder.layers.15.self_attn.add_zero_attn : !basicpy.BoolType
    %12 = torch.global_slot.get @decoder.layers.15.self_attn.dropout : f64
    %13 = torch.global_slot.get @decoder.layers.15.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
    %14 = torch.global_slot.get @decoder.layers.15.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
    %15 = torch.global_slot.get @decoder.layers.15.self_attn.training : !basicpy.BoolType
    %16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
    scf.yield %21 : !basicpy.TupleType
  }
  return %4 : !basicpy.TupleType
}
// Dropout with p = 0.1, inplace = false; active only when the module's
// `training` global slot is true.
func private @decoder.layers.15.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %cst = constant 1.000000e-01 : f64
  %0 = torch.global_slot.get @decoder.layers.15.dropout1.training : !basicpy.BoolType
  %1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// LayerNorm over a trailing dimension of size 512, eps = 1e-5, with affine
// weight and bias read from global slots.
func private @decoder.layers.15.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c512_i64 = constant 512 : i64
  %cst = constant 1.000000e-05 : f64
  %0 = torch.global_slot.get @decoder.layers.15.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @decoder.layers.15.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // normalized_shape = [512]
  %2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
  %3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Affine projection via functional.linear using this module's weight and
// bias global slots (first feed-forward projection of layer 15).
func private @decoder.layers.15.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = torch.global_slot.get @decoder.layers.15.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @decoder.layers.15.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
  %2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1, inplace = false, gated by this module's `training`
// global slot (feed-forward dropout of layer 15).
func private @decoder.layers.15.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %cst = constant 1.000000e-01 : f64
  %0 = torch.global_slot.get @decoder.layers.15.dropout.training : !basicpy.BoolType
  %1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Affine projection via functional.linear (second feed-forward projection
// of layer 15); weight/bias come from global slots.
func private @decoder.layers.15.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = torch.global_slot.get @decoder.layers.15.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @decoder.layers.15.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
  %2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1, inplace = false, gated by this module's `training`
// global slot (post-feed-forward dropout of layer 15).
func private @decoder.layers.15.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %cst = constant 1.000000e-01 : f64
  %0 = torch.global_slot.get @decoder.layers.15.dropout2.training : !basicpy.BoolType
  %1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Second LayerNorm of layer 15: trailing dimension 512, eps = 1e-5, affine
// weight and bias from global slots.
func private @decoder.layers.15.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c512_i64 = constant 512 : i64
  %cst = constant 1.000000e-05 : f64
  %0 = torch.global_slot.get @decoder.layers.15.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @decoder.layers.15.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // normalized_shape = [512]
  %2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
  %3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Multi-head self-attention for decoder layer 14 (same structure as layer
// 15's self_attn). Args mirror nn.MultiheadAttention.forward by position:
// (query, key, value, %arg3, %arg4, %arg5) — presumably key_padding_mask,
// need_weights, attn_mask; TODO confirm. Returns the TupleType from
// multi_head_attention_forward (output tensor at index 0).
func private @decoder.layers.14.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
  %bool_false = basicpy.bool_constant false
  %bool_true = basicpy.bool_constant true
  %0 = basicpy.singleton : !basicpy.NoneType
  // Branch on `not _qkv_same_embed_dim`; only the 18th call argument and
  // the trailing optional projection weights differ between branches.
  %1 = torch.global_slot.get @decoder.layers.14.self_attn._qkv_same_embed_dim : !basicpy.BoolType
  %2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
  %3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
  %4 = scf.if %3 -> (!basicpy.TupleType) {
    // Separate-projection path: q/k/v projection weight slots are read
    // (typed NoneType here) and widened to optional for the call.
    %5 = torch.global_slot.get @decoder.layers.14.self_attn.embed_dim : i64
    %6 = torch.global_slot.get @decoder.layers.14.self_attn.num_heads : i64
    %7 = torch.global_slot.get @decoder.layers.14.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
    %8 = torch.global_slot.get @decoder.layers.14.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
    %9 = torch.global_slot.get @decoder.layers.14.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %10 = torch.global_slot.get @decoder.layers.14.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %11 = torch.global_slot.get @decoder.layers.14.self_attn.add_zero_attn : !basicpy.BoolType
    %12 = torch.global_slot.get @decoder.layers.14.self_attn.dropout : f64
    %13 = torch.global_slot.get @decoder.layers.14.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
    %14 = torch.global_slot.get @decoder.layers.14.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
    %15 = torch.global_slot.get @decoder.layers.14.self_attn.training : !basicpy.BoolType
    %16 = torch.global_slot.get @decoder.layers.14.self_attn.q_proj_weight : !basicpy.NoneType
    %17 = torch.global_slot.get @decoder.layers.14.self_attn.k_proj_weight : !basicpy.NoneType
    %18 = torch.global_slot.get @decoder.layers.14.self_attn.v_proj_weight : !basicpy.NoneType
    %19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
    scf.yield %24 : !basicpy.TupleType
  } else {
    // Packed-projection path: all five trailing optionals are None and the
    // 18th argument is false.
    %5 = torch.global_slot.get @decoder.layers.14.self_attn.embed_dim : i64
    %6 = torch.global_slot.get @decoder.layers.14.self_attn.num_heads : i64
    %7 = torch.global_slot.get @decoder.layers.14.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
    %8 = torch.global_slot.get @decoder.layers.14.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
    %9 = torch.global_slot.get @decoder.layers.14.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %10 = torch.global_slot.get @decoder.layers.14.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %11 = torch.global_slot.get @decoder.layers.14.self_attn.add_zero_attn : !basicpy.BoolType
    %12 = torch.global_slot.get @decoder.layers.14.self_attn.dropout : f64
    %13 = torch.global_slot.get @decoder.layers.14.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
    %14 = torch.global_slot.get @decoder.layers.14.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
    %15 = torch.global_slot.get @decoder.layers.14.self_attn.training : !basicpy.BoolType
    %16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
    scf.yield %21 : !basicpy.TupleType
  }
  return %4 : !basicpy.TupleType
}
// Dropout with p = 0.1, inplace = false, gated by this module's `training`
// global slot (post-attention dropout of layer 14).
func private @decoder.layers.14.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %cst = constant 1.000000e-01 : f64
  %0 = torch.global_slot.get @decoder.layers.14.dropout1.training : !basicpy.BoolType
  %1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// First LayerNorm of layer 14: trailing dimension 512, eps = 1e-5, affine
// weight and bias from global slots.
func private @decoder.layers.14.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c512_i64 = constant 512 : i64
  %cst = constant 1.000000e-05 : f64
  %0 = torch.global_slot.get @decoder.layers.14.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @decoder.layers.14.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // normalized_shape = [512]
  %2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
  %3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Affine projection via functional.linear (first feed-forward projection of
// layer 14); weight/bias come from global slots.
func private @decoder.layers.14.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = torch.global_slot.get @decoder.layers.14.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @decoder.layers.14.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
  %2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1, inplace = false, gated by this module's `training`
// global slot (feed-forward dropout of layer 14).
func private @decoder.layers.14.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %cst = constant 1.000000e-01 : f64
  %0 = torch.global_slot.get @decoder.layers.14.dropout.training : !basicpy.BoolType
  %1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Affine projection via functional.linear (second feed-forward projection
// of layer 14); weight/bias come from global slots.
func private @decoder.layers.14.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = torch.global_slot.get @decoder.layers.14.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @decoder.layers.14.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
  %2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1, inplace = false, gated by this module's `training`
// global slot (post-feed-forward dropout of layer 14).
func private @decoder.layers.14.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %cst = constant 1.000000e-01 : f64
  %0 = torch.global_slot.get @decoder.layers.14.dropout2.training : !basicpy.BoolType
  %1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Second LayerNorm of layer 14: trailing dimension 512, eps = 1e-5, affine
// weight and bias from global slots.
func private @decoder.layers.14.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c512_i64 = constant 512 : i64
  %cst = constant 1.000000e-05 : f64
  %0 = torch.global_slot.get @decoder.layers.14.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @decoder.layers.14.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // normalized_shape = [512]
  %2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
  %3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Multi-head self-attention for decoder layer 13 (same structure as layers
// 14/15). Args mirror nn.MultiheadAttention.forward by position: (query,
// key, value, %arg3, %arg4, %arg5) — presumably key_padding_mask,
// need_weights, attn_mask; TODO confirm. Returns the TupleType from
// multi_head_attention_forward (output tensor at index 0).
func private @decoder.layers.13.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
  %bool_false = basicpy.bool_constant false
  %bool_true = basicpy.bool_constant true
  %0 = basicpy.singleton : !basicpy.NoneType
  // Branch on `not _qkv_same_embed_dim`; only the 18th call argument and
  // the trailing optional projection weights differ between branches.
  %1 = torch.global_slot.get @decoder.layers.13.self_attn._qkv_same_embed_dim : !basicpy.BoolType
  %2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
  %3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
  %4 = scf.if %3 -> (!basicpy.TupleType) {
    // Separate-projection path: q/k/v projection weight slots are read
    // (typed NoneType here) and widened to optional for the call.
    %5 = torch.global_slot.get @decoder.layers.13.self_attn.embed_dim : i64
    %6 = torch.global_slot.get @decoder.layers.13.self_attn.num_heads : i64
    %7 = torch.global_slot.get @decoder.layers.13.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
    %8 = torch.global_slot.get @decoder.layers.13.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
    %9 = torch.global_slot.get @decoder.layers.13.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %10 = torch.global_slot.get @decoder.layers.13.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %11 = torch.global_slot.get @decoder.layers.13.self_attn.add_zero_attn : !basicpy.BoolType
    %12 = torch.global_slot.get @decoder.layers.13.self_attn.dropout : f64
    %13 = torch.global_slot.get @decoder.layers.13.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
    %14 = torch.global_slot.get @decoder.layers.13.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
    %15 = torch.global_slot.get @decoder.layers.13.self_attn.training : !basicpy.BoolType
    %16 = torch.global_slot.get @decoder.layers.13.self_attn.q_proj_weight : !basicpy.NoneType
    %17 = torch.global_slot.get @decoder.layers.13.self_attn.k_proj_weight : !basicpy.NoneType
    %18 = torch.global_slot.get @decoder.layers.13.self_attn.v_proj_weight : !basicpy.NoneType
    %19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
    scf.yield %24 : !basicpy.TupleType
  } else {
    // Packed-projection path: all five trailing optionals are None and the
    // 18th argument is false.
    %5 = torch.global_slot.get @decoder.layers.13.self_attn.embed_dim : i64
    %6 = torch.global_slot.get @decoder.layers.13.self_attn.num_heads : i64
    %7 = torch.global_slot.get @decoder.layers.13.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
    %8 = torch.global_slot.get @decoder.layers.13.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
    %9 = torch.global_slot.get @decoder.layers.13.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %10 = torch.global_slot.get @decoder.layers.13.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %11 = torch.global_slot.get @decoder.layers.13.self_attn.add_zero_attn : !basicpy.BoolType
    %12 = torch.global_slot.get @decoder.layers.13.self_attn.dropout : f64
    %13 = torch.global_slot.get @decoder.layers.13.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
    %14 = torch.global_slot.get @decoder.layers.13.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
    %15 = torch.global_slot.get @decoder.layers.13.self_attn.training : !basicpy.BoolType
    %16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
    %21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
    scf.yield %21 : !basicpy.TupleType
  }
  return %4 : !basicpy.TupleType
}
func private @decoder.layers.13.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Dropout wrapper: p = 0.1, inplace = false; the training flag is read
  // from this submodule's globalized slot.
  %training = torch.global_slot.get @decoder.layers.13.dropout1.training : !basicpy.BoolType
  %p = constant 1.000000e-01 : f64
  %inplace = basicpy.bool_constant false
  %result = call @__torch__.torch.nn.functional.dropout$3(%arg0, %p, %training, %inplace) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.13.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // LayerNorm wrapper: normalized_shape = [512], eps = 1e-5; weight and bias
  // are read from this submodule's globalized slots.
  %weight = torch.global_slot.get @decoder.layers.13.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.13.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
  %eps = constant 1.000000e-05 : f64
  %dim = constant 512 : i64
  %shape = basicpy.build_list %dim : (i64) -> !basicpy.ListType
  // layer_norm$10 takes optional weight/bias, so widen both ndarrays.
  %weight_opt = torch.derefine %weight : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %shape, %weight_opt, %bias_opt, %eps) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.13.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Linear-layer wrapper; weight and bias come from globalized slots.
  %weight = torch.global_slot.get @decoder.layers.13.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.13.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // linear$6 takes an optional bias, so widen the ndarray to an optional.
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.linear$6(%arg0, %weight, %bias_opt) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.13.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Dropout wrapper: p = 0.1, inplace = false; the training flag is read
  // from this submodule's globalized slot.
  %training = torch.global_slot.get @decoder.layers.13.dropout.training : !basicpy.BoolType
  %p = constant 1.000000e-01 : f64
  %inplace = basicpy.bool_constant false
  %result = call @__torch__.torch.nn.functional.dropout$3(%arg0, %p, %training, %inplace) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.13.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Linear-layer wrapper; weight and bias come from globalized slots.
  %weight = torch.global_slot.get @decoder.layers.13.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.13.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // linear$6 takes an optional bias, so widen the ndarray to an optional.
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.linear$6(%arg0, %weight, %bias_opt) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.13.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Dropout wrapper: p = 0.1, inplace = false; the training flag is read
  // from this submodule's globalized slot.
  %training = torch.global_slot.get @decoder.layers.13.dropout2.training : !basicpy.BoolType
  %p = constant 1.000000e-01 : f64
  %inplace = basicpy.bool_constant false
  %result = call @__torch__.torch.nn.functional.dropout$3(%arg0, %p, %training, %inplace) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.13.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // LayerNorm wrapper: normalized_shape = [512], eps = 1e-5; weight and bias
  // are read from this submodule's globalized slots.
  %weight = torch.global_slot.get @decoder.layers.13.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.13.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
  %eps = constant 1.000000e-05 : f64
  %dim = constant 512 : i64
  %shape = basicpy.build_list %dim : (i64) -> !basicpy.ListType
  // layer_norm$10 takes optional weight/bias, so widen both ndarrays.
  %weight_opt = torch.derefine %weight : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %shape, %weight_opt, %bias_opt, %eps) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
// Self-attention forward for decoder layer 12 (globalized form of
// torch.nn.MultiheadAttention.forward). %arg0/%arg1/%arg2 are the
// query/key/value tensors; %arg3 and %arg5 are optional mask-like tensors
// and %arg4 a bool flag (presumably key_padding_mask / need_weights /
// attn_mask as in MultiheadAttention.forward -- TODO confirm at call site).
// Returns the tuple produced by multi_head_attention_forward$7.
func private @decoder.layers.12.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
// Branch on (not _qkv_same_embed_dim): true selects the
// separate-projection-weight call, false the packed in_proj_weight call.
%1 = torch.global_slot.get @decoder.layers.12.self_attn._qkv_same_embed_dim : !basicpy.BoolType
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// Separate q/k/v projection path: read hyperparameters and parameters
// from this submodule's global slots, then call with
// use_separate_proj_weight = true and the q/k/v projection weight slots.
%5 = torch.global_slot.get @decoder.layers.12.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.12.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.12.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.12.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.12.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.12.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.12.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.12.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.12.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.12.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.12.self_attn.training : !basicpy.BoolType
// NOTE(review): the q/k/v projection-weight slots are typed NoneType here
// (globalization found them always None), so this branch would pass None
// weights if ever taken at runtime.
%16 = torch.global_slot.get @decoder.layers.12.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.12.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.12.self_attn.v_proj_weight : !basicpy.NoneType
// Widen None values to optional-tensor type to match the callee signature.
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// Packed in_proj_weight path: identical slot reads, but
// use_separate_proj_weight = false and all five trailing optionals None.
%5 = torch.global_slot.get @decoder.layers.12.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.12.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.12.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.12.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.12.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.12.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.12.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.12.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.12.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.12.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.12.self_attn.training : !basicpy.BoolType
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
func private @decoder.layers.12.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Dropout wrapper: p = 0.1, inplace = false; the training flag is read
  // from this submodule's globalized slot.
  %training = torch.global_slot.get @decoder.layers.12.dropout1.training : !basicpy.BoolType
  %p = constant 1.000000e-01 : f64
  %inplace = basicpy.bool_constant false
  %result = call @__torch__.torch.nn.functional.dropout$3(%arg0, %p, %training, %inplace) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.12.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // LayerNorm wrapper: normalized_shape = [512], eps = 1e-5; weight and bias
  // are read from this submodule's globalized slots.
  %weight = torch.global_slot.get @decoder.layers.12.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.12.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
  %eps = constant 1.000000e-05 : f64
  %dim = constant 512 : i64
  %shape = basicpy.build_list %dim : (i64) -> !basicpy.ListType
  // layer_norm$10 takes optional weight/bias, so widen both ndarrays.
  %weight_opt = torch.derefine %weight : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %shape, %weight_opt, %bias_opt, %eps) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.12.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Linear-layer wrapper; weight and bias come from globalized slots.
  %weight = torch.global_slot.get @decoder.layers.12.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.12.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // linear$6 takes an optional bias, so widen the ndarray to an optional.
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.linear$6(%arg0, %weight, %bias_opt) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.12.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Dropout wrapper: p = 0.1, inplace = false; the training flag is read
  // from this submodule's globalized slot.
  %training = torch.global_slot.get @decoder.layers.12.dropout.training : !basicpy.BoolType
  %p = constant 1.000000e-01 : f64
  %inplace = basicpy.bool_constant false
  %result = call @__torch__.torch.nn.functional.dropout$3(%arg0, %p, %training, %inplace) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.12.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Linear-layer wrapper; weight and bias come from globalized slots.
  %weight = torch.global_slot.get @decoder.layers.12.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.12.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // linear$6 takes an optional bias, so widen the ndarray to an optional.
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.linear$6(%arg0, %weight, %bias_opt) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.12.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Dropout wrapper: p = 0.1, inplace = false; the training flag is read
  // from this submodule's globalized slot.
  %training = torch.global_slot.get @decoder.layers.12.dropout2.training : !basicpy.BoolType
  %p = constant 1.000000e-01 : f64
  %inplace = basicpy.bool_constant false
  %result = call @__torch__.torch.nn.functional.dropout$3(%arg0, %p, %training, %inplace) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.12.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // LayerNorm wrapper: normalized_shape = [512], eps = 1e-5; weight and bias
  // are read from this submodule's globalized slots.
  %weight = torch.global_slot.get @decoder.layers.12.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.12.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
  %eps = constant 1.000000e-05 : f64
  %dim = constant 512 : i64
  %shape = basicpy.build_list %dim : (i64) -> !basicpy.ListType
  // layer_norm$10 takes optional weight/bias, so widen both ndarrays.
  %weight_opt = torch.derefine %weight : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %shape, %weight_opt, %bias_opt, %eps) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
// Self-attention forward for decoder layer 11 (globalized form of
// torch.nn.MultiheadAttention.forward). %arg0/%arg1/%arg2 are the
// query/key/value tensors; %arg3 and %arg5 are optional mask-like tensors
// and %arg4 a bool flag (presumably key_padding_mask / need_weights /
// attn_mask as in MultiheadAttention.forward -- TODO confirm at call site).
// Returns the tuple produced by multi_head_attention_forward$7.
func private @decoder.layers.11.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
// Branch on (not _qkv_same_embed_dim): true selects the
// separate-projection-weight call, false the packed in_proj_weight call.
%1 = torch.global_slot.get @decoder.layers.11.self_attn._qkv_same_embed_dim : !basicpy.BoolType
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// Separate q/k/v projection path: read hyperparameters and parameters
// from this submodule's global slots, then call with
// use_separate_proj_weight = true and the q/k/v projection weight slots.
%5 = torch.global_slot.get @decoder.layers.11.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.11.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.11.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.11.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.11.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.11.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.11.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.11.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.11.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.11.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.11.self_attn.training : !basicpy.BoolType
// NOTE(review): the q/k/v projection-weight slots are typed NoneType here
// (globalization found them always None), so this branch would pass None
// weights if ever taken at runtime.
%16 = torch.global_slot.get @decoder.layers.11.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.11.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.11.self_attn.v_proj_weight : !basicpy.NoneType
// Widen None values to optional-tensor type to match the callee signature.
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// Packed in_proj_weight path: identical slot reads, but
// use_separate_proj_weight = false and all five trailing optionals None.
%5 = torch.global_slot.get @decoder.layers.11.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.11.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.11.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.11.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.11.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.11.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.11.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.11.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.11.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.11.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.11.self_attn.training : !basicpy.BoolType
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
func private @decoder.layers.11.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Dropout wrapper: p = 0.1, inplace = false; the training flag is read
  // from this submodule's globalized slot.
  %training = torch.global_slot.get @decoder.layers.11.dropout1.training : !basicpy.BoolType
  %p = constant 1.000000e-01 : f64
  %inplace = basicpy.bool_constant false
  %result = call @__torch__.torch.nn.functional.dropout$3(%arg0, %p, %training, %inplace) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.11.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // LayerNorm wrapper: normalized_shape = [512], eps = 1e-5; weight and bias
  // are read from this submodule's globalized slots.
  %weight = torch.global_slot.get @decoder.layers.11.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.11.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
  %eps = constant 1.000000e-05 : f64
  %dim = constant 512 : i64
  %shape = basicpy.build_list %dim : (i64) -> !basicpy.ListType
  // layer_norm$10 takes optional weight/bias, so widen both ndarrays.
  %weight_opt = torch.derefine %weight : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %shape, %weight_opt, %bias_opt, %eps) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.11.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Linear-layer wrapper; weight and bias come from globalized slots.
  %weight = torch.global_slot.get @decoder.layers.11.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.11.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // linear$6 takes an optional bias, so widen the ndarray to an optional.
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.linear$6(%arg0, %weight, %bias_opt) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.11.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Dropout wrapper: p = 0.1, inplace = false; the training flag is read
  // from this submodule's globalized slot.
  %training = torch.global_slot.get @decoder.layers.11.dropout.training : !basicpy.BoolType
  %p = constant 1.000000e-01 : f64
  %inplace = basicpy.bool_constant false
  %result = call @__torch__.torch.nn.functional.dropout$3(%arg0, %p, %training, %inplace) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.11.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Linear-layer wrapper; weight and bias come from globalized slots.
  %weight = torch.global_slot.get @decoder.layers.11.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.11.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // linear$6 takes an optional bias, so widen the ndarray to an optional.
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.linear$6(%arg0, %weight, %bias_opt) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.11.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Dropout wrapper: p = 0.1, inplace = false; the training flag is read
  // from this submodule's globalized slot.
  %training = torch.global_slot.get @decoder.layers.11.dropout2.training : !basicpy.BoolType
  %p = constant 1.000000e-01 : f64
  %inplace = basicpy.bool_constant false
  %result = call @__torch__.torch.nn.functional.dropout$3(%arg0, %p, %training, %inplace) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @decoder.layers.11.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // LayerNorm wrapper: normalized_shape = [512], eps = 1e-5; weight and bias
  // are read from this submodule's globalized slots.
  %weight = torch.global_slot.get @decoder.layers.11.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @decoder.layers.11.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
  %eps = constant 1.000000e-05 : f64
  %dim = constant 512 : i64
  %shape = basicpy.build_list %dim : (i64) -> !basicpy.ListType
  // layer_norm$10 takes optional weight/bias, so widen both ndarrays.
  %weight_opt = torch.derefine %weight : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %bias_opt = torch.derefine %bias : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %result = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %shape, %weight_opt, %bias_opt, %eps) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %result : !numpy.ndarray<*:!numpy.any_dtype>
}
// Self-attention forward for decoder layer 10 (globalized form of
// torch.nn.MultiheadAttention.forward). %arg0/%arg1/%arg2 are the
// query/key/value tensors; %arg3 and %arg5 are optional mask-like tensors
// and %arg4 a bool flag (presumably key_padding_mask / need_weights /
// attn_mask as in MultiheadAttention.forward -- TODO confirm at call site).
// Returns the tuple produced by multi_head_attention_forward$7.
func private @decoder.layers.10.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
// Branch on (not _qkv_same_embed_dim): true selects the
// separate-projection-weight call, false the packed in_proj_weight call.
%1 = torch.global_slot.get @decoder.layers.10.self_attn._qkv_same_embed_dim : !basicpy.BoolType
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// Separate q/k/v projection path: read hyperparameters and parameters
// from this submodule's global slots, then call with
// use_separate_proj_weight = true and the q/k/v projection weight slots.
%5 = torch.global_slot.get @decoder.layers.10.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.10.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.10.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.10.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.10.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.10.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.10.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.10.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.10.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.10.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.10.self_attn.training : !basicpy.BoolType
// NOTE(review): the q/k/v projection-weight slots are typed NoneType here
// (globalization found them always None), so this branch would pass None
// weights if ever taken at runtime.
%16 = torch.global_slot.get @decoder.layers.10.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.10.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.10.self_attn.v_proj_weight : !basicpy.NoneType
// Widen None values to optional-tensor type to match the callee signature.
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// Packed in_proj_weight path: identical slot reads, but
// use_separate_proj_weight = false and all five trailing optionals None.
%5 = torch.global_slot.get @decoder.layers.10.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.10.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.10.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.10.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.10.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.10.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.10.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.10.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.10.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.10.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.10.self_attn.training : !basicpy.BoolType
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
// forward() for the decoder.layers.10.dropout1 nn.Dropout submodule.
// Calls torch.nn.functional.dropout with p = 0.1 and inplace = false;
// the `training` flag is read from this submodule's global slot.
func private @decoder.layers.10.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = false
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.10.dropout1.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.10.norm1 LayerNorm submodule.
// Calls torch.nn.functional.layer_norm with normalized_shape = [512] and
// eps = 1e-5; weight and bias are read from this submodule's global slots.
func private @decoder.layers.10.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// normalized_shape entry
%c512_i64 = constant 512 : i64
// eps
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.10.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.10.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// Wrap weight/bias as optional tensors for the functional call.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.10.linear1 nn.Linear submodule.
// Calls torch.nn.functional.linear with the weight and bias read from this
// submodule's global slots; the bias is wrapped as an optional tensor.
func private @decoder.layers.10.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.10.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.10.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.10.dropout nn.Dropout submodule.
// Calls torch.nn.functional.dropout with p = 0.1 and inplace = false;
// the `training` flag is read from this submodule's global slot.
func private @decoder.layers.10.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = false
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.10.dropout.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.10.linear2 nn.Linear submodule.
// Calls torch.nn.functional.linear with the weight and bias read from this
// submodule's global slots; the bias is wrapped as an optional tensor.
func private @decoder.layers.10.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.10.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.10.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.10.dropout2 nn.Dropout submodule.
// Calls torch.nn.functional.dropout with p = 0.1 and inplace = false;
// the `training` flag is read from this submodule's global slot.
func private @decoder.layers.10.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = false
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.10.dropout2.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.10.norm2 LayerNorm submodule.
// Calls torch.nn.functional.layer_norm with normalized_shape = [512] and
// eps = 1e-5; weight and bias are read from this submodule's global slots.
func private @decoder.layers.10.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// normalized_shape entry
%c512_i64 = constant 512 : i64
// eps
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.10.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.10.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// Wrap weight/bias as optional tensors for the functional call.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.9.self_attn MultiheadAttention submodule.
// %arg0/%arg1/%arg2 are the three input tensors passed straight through to
// multi_head_attention_forward; %arg3/%arg4/%arg5 are forwarded as its
// mask/flag/mask trailing options. NOTE(review): exact semantics of each
// argument (query/key/value, key_padding_mask, need_weights, attn_mask)
// follow the callee's signature — confirm against
// __torch__.torch.nn.functional.multi_head_attention_forward$7.
// Branches on the negation of the module's `_qkv_same_embed_dim` flag:
// the "true" branch passes an extra `true` flag plus the q/k/v projection
// weight slots (typed !basicpy.NoneType here), the "false" branch passes
// `false` and None in their place. Returns the callee's result tuple
// unchanged.
func private @decoder.layers.9.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
// Module flag: do query/key/value share one embedding dimension?
%1 = torch.global_slot.get @decoder.layers.9.self_attn._qkv_same_embed_dim : !basicpy.BoolType
// Logical NOT of the flag; the "then" branch therefore handles the
// NOT-same-embed-dim case.
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// Separate-projection path: forwards the q/k/v projection weight slots
// (None-typed) and a `true` flag to the functional op.
%5 = torch.global_slot.get @decoder.layers.9.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.9.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.9.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.9.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.9.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.9.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.9.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.9.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.9.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.9.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.9.self_attn.training : !basicpy.BoolType
%16 = torch.global_slot.get @decoder.layers.9.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.9.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.9.self_attn.v_proj_weight : !basicpy.NoneType
// Promote the None slots / singletons to optional-tensor type.
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// Packed-projection path: same call with a `false` flag and None for
// the separate q/k/v projection weights.
%5 = torch.global_slot.get @decoder.layers.9.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.9.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.9.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.9.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.9.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.9.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.9.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.9.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.9.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.9.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.9.self_attn.training : !basicpy.BoolType
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
// forward() for the decoder.layers.9.dropout1 nn.Dropout submodule.
// Calls torch.nn.functional.dropout with p = 0.1 and inplace = false;
// the `training` flag is read from this submodule's global slot.
func private @decoder.layers.9.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = false
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.9.dropout1.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.9.norm1 LayerNorm submodule.
// Calls torch.nn.functional.layer_norm with normalized_shape = [512] and
// eps = 1e-5; weight and bias are read from this submodule's global slots.
func private @decoder.layers.9.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// normalized_shape entry
%c512_i64 = constant 512 : i64
// eps
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.9.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.9.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// Wrap weight/bias as optional tensors for the functional call.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.9.linear1 nn.Linear submodule.
// Calls torch.nn.functional.linear with the weight and bias read from this
// submodule's global slots; the bias is wrapped as an optional tensor.
func private @decoder.layers.9.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.9.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.9.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.9.dropout nn.Dropout submodule.
// Calls torch.nn.functional.dropout with p = 0.1 and inplace = false;
// the `training` flag is read from this submodule's global slot.
func private @decoder.layers.9.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = false
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.9.dropout.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.9.linear2 nn.Linear submodule.
// Calls torch.nn.functional.linear with the weight and bias read from this
// submodule's global slots; the bias is wrapped as an optional tensor.
func private @decoder.layers.9.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.9.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.9.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.9.dropout2 nn.Dropout submodule.
// Calls torch.nn.functional.dropout with p = 0.1 and inplace = false;
// the `training` flag is read from this submodule's global slot.
func private @decoder.layers.9.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = false
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.9.dropout2.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.9.norm2 LayerNorm submodule.
// Calls torch.nn.functional.layer_norm with normalized_shape = [512] and
// eps = 1e-5; weight and bias are read from this submodule's global slots.
func private @decoder.layers.9.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// normalized_shape entry
%c512_i64 = constant 512 : i64
// eps
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.9.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.9.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// Wrap weight/bias as optional tensors for the functional call.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.8.self_attn MultiheadAttention submodule.
// Identical in structure to the other decoder self_attn forwards in this
// file: %arg0/%arg1/%arg2 are the three input tensors and %arg3/%arg4/%arg5
// the mask/flag/mask trailing options forwarded to
// multi_head_attention_forward (NOTE(review): argument semantics follow the
// callee's signature — confirm there). Branches on the negation of
// `_qkv_same_embed_dim`: the "true" branch passes a `true` flag plus the
// q/k/v projection weight slots (None-typed), the "false" branch passes
// `false` and None. Returns the callee's result tuple unchanged.
func private @decoder.layers.8.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
// Module flag: do query/key/value share one embedding dimension?
%1 = torch.global_slot.get @decoder.layers.8.self_attn._qkv_same_embed_dim : !basicpy.BoolType
// Logical NOT of the flag; the "then" branch handles the
// NOT-same-embed-dim case.
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// Separate-projection path: forwards the q/k/v projection weight slots
// (None-typed) and a `true` flag to the functional op.
%5 = torch.global_slot.get @decoder.layers.8.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.8.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.8.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.8.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.8.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.8.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.8.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.8.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.8.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.8.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.8.self_attn.training : !basicpy.BoolType
%16 = torch.global_slot.get @decoder.layers.8.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.8.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.8.self_attn.v_proj_weight : !basicpy.NoneType
// Promote the None slots / singletons to optional-tensor type.
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// Packed-projection path: same call with a `false` flag and None for
// the separate q/k/v projection weights.
%5 = torch.global_slot.get @decoder.layers.8.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.8.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.8.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.8.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.8.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.8.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.8.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.8.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.8.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.8.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.8.self_attn.training : !basicpy.BoolType
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
// forward() for the decoder.layers.8.dropout1 nn.Dropout submodule.
// Calls torch.nn.functional.dropout with p = 0.1 and inplace = false;
// the `training` flag is read from this submodule's global slot.
func private @decoder.layers.8.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = false
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.8.dropout1.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.8.norm1 LayerNorm submodule.
// Calls torch.nn.functional.layer_norm with normalized_shape = [512] and
// eps = 1e-5; weight and bias are read from this submodule's global slots.
func private @decoder.layers.8.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// normalized_shape entry
%c512_i64 = constant 512 : i64
// eps
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.8.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.8.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// Wrap weight/bias as optional tensors for the functional call.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.8.linear1 nn.Linear submodule.
// Calls torch.nn.functional.linear with the weight and bias read from this
// submodule's global slots; the bias is wrapped as an optional tensor.
func private @decoder.layers.8.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.8.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.8.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.8.dropout nn.Dropout submodule.
// Calls torch.nn.functional.dropout with p = 0.1 and inplace = false;
// the `training` flag is read from this submodule's global slot.
func private @decoder.layers.8.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = false
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.8.dropout.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.8.linear2 nn.Linear submodule.
// Calls torch.nn.functional.linear with the weight and bias read from this
// submodule's global slots; the bias is wrapped as an optional tensor.
func private @decoder.layers.8.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.8.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.8.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.8.dropout2 nn.Dropout submodule.
// Calls torch.nn.functional.dropout with p = 0.1 and inplace = false;
// the `training` flag is read from this submodule's global slot.
func private @decoder.layers.8.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = false
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.8.dropout2.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.8.norm2 LayerNorm submodule.
// Calls torch.nn.functional.layer_norm with normalized_shape = [512] and
// eps = 1e-5; weight and bias are read from this submodule's global slots.
func private @decoder.layers.8.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// normalized_shape entry
%c512_i64 = constant 512 : i64
// eps
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.8.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.8.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// Wrap weight/bias as optional tensors for the functional call.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() for the decoder.layers.7.self_attn MultiheadAttention submodule.
// Identical in structure to the other decoder self_attn forwards in this
// file: %arg0/%arg1/%arg2 are the three input tensors and %arg3/%arg4/%arg5
// the mask/flag/mask trailing options forwarded to
// multi_head_attention_forward (NOTE(review): argument semantics follow the
// callee's signature — confirm there). Branches on the negation of
// `_qkv_same_embed_dim`: the "true" branch passes a `true` flag plus the
// q/k/v projection weight slots (None-typed), the "false" branch passes
// `false` and None. Returns the callee's result tuple unchanged.
func private @decoder.layers.7.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
// Module flag: do query/key/value share one embedding dimension?
%1 = torch.global_slot.get @decoder.layers.7.self_attn._qkv_same_embed_dim : !basicpy.BoolType
// Logical NOT of the flag; the "then" branch handles the
// NOT-same-embed-dim case.
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// Separate-projection path: forwards the q/k/v projection weight slots
// (None-typed) and a `true` flag to the functional op.
%5 = torch.global_slot.get @decoder.layers.7.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.7.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.7.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.7.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.7.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.7.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.7.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.7.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.7.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.7.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.7.self_attn.training : !basicpy.BoolType
%16 = torch.global_slot.get @decoder.layers.7.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.7.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.7.self_attn.v_proj_weight : !basicpy.NoneType
// Promote the None slots / singletons to optional-tensor type.
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// Packed-projection path: same call with a `false` flag and None for
// the separate q/k/v projection weights.
%5 = torch.global_slot.get @decoder.layers.7.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.7.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.7.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.7.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.7.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.7.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.7.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.7.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.7.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.7.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.7.self_attn.training : !basicpy.BoolType
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
// forward() of decoder.layers.7.dropout1 (nn.Dropout): delegates to
// torch.nn.functional.dropout with p=0.1 and inplace=false; the module's
// `training` global slot selects dropout vs. identity behavior at runtime.
func private @decoder.layers.7.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = False
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.7.dropout1.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.7.norm1 (nn.LayerNorm): layer-normalizes the
// input over a trailing dimension of size 512 (normalized_shape=[512]) with
// eps=1e-5, using learned weight/bias fetched from global slots. The
// weight/bias are widened to optionals to match the functional signature.
func private @decoder.layers.7.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
// eps for numerical stability
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.7.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.7.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape = [512]
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.7.linear1 (nn.Linear): affine transform via
// torch.nn.functional.linear using the module's weight/bias global slots;
// the bias is wrapped as an optional before the call.
func private @decoder.layers.7.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.7.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.7.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.7.dropout (nn.Dropout): functional dropout
// with p=0.1, inplace=false; gated by the module's `training` global slot.
func private @decoder.layers.7.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = False
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.7.dropout.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.7.linear2 (nn.Linear): affine transform via
// torch.nn.functional.linear using the module's weight/bias global slots;
// the bias is wrapped as an optional before the call.
func private @decoder.layers.7.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.7.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.7.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.7.dropout2 (nn.Dropout): functional dropout
// with p=0.1, inplace=false; gated by the module's `training` global slot.
func private @decoder.layers.7.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = False
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.7.dropout2.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.7.norm2 (nn.LayerNorm): layer-normalizes the
// input over a trailing dimension of size 512 (normalized_shape=[512]) with
// eps=1e-5, using learned weight/bias fetched from global slots.
func private @decoder.layers.7.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
// eps for numerical stability
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.7.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.7.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape = [512]
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.6.self_attn (nn.MultiheadAttention), as
// scripted by TorchScript. Args (matching the positional operand order of
// multi_head_attention_forward in torch.nn.functional): %arg0/%arg1/%arg2 =
// query/key/value, %arg3 = key_padding_mask, %arg4 = need_weights,
// %arg5 = attn_mask. Returns a tuple (per the functional's contract this is
// (attn_output, attn_output_weights) — see torch docs).
// Dispatch mirrors MultiheadAttention.forward: if `_qkv_same_embed_dim` is
// false, the "then" branch passes use_separate_proj_weight=true together
// with the q/k/v projection-weight slots (stored here as None); otherwise
// the "else" branch uses the packed in_proj_weight/in_proj_bias with
// use_separate_proj_weight=false and None for the separate weights.
func private @decoder.layers.6.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.global_slot.get @decoder.layers.6.self_attn._qkv_same_embed_dim : !basicpy.BoolType
// branch condition: not _qkv_same_embed_dim
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// separate q/k/v projection-weight path (use_separate_proj_weight = true)
%5 = torch.global_slot.get @decoder.layers.6.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.6.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.6.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.6.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.6.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.6.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.6.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.6.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.6.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.6.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.6.self_attn.training : !basicpy.BoolType
// q/k/v projection weight slots (None for this model)
%16 = torch.global_slot.get @decoder.layers.6.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.6.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.6.self_attn.v_proj_weight : !basicpy.NoneType
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
// static_k / static_v = None
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// packed in_proj path (use_separate_proj_weight = false)
%5 = torch.global_slot.get @decoder.layers.6.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.6.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.6.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.6.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.6.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.6.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.6.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.6.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.6.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.6.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.6.self_attn.training : !basicpy.BoolType
// q/k/v proj weights and static_k/static_v all None on this path
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
// forward() of decoder.layers.6.dropout1 (nn.Dropout): functional dropout
// with p=0.1, inplace=false; gated by the module's `training` global slot.
func private @decoder.layers.6.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = False
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.6.dropout1.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.6.norm1 (nn.LayerNorm): layer-normalizes the
// input over a trailing dimension of size 512 (normalized_shape=[512]) with
// eps=1e-5, using learned weight/bias fetched from global slots.
func private @decoder.layers.6.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
// eps for numerical stability
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.6.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.6.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape = [512]
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.6.linear1 (nn.Linear): affine transform via
// torch.nn.functional.linear using the module's weight/bias global slots;
// the bias is wrapped as an optional before the call.
func private @decoder.layers.6.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.6.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.6.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.6.dropout (nn.Dropout): functional dropout
// with p=0.1, inplace=false; gated by the module's `training` global slot.
func private @decoder.layers.6.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = False
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.6.dropout.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.6.linear2 (nn.Linear): affine transform via
// torch.nn.functional.linear using the module's weight/bias global slots;
// the bias is wrapped as an optional before the call.
func private @decoder.layers.6.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.6.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.6.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.6.dropout2 (nn.Dropout): functional dropout
// with p=0.1, inplace=false; gated by the module's `training` global slot.
func private @decoder.layers.6.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = False
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.6.dropout2.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.6.norm2 (nn.LayerNorm): layer-normalizes the
// input over a trailing dimension of size 512 (normalized_shape=[512]) with
// eps=1e-5, using learned weight/bias fetched from global slots.
func private @decoder.layers.6.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
// eps for numerical stability
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.6.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.6.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape = [512]
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.5.self_attn (nn.MultiheadAttention), as
// scripted by TorchScript. Args (matching the positional operand order of
// multi_head_attention_forward in torch.nn.functional): %arg0/%arg1/%arg2 =
// query/key/value, %arg3 = key_padding_mask, %arg4 = need_weights,
// %arg5 = attn_mask. Returns a tuple (per the functional's contract this is
// (attn_output, attn_output_weights) — see torch docs).
// Dispatch mirrors MultiheadAttention.forward: if `_qkv_same_embed_dim` is
// false, the "then" branch passes use_separate_proj_weight=true together
// with the q/k/v projection-weight slots (stored here as None); otherwise
// the "else" branch uses the packed in_proj_weight/in_proj_bias with
// use_separate_proj_weight=false and None for the separate weights.
func private @decoder.layers.5.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.global_slot.get @decoder.layers.5.self_attn._qkv_same_embed_dim : !basicpy.BoolType
// branch condition: not _qkv_same_embed_dim
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// separate q/k/v projection-weight path (use_separate_proj_weight = true)
%5 = torch.global_slot.get @decoder.layers.5.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.5.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.5.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.5.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.5.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.5.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.5.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.5.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.5.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.5.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.5.self_attn.training : !basicpy.BoolType
// q/k/v projection weight slots (None for this model)
%16 = torch.global_slot.get @decoder.layers.5.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.5.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.5.self_attn.v_proj_weight : !basicpy.NoneType
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
// static_k / static_v = None
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// packed in_proj path (use_separate_proj_weight = false)
%5 = torch.global_slot.get @decoder.layers.5.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.5.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.5.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.5.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.5.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.5.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.5.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.5.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.5.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.5.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.5.self_attn.training : !basicpy.BoolType
// q/k/v proj weights and static_k/static_v all None on this path
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
// forward() of decoder.layers.5.dropout1 (nn.Dropout): functional dropout
// with p=0.1, inplace=false; gated by the module's `training` global slot.
func private @decoder.layers.5.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = False
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.5.dropout1.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.5.norm1 (nn.LayerNorm): layer-normalizes the
// input over a trailing dimension of size 512 (normalized_shape=[512]) with
// eps=1e-5, using learned weight/bias fetched from global slots.
func private @decoder.layers.5.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
// eps for numerical stability
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.5.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.5.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape = [512]
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.5.linear1 (nn.Linear): affine transform via
// torch.nn.functional.linear using the module's weight/bias global slots;
// the bias is wrapped as an optional before the call.
func private @decoder.layers.5.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.5.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.5.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.5.dropout (nn.Dropout): functional dropout
// with p=0.1, inplace=false; gated by the module's `training` global slot.
func private @decoder.layers.5.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = False
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.5.dropout.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.5.linear2 (nn.Linear): affine transform via
// torch.nn.functional.linear using the module's weight/bias global slots;
// the bias is wrapped as an optional before the call.
func private @decoder.layers.5.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.5.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.5.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.5.dropout2 (nn.Dropout): functional dropout
// with p=0.1, inplace=false; gated by the module's `training` global slot.
func private @decoder.layers.5.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace = False
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.5.dropout2.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.5.norm2 (nn.LayerNorm): layer-normalizes the
// input over a trailing dimension of size 512 (normalized_shape=[512]) with
// eps=1e-5, using learned weight/bias fetched from global slots.
func private @decoder.layers.5.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
// eps for numerical stability
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.5.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.5.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape = [512]
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward() of decoder.layers.4.self_attn (nn.MultiheadAttention), as
// scripted by TorchScript. Args (matching the positional operand order of
// multi_head_attention_forward in torch.nn.functional): %arg0/%arg1/%arg2 =
// query/key/value, %arg3 = key_padding_mask, %arg4 = need_weights,
// %arg5 = attn_mask. Returns a tuple (per the functional's contract this is
// (attn_output, attn_output_weights) — see torch docs).
// Dispatch mirrors MultiheadAttention.forward: if `_qkv_same_embed_dim` is
// false, the "then" branch passes use_separate_proj_weight=true together
// with the q/k/v projection-weight slots (stored here as None); otherwise
// the "else" branch uses the packed in_proj_weight/in_proj_bias with
// use_separate_proj_weight=false and None for the separate weights.
func private @decoder.layers.4.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.global_slot.get @decoder.layers.4.self_attn._qkv_same_embed_dim : !basicpy.BoolType
// branch condition: not _qkv_same_embed_dim
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// separate q/k/v projection-weight path (use_separate_proj_weight = true)
%5 = torch.global_slot.get @decoder.layers.4.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.4.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.4.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.4.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.4.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.4.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.4.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.4.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.4.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.4.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.4.self_attn.training : !basicpy.BoolType
// q/k/v projection weight slots (None for this model)
%16 = torch.global_slot.get @decoder.layers.4.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.4.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.4.self_attn.v_proj_weight : !basicpy.NoneType
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
// static_k / static_v = None
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// packed in_proj path (use_separate_proj_weight = false)
%5 = torch.global_slot.get @decoder.layers.4.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.4.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.4.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.4.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.4.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.4.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.4.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.4.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.4.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.4.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.4.self_attn.training : !basicpy.BoolType
// q/k/v proj weights and static_k/static_v all None on this path
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
// Forward of Dropout submodule @decoder.layers.4.dropout1: functional
// dropout with p = 0.1, training flag read from the module's global slot,
// inplace = false.
func private @decoder.layers.4.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace argument to dropout.
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1.
%cst = constant 1.000000e-01 : f64
// Current training mode of this submodule (mutable module state).
%0 = torch.global_slot.get @decoder.layers.4.dropout1.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of LayerNorm submodule @decoder.layers.4.norm1: layer_norm with
// normalized_shape = [512] and eps = 1e-5, weight/bias read from slots.
func private @decoder.layers.4.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.4.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.4.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape list: [512].
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// layer_norm takes optional weight/bias, so wrap the tensors as optionals.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Linear submodule @decoder.layers.4.linear1: functional linear
// (affine map) with weight and bias read from the module's global slots.
func private @decoder.layers.4.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.4.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.4.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// linear's bias parameter is optional; wrap the tensor accordingly.
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Dropout submodule @decoder.layers.4.dropout: functional
// dropout with p = 0.1, training flag read from the module's global slot,
// inplace = false.
func private @decoder.layers.4.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace argument to dropout.
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1.
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.4.dropout.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Linear submodule @decoder.layers.4.linear2: functional linear
// with weight and bias read from the module's global slots.
func private @decoder.layers.4.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.4.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.4.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
// linear's bias parameter is optional; wrap the tensor accordingly.
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Dropout submodule @decoder.layers.4.dropout2: functional
// dropout with p = 0.1, training flag read from the module's global slot,
// inplace = false.
func private @decoder.layers.4.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace argument to dropout.
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1.
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.4.dropout2.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of LayerNorm submodule @decoder.layers.4.norm2: layer_norm with
// normalized_shape = [512] and eps = 1e-5, weight/bias read from slots.
func private @decoder.layers.4.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.4.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.4.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape list: [512].
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// layer_norm takes optional weight/bias, so wrap the tensors as optionals.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of MultiheadAttention submodule @decoder.layers.3.self_attn.
// NOTE(review): per the torch MHA convention, %arg0/%arg1/%arg2 are
// presumably (query, key, value), %arg3 the optional key_padding_mask,
// %arg4 need_weights, %arg5 the optional attn_mask — confirm at call sites.
// Returns the tuple produced by multi_head_attention_forward.
func private @decoder.layers.3.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
// Branch on NOT _qkv_same_embed_dim (mirrors nn.MultiheadAttention.forward).
%1 = torch.global_slot.get @decoder.layers.3.self_attn._qkv_same_embed_dim : !basicpy.BoolType
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// Separate-projection path: use_separate_proj_weight = true; the
// q/k/v projection weight slots (typed None here) are passed through.
%5 = torch.global_slot.get @decoder.layers.3.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.3.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.3.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.3.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.3.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.3.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.3.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.3.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.3.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.3.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.3.self_attn.training : !basicpy.BoolType
%16 = torch.global_slot.get @decoder.layers.3.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.3.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.3.self_attn.v_proj_weight : !basicpy.NoneType
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// Packed-projection path: use_separate_proj_weight = false; the five
// trailing optional arguments are all None.
%5 = torch.global_slot.get @decoder.layers.3.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.3.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.3.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.3.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.3.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.3.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.3.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.3.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.3.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.3.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.3.self_attn.training : !basicpy.BoolType
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
// Forward of Dropout submodule @decoder.layers.3.dropout1: functional
// dropout with p = 0.1, training flag read from the module's global slot,
// inplace = false.
func private @decoder.layers.3.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace argument to dropout.
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1.
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.3.dropout1.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of LayerNorm submodule @decoder.layers.3.norm1: layer_norm with
// normalized_shape = [512] and eps = 1e-5, weight/bias read from slots.
func private @decoder.layers.3.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.3.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.3.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape list: [512].
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// layer_norm takes optional weight/bias, so wrap the tensors as optionals.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Linear submodule @decoder.layers.3.linear1: functional linear
// with weight and bias read from the module's global slots.
func private @decoder.layers.3.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.3.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.3.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// linear's bias parameter is optional; wrap the tensor accordingly.
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Dropout submodule @decoder.layers.3.dropout: functional
// dropout with p = 0.1, training flag read from the module's global slot,
// inplace = false.
func private @decoder.layers.3.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace argument to dropout.
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1.
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.3.dropout.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Linear submodule @decoder.layers.3.linear2: functional linear
// with weight and bias read from the module's global slots.
func private @decoder.layers.3.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.3.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.3.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
// linear's bias parameter is optional; wrap the tensor accordingly.
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Dropout submodule @decoder.layers.3.dropout2: functional
// dropout with p = 0.1, training flag read from the module's global slot,
// inplace = false.
func private @decoder.layers.3.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace argument to dropout.
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1.
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.3.dropout2.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of LayerNorm submodule @decoder.layers.3.norm2: layer_norm with
// normalized_shape = [512] and eps = 1e-5, weight/bias read from slots.
func private @decoder.layers.3.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.3.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.3.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape list: [512].
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// layer_norm takes optional weight/bias, so wrap the tensors as optionals.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of MultiheadAttention submodule @decoder.layers.2.self_attn.
// NOTE(review): per the torch MHA convention, %arg0/%arg1/%arg2 are
// presumably (query, key, value), %arg3 the optional key_padding_mask,
// %arg4 need_weights, %arg5 the optional attn_mask — confirm at call sites.
// Returns the tuple produced by multi_head_attention_forward.
func private @decoder.layers.2.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
// Branch on NOT _qkv_same_embed_dim (mirrors nn.MultiheadAttention.forward).
%1 = torch.global_slot.get @decoder.layers.2.self_attn._qkv_same_embed_dim : !basicpy.BoolType
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// Separate-projection path: use_separate_proj_weight = true; the
// q/k/v projection weight slots (typed None here) are passed through.
%5 = torch.global_slot.get @decoder.layers.2.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.2.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.2.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.2.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.2.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.2.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.2.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.2.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.2.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.2.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.2.self_attn.training : !basicpy.BoolType
%16 = torch.global_slot.get @decoder.layers.2.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.2.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.2.self_attn.v_proj_weight : !basicpy.NoneType
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// Packed-projection path: use_separate_proj_weight = false; the five
// trailing optional arguments are all None.
%5 = torch.global_slot.get @decoder.layers.2.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.2.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.2.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.2.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.2.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.2.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.2.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.2.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.2.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.2.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.2.self_attn.training : !basicpy.BoolType
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
// Forward of Dropout submodule @decoder.layers.2.dropout1: functional
// dropout with p = 0.1, training flag read from the module's global slot,
// inplace = false.
func private @decoder.layers.2.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace argument to dropout.
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1.
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.2.dropout1.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of LayerNorm submodule @decoder.layers.2.norm1: layer_norm with
// normalized_shape = [512] and eps = 1e-5, weight/bias read from slots.
func private @decoder.layers.2.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.2.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.2.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape list: [512].
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// layer_norm takes optional weight/bias, so wrap the tensors as optionals.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Linear submodule @decoder.layers.2.linear1: functional linear
// with weight and bias read from the module's global slots.
func private @decoder.layers.2.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.2.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.2.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// linear's bias parameter is optional; wrap the tensor accordingly.
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Dropout submodule @decoder.layers.2.dropout: functional
// dropout with p = 0.1, training flag read from the module's global slot,
// inplace = false.
func private @decoder.layers.2.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace argument to dropout.
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1.
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.2.dropout.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Linear submodule @decoder.layers.2.linear2: functional linear
// with weight and bias read from the module's global slots.
func private @decoder.layers.2.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.2.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.2.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
// linear's bias parameter is optional; wrap the tensor accordingly.
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of Dropout submodule @decoder.layers.2.dropout2: functional
// dropout with p = 0.1, training flag read from the module's global slot,
// inplace = false.
func private @decoder.layers.2.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
// inplace argument to dropout.
%bool_false = basicpy.bool_constant false
// dropout probability p = 0.1.
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.2.dropout2.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of LayerNorm submodule @decoder.layers.2.norm2: layer_norm with
// normalized_shape = [512] and eps = 1e-5, weight/bias read from slots.
func private @decoder.layers.2.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.2.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.2.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape list: [512].
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// layer_norm takes optional weight/bias, so wrap the tensors as optionals.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Forward of MultiheadAttention submodule @decoder.layers.1.self_attn.
// NOTE(review): per the torch MHA convention, %arg0/%arg1/%arg2 are
// presumably (query, key, value), %arg3 the optional key_padding_mask,
// %arg4 need_weights, %arg5 the optional attn_mask — confirm at call sites.
// Returns the tuple produced by multi_head_attention_forward.
func private @decoder.layers.1.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
// Branch on NOT _qkv_same_embed_dim (mirrors nn.MultiheadAttention.forward).
%1 = torch.global_slot.get @decoder.layers.1.self_attn._qkv_same_embed_dim : !basicpy.BoolType
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// Separate-projection path: use_separate_proj_weight = true; the
// q/k/v projection weight slots (typed None here) are passed through.
%5 = torch.global_slot.get @decoder.layers.1.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.1.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.1.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.1.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.1.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.1.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.1.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.1.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.1.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.1.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.1.self_attn.training : !basicpy.BoolType
%16 = torch.global_slot.get @decoder.layers.1.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.1.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.1.self_attn.v_proj_weight : !basicpy.NoneType
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// Packed-projection path: use_separate_proj_weight = false; the five
// trailing optional arguments are all None.
%5 = torch.global_slot.get @decoder.layers.1.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.1.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.1.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.1.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.1.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.1.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.1.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.1.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.1.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.1.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.1.self_attn.training : !basicpy.BoolType
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
// decoder.layers.1.dropout1: functional dropout with p = 0.1, inplace = false.
// The train/eval flag is read from the @decoder.layers.1.dropout1.training global slot.
func private @decoder.layers.1.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.1.dropout1.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.1.norm1: layer norm over a trailing dimension of size 512,
// eps = 1e-5, with learnable weight/bias taken from global slots.
func private @decoder.layers.1.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.1.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.1.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape = [512]
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
// Widen tensors to optional<tensor> to match the layer_norm signature.
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.1.linear1: affine projection y = x @ W^T + b via functional.linear,
// with W/b read from the layer's global slots.
func private @decoder.layers.1.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.1.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.1.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// Bias is widened to optional<tensor> to match the linear signature.
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.1.dropout: functional dropout with p = 0.1, inplace = false.
// The train/eval flag is read from the @decoder.layers.1.dropout.training global slot.
func private @decoder.layers.1.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.1.dropout.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.1.linear2: affine projection via functional.linear using the
// layer's weight/bias global slots (second half of the feed-forward sublayer pair).
func private @decoder.layers.1.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.1.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.1.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.1.dropout2: functional dropout with p = 0.1, inplace = false.
// The train/eval flag is read from the @decoder.layers.1.dropout2.training global slot.
func private @decoder.layers.1.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.1.dropout2.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.1.norm2: layer norm over a trailing dimension of size 512,
// eps = 1e-5, with weight/bias taken from global slots.
func private @decoder.layers.1.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.1.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.1.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape = [512]
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.0.self_attn: MultiheadAttention forward.
// Args: %arg0/%arg1/%arg2 = query/key/value, %arg3 = key_padding_mask (optional),
// %arg4 = need_weights, %arg5 = attn_mask (optional). Returns a (attn_output,
// attn_output_weights) tuple from multi_head_attention_forward.
// The branch mirrors nn.MultiheadAttention: it tests `not _qkv_same_embed_dim`.
func private @decoder.layers.0.self_attn.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>, %arg2: !numpy.ndarray<*:!numpy.any_dtype>, %arg3: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, %arg4: !basicpy.BoolType, %arg5: !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.global_slot.get @decoder.layers.0.self_attn._qkv_same_embed_dim : !basicpy.BoolType
%2 = torch.kernel_call "aten::__not__" %1 : (!basicpy.BoolType) -> !basicpy.BoolType {sigArgTypes = ["bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.TupleType) {
// Then-branch (_qkv_same_embed_dim is false): pass use_separate_proj_weight=true
// together with the per-projection q/k/v weight slots (None here).
%5 = torch.global_slot.get @decoder.layers.0.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.0.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.0.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.0.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.0.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.0.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.0.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.0.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.0.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.0.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.0.self_attn.training : !basicpy.BoolType
%16 = torch.global_slot.get @decoder.layers.0.self_attn.q_proj_weight : !basicpy.NoneType
%17 = torch.global_slot.get @decoder.layers.0.self_attn.k_proj_weight : !basicpy.NoneType
%18 = torch.global_slot.get @decoder.layers.0.self_attn.v_proj_weight : !basicpy.NoneType
%19 = torch.derefine %16 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %17 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = torch.derefine %18 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%22 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%23 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%24 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_true, %19, %20, %21, %22, %23) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %24 : !basicpy.TupleType
} else {
// Else-branch (_qkv_same_embed_dim is true): packed in_proj_weight path,
// use_separate_proj_weight=false and all separate-weight operands are None.
%5 = torch.global_slot.get @decoder.layers.0.self_attn.embed_dim : i64
%6 = torch.global_slot.get @decoder.layers.0.self_attn.num_heads : i64
%7 = torch.global_slot.get @decoder.layers.0.self_attn.in_proj_weight : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @decoder.layers.0.self_attn.in_proj_bias : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @decoder.layers.0.self_attn.bias_k : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%10 = torch.global_slot.get @decoder.layers.0.self_attn.bias_v : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.global_slot.get @decoder.layers.0.self_attn.add_zero_attn : !basicpy.BoolType
%12 = torch.global_slot.get @decoder.layers.0.self_attn.dropout : f64
%13 = torch.global_slot.get @decoder.layers.0.self_attn.out_proj.weight : !numpy.ndarray<*:!numpy.any_dtype>
%14 = torch.global_slot.get @decoder.layers.0.self_attn.out_proj.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @decoder.layers.0.self_attn.training : !basicpy.BoolType
%16 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%17 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%18 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%19 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%20 = torch.derefine %0 : !basicpy.NoneType -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%21 = call @__torch__.torch.nn.functional.multi_head_attention_forward$7(%arg0, %arg1, %arg2, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %arg3, %arg4, %arg5, %bool_false, %16, %17, %18, %19, %20) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !basicpy.TupleType
scf.yield %21 : !basicpy.TupleType
}
return %4 : !basicpy.TupleType
}
// decoder.layers.0.dropout1: functional dropout with p = 0.1, inplace = false.
// The train/eval flag is read from the @decoder.layers.0.dropout1.training global slot.
func private @decoder.layers.0.dropout1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.0.dropout1.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.0.norm1: layer norm over a trailing dimension of size 512,
// eps = 1e-5, with weight/bias taken from global slots.
func private @decoder.layers.0.norm1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.0.norm1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.0.norm1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape = [512]
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.0.linear1: affine projection via functional.linear using the
// layer's weight/bias global slots.
func private @decoder.layers.0.linear1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.0.linear1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.0.linear1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.0.dropout: functional dropout with p = 0.1, inplace = false.
// The train/eval flag is read from the @decoder.layers.0.dropout.training global slot.
func private @decoder.layers.0.dropout.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.0.dropout.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.0.linear2: affine projection via functional.linear using the
// layer's weight/bias global slots.
func private @decoder.layers.0.linear2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.global_slot.get @decoder.layers.0.linear2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.0.linear2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%2 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%3 = call @__torch__.torch.nn.functional.linear$6(%arg0, %0, %2) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.0.dropout2: functional dropout with p = 0.1, inplace = false.
// The train/eval flag is read from the @decoder.layers.0.dropout2.training global slot.
func private @decoder.layers.0.dropout2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @decoder.layers.0.dropout2.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// decoder.layers.0.norm2: layer norm over a trailing dimension of size 512,
// eps = 1e-5, with weight/bias taken from global slots.
func private @decoder.layers.0.norm2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c512_i64 = constant 512 : i64
%cst = constant 1.000000e-05 : f64
%0 = torch.global_slot.get @decoder.layers.0.norm2.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @decoder.layers.0.norm2.bias : !numpy.ndarray<*:!numpy.any_dtype>
// normalized_shape = [512]
%2 = basicpy.build_list %c512_i64 : (i64) -> !basicpy.ListType
%3 = torch.derefine %0 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%4 = torch.derefine %1 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%5 = call @__torch__.torch.nn.functional.layer_norm$10(%arg0, %2, %3, %4, %cst) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.1: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.1.skip flag is set.
func private @encoder.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.1.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.1.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.1.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.2: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.2.skip flag is set.
func private @encoder.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.2.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.2.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.2.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.3: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.3.skip flag is set.
func private @encoder.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.3.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.3.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.3.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.4: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.4.skip flag is set.
func private @encoder.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.4.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.4.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.4.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.5: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.5.skip flag is set.
func private @encoder.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.5.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.5.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.5.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.6: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.6.skip flag is set.
func private @encoder.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.6.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.6.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.6.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.7: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.7.skip flag is set.
func private @encoder.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.7.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.7.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.7.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.8: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.8.skip flag is set.
func private @encoder.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.8.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.8.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.8.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.9: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.9.skip flag is set.
func private @encoder.9.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.9.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.9.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.9.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.10: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.10.skip flag is set.
func private @encoder.10.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.10.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.10.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.10.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.11: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.11.skip flag is set.
func private @encoder.11.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.11.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.11.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.11.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.12: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.12.skip flag is set.
func private @encoder.12.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.12.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.12.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.12.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.13: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.13.skip flag is set.
func private @encoder.13.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.13.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.13.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.13.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.14: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.14.skip flag is set.
func private @encoder.14.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.14.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.14.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.14.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.15: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.15.skip flag is set.
func private @encoder.15.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.15.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.15.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.15.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.16: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.16.skip flag is set.
func private @encoder.16.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.16.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.16.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.16.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.17: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.17.skip flag is set.
func private @encoder.17.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.17.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.17.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.17.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.18: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.18.skip flag is set.
func private @encoder.18.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.18.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.18.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.18.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.19: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.19.skip flag is set.
func private @encoder.19.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.19.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.19.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.19.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.20: optional-residual wrapper — run the sub-layer stack, then add the
// block input back (via skip_add) only when the @encoder.20.skip flag is set.
func private @encoder.20.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.20.layers.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.20.skip : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
%3 = scf.if %2 -> (!numpy.ndarray<*:!numpy.any_dtype>) {
%4 = call @encoder.20.skip_add.add(%0, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
scf.yield %4 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
scf.yield %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
return %3 : !numpy.ndarray<*:!numpy.any_dtype>
}
// encoder.20.layers: sequential pipeline — feed the input through sub-layers
// 0..8 in order, each consuming the previous one's output.
func private @encoder.20.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.20.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.20.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @encoder.20.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%3 = call @encoder.20.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%4 = call @encoder.20.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.20.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.20.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.20.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.20.layers.8.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization-aware elementwise add for the stage-20 skip connection
// (lowered from nn.quantized.FloatFunctional.add): aten::add with alpha = 1,
// followed by the activation observer (a no-op in this float export).
func private @encoder.20.skip_add.add(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%0 = torch.kernel_call "aten::add" %arg0, %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.20.skip_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity pass-through: the quantization activation observer lowered to a
// no-op (no observation happens in this export).
func private @encoder.20.skip_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Grouped 1-D convolution (Conv1d lowering): weight/bias come from global
// slots; stride [1], padding [3], dilation [1], groups = 8.
func private @encoder.20.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c3_i64 = constant 3 : i64
%c8_i64 = constant 8 : i64
%0 = torch.global_slot.get @encoder.20.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.20.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
// aten::conv1d argument order: input, weight, bias, stride, padding, dilation, groups.
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm lowering for encoder.20.layers.1: validates input rank, bumps
// num_batches_tracked when training, then dispatches to
// functional.batch_norm with momentum 0.1 and eps 1e-5.
func private @encoder.20.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%c1_i64 = constant 1 : i64
%cst = constant 1.000000e-01 : f64
%cst_0 = constant 1.000000e-05 : f64
// Raises if the input is not 2-D or 3-D (see _check_input_dim below).
%0 = call @encoder.20.layers.1._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
%1 = torch.global_slot.get @encoder.20.layers.1.training : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
scf.if %2 {
// Non-mutating aten::add; the incremented counter is written back to the
// global slot explicitly (functionalized in-place update).
%15 = torch.global_slot.get @encoder.20.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
%16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
torch.global_slot.set @encoder.20.layers.1.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
}
// Re-reads the training flag (generated code; redundant with %1 above).
%3 = torch.global_slot.get @encoder.20.layers.1.training : !basicpy.BoolType
%4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
%5 = scf.if %4 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%6 = torch.global_slot.get @encoder.20.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
%7 = torch.global_slot.get @encoder.20.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @encoder.20.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @encoder.20.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// Wrap concrete tensors as optionals to match the batch_norm signature.
%10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation via functional.relu; the true flag is presumably the
// `inplace` argument (functionalized here) — matches nn.ReLU(inplace=True).
func private @encoder.20.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1; training mode is read from the module's global slot,
// and the final false is presumably `inplace` (per functional.dropout's
// (input, p, training, inplace) signature).
func private @encoder.20.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.20.layers.3.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-excitation-style gate (inferred from the _se_reduce/_se_expand
// submodule names): global-average over dim 2 (keepdim=false), unsqueeze it
// back, reduce -> swish -> expand -> sigmoid, then scale the original input
// elementwise through the quant-aware f_add.mul.
func private @encoder.20.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
// aten::mean over dim [2], keepdim=false, dtype=None; restore the reduced
// axis with unsqueeze(2).
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.20.layers.4._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.20.layers.4.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.20.layers.4._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.20.layers.4.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Gate: sigmoid(expand(...)) * x.
%8 = call @encoder.20.layers.4.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// 1-D convolution (Conv1d lowering): stride [1], padding [0], dilation [1],
// groups = 1; weight/bias from global slots.
func private @encoder.20.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.20.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.20.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm lowering for encoder.20.layers.6 — same structure as
// encoder.20.layers.1.forward: rank check, conditional num_batches_tracked
// bump, then functional.batch_norm with momentum 0.1 and eps 1e-5.
func private @encoder.20.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%c1_i64 = constant 1 : i64
%cst = constant 1.000000e-01 : f64
%cst_0 = constant 1.000000e-05 : f64
// Raises if the input is not 2-D or 3-D.
%0 = call @encoder.20.layers.6._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
%1 = torch.global_slot.get @encoder.20.layers.6.training : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
scf.if %2 {
// Functionalized counter increment written back to the global slot.
%15 = torch.global_slot.get @encoder.20.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
%16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
torch.global_slot.set @encoder.20.layers.6.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
}
%3 = torch.global_slot.get @encoder.20.layers.6.training : !basicpy.BoolType
%4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
%5 = scf.if %4 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%6 = torch.global_slot.get @encoder.20.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
%7 = torch.global_slot.get @encoder.20.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @encoder.20.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @encoder.20.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype>
%10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation via functional.relu (true flag presumably `inplace`,
// functionalized here).
func private @encoder.20.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1; training mode read from the module's global slot,
// inplace=false (presumed, per functional.dropout's signature).
func private @encoder.20.layers.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.20.layers.8.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Input-rank guard (BatchNorm's _check_input_dim lowering): raises an
// Exception unless the input tensor is 2-D or 3-D. Short-circuit `and` is
// lowered as nested scf.if: dim != 2 && dim != 3 => raise.
func private @encoder.20.layers.6._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
%bytes = basicpy.bytes_constant "Exception"
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%c3_i64 = constant 3 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.BoolType) {
// Only evaluated when dim != 2 (short-circuit of the `and`).
%6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %7 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
scf.if %5 {
torch.prim.RaiseException %bytes
} else {
}
return %0 : !basicpy.NoneType
}
// Squeeze-excitation "reduce" convolution: conv1d with stride [1],
// padding [0], dilation [1], groups = 1; weight/bias from global slots.
func private @encoder.20.layers.4._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.20.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.20.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Swish activation: x * sigmoid(x).
func private @encoder.20.layers.4.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.20.layers.4.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-excitation "expand" convolution: conv1d with stride [1],
// padding [0], dilation [1], groups = 1; weight/bias from global slots.
func private @encoder.20.layers.4._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.20.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.20.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid (nn.Sigmoid lowering).
func private @encoder.20.layers.4.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization-aware elementwise multiply (FloatFunctional.mul lowering):
// aten::mul followed by the activation observer (a no-op in this export).
func private @encoder.20.layers.4.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.20.layers.4.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity pass-through: quantization activation observer lowered to a no-op.
func private @encoder.20.layers.4.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid (the sigmoid sub-module inside the swish activation).
func private @encoder.20.layers.4.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Input-rank guard (BatchNorm's _check_input_dim lowering): raises an
// Exception unless the input tensor is 2-D or 3-D. Short-circuit `and` is
// lowered as nested scf.if: dim != 2 && dim != 3 => raise.
func private @encoder.20.layers.1._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
%bytes = basicpy.bytes_constant "Exception"
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%c3_i64 = constant 3 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
%4 = scf.if %3 -> (!basicpy.BoolType) {
// Only evaluated when dim != 2 (short-circuit of the `and`).
%6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %7 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
scf.if %5 {
torch.prim.RaiseException %bytes
} else {
}
return %0 : !basicpy.NoneType
}
// Sequential container for encoder stage 19: chains sublayers 0..8 in order,
// each consuming the previous result (lowered from an nn.Sequential).
func private @encoder.19.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.19.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.19.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @encoder.19.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%3 = call @encoder.19.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%4 = call @encoder.19.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.19.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.19.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.19.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.19.layers.8.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization-aware elementwise add for the stage-19 skip connection:
// aten::add with alpha = 1, then the activation observer (a no-op here).
func private @encoder.19.skip_add.add(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%0 = torch.kernel_call "aten::add" %arg0, %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.19.skip_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity pass-through: quantization activation observer lowered to a no-op.
func private @encoder.19.skip_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Grouped 1-D convolution (Conv1d lowering): weight/bias come from global
// slots; stride [1], padding [3], dilation [1], groups = 8.
func private @encoder.19.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c3_i64 = constant 3 : i64
%c8_i64 = constant 8 : i64
%0 = torch.global_slot.get @encoder.19.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.19.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
// aten::conv1d argument order: input, weight, bias, stride, padding, dilation, groups.
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm lowering for encoder.19.layers.1: validates input rank, bumps
// num_batches_tracked when training, then dispatches to
// functional.batch_norm with momentum 0.1 and eps 1e-5.
func private @encoder.19.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%c1_i64 = constant 1 : i64
%cst = constant 1.000000e-01 : f64
%cst_0 = constant 1.000000e-05 : f64
// Raises if the input is not 2-D or 3-D.
%0 = call @encoder.19.layers.1._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
%1 = torch.global_slot.get @encoder.19.layers.1.training : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
scf.if %2 {
// Functionalized counter increment written back to the global slot.
%15 = torch.global_slot.get @encoder.19.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
%16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
torch.global_slot.set @encoder.19.layers.1.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
}
%3 = torch.global_slot.get @encoder.19.layers.1.training : !basicpy.BoolType
%4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
%5 = scf.if %4 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%6 = torch.global_slot.get @encoder.19.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
%7 = torch.global_slot.get @encoder.19.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @encoder.19.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @encoder.19.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype>
// Wrap concrete tensors as optionals to match the batch_norm signature.
%10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation via functional.relu (true flag presumably `inplace`,
// functionalized here).
func private @encoder.19.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1; training mode read from the module's global slot,
// inplace=false (presumed, per functional.dropout's signature).
func private @encoder.19.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.19.layers.3.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-excitation-style gate (inferred from the _se_reduce/_se_expand
// submodule names): global-average over dim 2 (keepdim=false), unsqueeze it
// back, reduce -> swish -> expand -> sigmoid, then scale the original input
// elementwise through the quant-aware f_add.mul.
func private @encoder.19.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
// aten::mean over dim [2], keepdim=false, dtype=None; restore the reduced
// axis with unsqueeze(2).
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.19.layers.4._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.19.layers.4.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.19.layers.4._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.19.layers.4.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
// Gate: sigmoid(expand(...)) * x.
%8 = call @encoder.19.layers.4.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// 1-D convolution (Conv1d lowering): stride [1], padding [0], dilation [1],
// groups = 1; weight/bias from global slots.
func private @encoder.19.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.19.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.19.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm lowering for encoder.19.layers.6 — same structure as
// encoder.19.layers.1.forward: rank check, conditional num_batches_tracked
// bump, then functional.batch_norm with momentum 0.1 and eps 1e-5.
func private @encoder.19.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%c1_i64 = constant 1 : i64
%cst = constant 1.000000e-01 : f64
%cst_0 = constant 1.000000e-05 : f64
// Raises if the input is not 2-D or 3-D.
%0 = call @encoder.19.layers.6._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
%1 = torch.global_slot.get @encoder.19.layers.6.training : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
scf.if %2 {
// Functionalized counter increment written back to the global slot.
%15 = torch.global_slot.get @encoder.19.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
%16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
torch.global_slot.set @encoder.19.layers.6.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
}
%3 = torch.global_slot.get @encoder.19.layers.6.training : !basicpy.BoolType
%4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
%5 = scf.if %4 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%6 = torch.global_slot.get @encoder.19.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
%7 = torch.global_slot.get @encoder.19.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @encoder.19.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @encoder.19.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype>
%10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation via functional.relu (true flag presumably `inplace`,
// functionalized here).
func private @encoder.19.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1; training mode read from the module's global slot,
// inplace=false (presumed, per functional.dropout's signature).
func private @encoder.19.layers.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.19.layers.8.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm1d input-rank check: raises an exception unless the input
// tensor has rank 2 or rank 3 (i.e. raise when dim != 2 AND dim != 3).
// The scf.if below is the short-circuit lowering of the `and`.
func private @encoder.19.layers.6._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
  %bytes = basicpy.bytes_constant "Exception"
  %bool_false = basicpy.bool_constant false
  %c2_i64 = constant 2 : i64
  %c3_i64 = constant 3 : i64
  %0 = basicpy.singleton : !basicpy.NoneType
  %1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
  %2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
  %3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
  // Only evaluate `dim != 3` when `dim != 2` already held (short-circuit).
  %4 = scf.if %3 -> (!basicpy.BoolType) {
    %6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
    scf.yield %7 : !basicpy.BoolType
  } else {
    scf.yield %bool_false : !basicpy.BoolType
  }
  %5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
  // Rank is neither 2 nor 3: raise (exception detail reduced to a
  // generic "Exception" payload by the importer).
  scf.if %5 {
    torch.prim.RaiseException %bytes
  } else {
  }
  return %0 : !basicpy.NoneType
}
// Squeeze-and-excitation "reduce" projection: conv1d with stride [1],
// padding [0], dilation [1], groups 1 (operand order per the
// aten::conv1d signature: input, weight, bias, stride, padding,
// dilation, groups).
func private @encoder.19.layers.4._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c1_i64 = constant 1 : i64
  %c0_i64 = constant 0 : i64
  %0 = torch.global_slot.get @encoder.19.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @encoder.19.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
  %4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Swish activation: x * sigmoid(x).
func private @encoder.19.layers.4.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = call @encoder.19.layers.4.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-and-excitation "expand" projection: conv1d with stride [1],
// padding [0], dilation [1], groups 1 (operand order per the
// aten::conv1d signature: input, weight, bias, stride, padding,
// dilation, groups).
func private @encoder.19.layers.4._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c1_i64 = constant 1 : i64
  %c0_i64 = constant 0 : i64
  %0 = torch.global_slot.get @encoder.19.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @encoder.19.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
  %4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid (gating activation of the squeeze-excite block).
func private @encoder.19.layers.4.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// FloatFunctional multiply: elementwise mul followed by the (currently
// identity) activation_post_process observer hook.
func private @encoder.19.layers.4.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %1 = call @encoder.19.layers.4.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization observer hook — identity in this (non-observing) export.
func private @encoder.19.layers.4.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid used inside the Swish activation.
func private @encoder.19.layers.4.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm1d input-rank check: raises an exception unless the input
// tensor has rank 2 or rank 3 (i.e. raise when dim != 2 AND dim != 3).
// The scf.if below is the short-circuit lowering of the `and`.
func private @encoder.19.layers.1._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
  %bytes = basicpy.bytes_constant "Exception"
  %bool_false = basicpy.bool_constant false
  %c2_i64 = constant 2 : i64
  %c3_i64 = constant 3 : i64
  %0 = basicpy.singleton : !basicpy.NoneType
  %1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
  %2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
  %3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
  // Only evaluate `dim != 3` when `dim != 2` already held (short-circuit).
  %4 = scf.if %3 -> (!basicpy.BoolType) {
    %6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
    scf.yield %7 : !basicpy.BoolType
  } else {
    scf.yield %bool_false : !basicpy.BoolType
  }
  %5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
  // Rank is neither 2 nor 3: raise (exception detail reduced to a
  // generic "Exception" payload by the importer).
  scf.if %5 {
    torch.prim.RaiseException %bytes
  } else {
  }
  return %0 : !basicpy.NoneType
}
// Sequential container for encoder block 18: chains sublayers 0..8
// (conv -> batchnorm -> relu -> dropout -> squeeze-excite -> conv ->
// batchnorm -> relu -> dropout, per the callee definitions below).
func private @encoder.18.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = call @encoder.18.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %1 = call @encoder.18.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %2 = call @encoder.18.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %3 = call @encoder.18.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %4 = call @encoder.18.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %5 = call @encoder.18.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %6 = call @encoder.18.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %7 = call @encoder.18.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %8 = call @encoder.18.layers.8.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// FloatFunctional residual add: aten::add with alpha = 1, followed by
// the (currently identity) activation_post_process observer hook.
func private @encoder.18.skip_add.add(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c1_i64 = constant 1 : i64
  %0 = torch.kernel_call "aten::add" %arg0, %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %1 = call @encoder.18.skip_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization observer hook — identity in this (non-observing) export.
func private @encoder.18.skip_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Grouped conv1d: stride [1], padding [3], dilation [1], groups 8
// (operand order per the aten::conv1d signature: input, weight, bias,
// stride, padding, dilation, groups). Padding 3 is consistent with a
// kernel size of 7 — TODO confirm from the weight shape.
func private @encoder.18.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c1_i64 = constant 1 : i64
  %c3_i64 = constant 3 : i64
  %c8_i64 = constant 8 : i64
  %0 = torch.global_slot.get @encoder.18.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @encoder.18.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
  %4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm1d forward: validates input rank, bumps num_batches_tracked
// in training mode, then calls functional batch_norm with
// momentum = 0.1 and eps = 1e-5 using the layer's running statistics
// and affine parameters from global slots.
func private @encoder.18.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %bool_true = basicpy.bool_constant true
  %c1_i64 = constant 1 : i64
  // momentum = 0.1, eps = 1e-5 (BatchNorm1d defaults).
  %cst = constant 1.000000e-01 : f64
  %cst_0 = constant 1.000000e-05 : f64
  %0 = call @encoder.18.layers.1._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
  %1 = torch.global_slot.get @encoder.18.layers.1.training : !basicpy.BoolType
  %2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
  // Training mode: increment the num_batches_tracked counter in place
  // (read slot, add 1, write slot back).
  scf.if %2 {
    %15 = torch.global_slot.get @encoder.18.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
    %16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    torch.global_slot.set @encoder.18.layers.1.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
  } else {
  }
  %3 = torch.global_slot.get @encoder.18.layers.1.training : !basicpy.BoolType
  %4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
  // %5 is the `training` flag passed to functional batch_norm: true
  // only when the module is in training mode.
  %5 = scf.if %4 -> (!basicpy.BoolType) {
    scf.yield %bool_true : !basicpy.BoolType
  } else {
    scf.yield %bool_false : !basicpy.BoolType
  }
  %6 = torch.global_slot.get @encoder.18.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
  %7 = torch.global_slot.get @encoder.18.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype>
  %8 = torch.global_slot.get @encoder.18.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %9 = torch.global_slot.get @encoder.18.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // Widen concrete ndarrays to optional<ndarray> to match the callee's
  // Tensor? parameters.
  %10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation layer: delegates to the shared functional relu
// implementation. The `true` flag is presumably the `inplace` argument
// of torch.nn.functional.relu — TODO confirm against relu$2's signature.
func private @encoder.18.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_true = basicpy.bool_constant true
  %0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout layer with p = 0.1. The training flag is read from the
// module's global slot at call time; the trailing `false` is presumably
// the `inplace` argument of functional.dropout — TODO confirm.
func private @encoder.18.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  // Dropout probability p = 0.1.
  %cst = constant 1.000000e-01 : f64
  %0 = torch.global_slot.get @encoder.18.layers.3.training : !basicpy.BoolType
  %1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-and-excitation block: global-average-pool over dim 2,
// unsqueeze back to 3-D, reduce conv -> swish -> expand conv ->
// sigmoid, then gate the original input by elementwise multiply.
func private @encoder.18.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %c2_i64 = constant 2 : i64
  %0 = basicpy.singleton : !basicpy.NoneType
  %1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
  // Mean over dim 2 with keepdim=false, dtype=None, then restore the
  // squeezed dimension so the conv1d below sees a 3-D tensor.
  %2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %4 = call @encoder.18.layers.4._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %5 = call @encoder.18.layers.4.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %6 = call @encoder.18.layers.4._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %7 = call @encoder.18.layers.4.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  // Gate: sigmoid(excitation) * input.
  %8 = call @encoder.18.layers.4.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Pointwise conv1d: stride [1], padding [0], dilation [1], groups 1
// (operand order per the aten::conv1d signature: input, weight, bias,
// stride, padding, dilation, groups).
func private @encoder.18.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c1_i64 = constant 1 : i64
  %c0_i64 = constant 0 : i64
  %0 = torch.global_slot.get @encoder.18.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @encoder.18.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
  %4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm1d forward: validates input rank, bumps num_batches_tracked
// in training mode, then calls functional batch_norm with
// momentum = 0.1 and eps = 1e-5 using the layer's running statistics
// and affine parameters from global slots.
func private @encoder.18.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  %bool_true = basicpy.bool_constant true
  %c1_i64 = constant 1 : i64
  // momentum = 0.1, eps = 1e-5 (BatchNorm1d defaults).
  %cst = constant 1.000000e-01 : f64
  %cst_0 = constant 1.000000e-05 : f64
  %0 = call @encoder.18.layers.6._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
  %1 = torch.global_slot.get @encoder.18.layers.6.training : !basicpy.BoolType
  %2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
  // Training mode: increment the num_batches_tracked counter in place
  // (read slot, add 1, write slot back).
  scf.if %2 {
    %15 = torch.global_slot.get @encoder.18.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
    %16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
    torch.global_slot.set @encoder.18.layers.6.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
  } else {
  }
  %3 = torch.global_slot.get @encoder.18.layers.6.training : !basicpy.BoolType
  %4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
  // %5 is the `training` flag passed to functional batch_norm: true
  // only when the module is in training mode.
  %5 = scf.if %4 -> (!basicpy.BoolType) {
    scf.yield %bool_true : !basicpy.BoolType
  } else {
    scf.yield %bool_false : !basicpy.BoolType
  }
  %6 = torch.global_slot.get @encoder.18.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
  %7 = torch.global_slot.get @encoder.18.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype>
  %8 = torch.global_slot.get @encoder.18.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %9 = torch.global_slot.get @encoder.18.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype>
  // Widen concrete ndarrays to optional<ndarray> to match the callee's
  // Tensor? parameters.
  %10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation layer: delegates to the shared functional relu
// implementation. The `true` flag is presumably the `inplace` argument
// of torch.nn.functional.relu — TODO confirm against relu$2's signature.
func private @encoder.18.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_true = basicpy.bool_constant true
  %0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout layer with p = 0.1. The training flag is read from the
// module's global slot at call time; the trailing `false` is presumably
// the `inplace` argument of functional.dropout — TODO confirm.
func private @encoder.18.layers.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %bool_false = basicpy.bool_constant false
  // Dropout probability p = 0.1.
  %cst = constant 1.000000e-01 : f64
  %0 = torch.global_slot.get @encoder.18.layers.8.training : !basicpy.BoolType
  %1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm1d input-rank check: raises an exception unless the input
// tensor has rank 2 or rank 3 (i.e. raise when dim != 2 AND dim != 3).
// The scf.if below is the short-circuit lowering of the `and`.
func private @encoder.18.layers.6._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
  %bytes = basicpy.bytes_constant "Exception"
  %bool_false = basicpy.bool_constant false
  %c2_i64 = constant 2 : i64
  %c3_i64 = constant 3 : i64
  %0 = basicpy.singleton : !basicpy.NoneType
  %1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
  %2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
  %3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
  // Only evaluate `dim != 3` when `dim != 2` already held (short-circuit).
  %4 = scf.if %3 -> (!basicpy.BoolType) {
    %6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
    scf.yield %7 : !basicpy.BoolType
  } else {
    scf.yield %bool_false : !basicpy.BoolType
  }
  %5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
  // Rank is neither 2 nor 3: raise (exception detail reduced to a
  // generic "Exception" payload by the importer).
  scf.if %5 {
    torch.prim.RaiseException %bytes
  } else {
  }
  return %0 : !basicpy.NoneType
}
// Squeeze-and-excitation "reduce" projection: conv1d with stride [1],
// padding [0], dilation [1], groups 1 (operand order per the
// aten::conv1d signature: input, weight, bias, stride, padding,
// dilation, groups).
func private @encoder.18.layers.4._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c1_i64 = constant 1 : i64
  %c0_i64 = constant 0 : i64
  %0 = torch.global_slot.get @encoder.18.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @encoder.18.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
  %4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Swish activation: x * sigmoid(x).
func private @encoder.18.layers.4.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = call @encoder.18.layers.4.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-and-excitation "expand" projection: conv1d with stride [1],
// padding [0], dilation [1], groups 1 (operand order per the
// aten::conv1d signature: input, weight, bias, stride, padding,
// dilation, groups).
func private @encoder.18.layers.4._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c1_i64 = constant 1 : i64
  %c0_i64 = constant 0 : i64
  %0 = torch.global_slot.get @encoder.18.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @encoder.18.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
  %4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid (gating activation of the squeeze-excite block).
func private @encoder.18.layers.4.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// FloatFunctional multiply: elementwise mul followed by the (currently
// identity) activation_post_process observer hook.
func private @encoder.18.layers.4.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %1 = call @encoder.18.layers.4.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization observer hook — identity in this (non-observing) export.
func private @encoder.18.layers.4.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid used inside the Swish activation.
func private @encoder.18.layers.4.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm1d input-rank check: raises an exception unless the input
// tensor has rank 2 or rank 3 (i.e. raise when dim != 2 AND dim != 3).
// The scf.if below is the short-circuit lowering of the `and`.
func private @encoder.18.layers.1._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
  %bytes = basicpy.bytes_constant "Exception"
  %bool_false = basicpy.bool_constant false
  %c2_i64 = constant 2 : i64
  %c3_i64 = constant 3 : i64
  %0 = basicpy.singleton : !basicpy.NoneType
  %1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
  %2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
  %3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
  // Only evaluate `dim != 3` when `dim != 2` already held (short-circuit).
  %4 = scf.if %3 -> (!basicpy.BoolType) {
    %6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
    %7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
    scf.yield %7 : !basicpy.BoolType
  } else {
    scf.yield %bool_false : !basicpy.BoolType
  }
  %5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
  // Rank is neither 2 nor 3: raise (exception detail reduced to a
  // generic "Exception" payload by the importer).
  scf.if %5 {
    torch.prim.RaiseException %bytes
  } else {
  }
  return %0 : !basicpy.NoneType
}
// Sequential container for encoder block 17: chains sublayers 0..8.
// Sublayer 0 is visible below (grouped conv1d); the remaining callees
// presumably mirror the conv/batchnorm/relu/dropout/squeeze-excite
// pattern of block 18 — their definitions are outside this chunk.
func private @encoder.17.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %0 = call @encoder.17.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %1 = call @encoder.17.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %2 = call @encoder.17.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %3 = call @encoder.17.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %4 = call @encoder.17.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %5 = call @encoder.17.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %6 = call @encoder.17.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %7 = call @encoder.17.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %8 = call @encoder.17.layers.8.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// FloatFunctional residual add: aten::add with alpha = 1, followed by
// the (currently identity) activation_post_process observer hook.
func private @encoder.17.skip_add.add(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c1_i64 = constant 1 : i64
  %0 = torch.kernel_call "aten::add" %arg0, %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %1 = call @encoder.17.skip_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization observer hook — identity in this (non-observing) export.
func private @encoder.17.skip_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Grouped conv1d: stride [1], padding [3], dilation [1], groups 8
// (operand order per the aten::conv1d signature: input, weight, bias,
// stride, padding, dilation, groups). Padding 3 is consistent with a
// kernel size of 7 — TODO confirm from the weight shape.
func private @encoder.17.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  %c1_i64 = constant 1 : i64
  %c3_i64 = constant 3 : i64
  %c8_i64 = constant 8 : i64
  %0 = torch.global_slot.get @encoder.17.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %1 = torch.global_slot.get @encoder.17.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
  %4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
  %5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm forward (encoder.17 layer 1). Mirrors torch.nn.BatchNorm1d.forward:
// validates input rank, bumps num_batches_tracked when training, then calls
// functional.batch_norm with momentum=0.1 and eps=1e-5.
func private @encoder.17.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%c1_i64 = constant 1 : i64
// momentum (0.1) and eps (1e-5), passed last to batch_norm below.
%cst = constant 1.000000e-01 : f64
%cst_0 = constant 1.000000e-05 : f64
%0 = call @encoder.17.layers.1._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
%1 = torch.global_slot.get @encoder.17.layers.1.training : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
// Training mode only: num_batches_tracked += 1.
scf.if %2 {
%15 = torch.global_slot.get @encoder.17.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
%16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
torch.global_slot.set @encoder.17.layers.1.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
}
// Re-read the training flag to select batch_norm's `training` argument.
%3 = torch.global_slot.get @encoder.17.layers.1.training : !basicpy.BoolType
%4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
%5 = scf.if %4 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
// Gather running stats + affine parameters and wrap them as optionals.
%6 = torch.global_slot.get @encoder.17.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
%7 = torch.global_slot.get @encoder.17.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @encoder.17.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @encoder.17.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation (encoder.17 layer 2), inplace=true.
func private @encoder.17.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout (encoder.17 layer 3): p=0.1, inplace=false; active only when the
// module's `training` slot is true.
func private @encoder.17.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.17.layers.3.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-and-excitation block (encoder.17 layer 4):
// squeeze  = unsqueeze(mean(x, dim=[2], keepdim=false), dim=2)
// gate     = sigmoid(_se_expand(swish(_se_reduce(squeeze))))
// output   = gate * x   (via the quantization-aware f_add.mul wrapper)
func private @encoder.17.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.17.layers.4._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.17.layers.4.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.17.layers.4._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.17.layers.4.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.17.layers.4.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Pointwise 1-D convolution (encoder.17 layer 5):
// aten::conv1d(input, weight, bias, stride=[1], padding=[0], dilation=[1], groups=1).
func private @encoder.17.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.17.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.17.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm forward (encoder.17 layer 6). Same structure as layer 1:
// rank check, conditional num_batches_tracked increment, then
// functional.batch_norm with momentum=0.1 and eps=1e-5.
func private @encoder.17.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%c1_i64 = constant 1 : i64
// momentum (0.1) and eps (1e-5) for batch_norm.
%cst = constant 1.000000e-01 : f64
%cst_0 = constant 1.000000e-05 : f64
%0 = call @encoder.17.layers.6._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
%1 = torch.global_slot.get @encoder.17.layers.6.training : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
// Training mode only: num_batches_tracked += 1.
scf.if %2 {
%15 = torch.global_slot.get @encoder.17.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
%16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
torch.global_slot.set @encoder.17.layers.6.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
}
%3 = torch.global_slot.get @encoder.17.layers.6.training : !basicpy.BoolType
%4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
%5 = scf.if %4 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
// Running stats + affine parameters, wrapped as optionals for batch_norm.
%6 = torch.global_slot.get @encoder.17.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
%7 = torch.global_slot.get @encoder.17.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @encoder.17.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @encoder.17.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype>
%10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation (encoder.17 layer 7), inplace=true.
func private @encoder.17.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout (encoder.17 layer 8): p=0.1, inplace=false; gated by the module's
// `training` slot.
func private @encoder.17.layers.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.17.layers.8.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Input-rank guard for the batch-norm layer: raises an exception unless
// dim(input) is 2 or 3 (i.e. raises when dim != 2 AND dim != 3).
func private @encoder.17.layers.6._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
%bytes = basicpy.bytes_constant "Exception"
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%c3_i64 = constant 3 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
// Short-circuit AND: only test dim != 3 when dim != 2 already holds.
%4 = scf.if %3 -> (!basicpy.BoolType) {
%6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %7 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
scf.if %5 {
torch.prim.RaiseException %bytes
} else {
}
return %0 : !basicpy.NoneType
}
// SE "reduce" pointwise conv1d (encoder.17 layer 4):
// stride=[1], padding=[0], dilation=[1], groups=1.
func private @encoder.17.layers.4._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.17.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.17.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Swish activation: x * sigmoid(x).
func private @encoder.17.layers.4.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.17.layers.4.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE "expand" pointwise conv1d (encoder.17 layer 4):
// stride=[1], padding=[0], dilation=[1], groups=1.
func private @encoder.17.layers.4._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.17.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.17.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid (SE gate activation).
func private @encoder.17.layers.4.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization-aware elementwise multiply: aten::mul(arg0, arg1) followed by
// the (identity) activation observer.
func private @encoder.17.layers.4.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.17.layers.4.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization observer stub: identity pass-through.
func private @encoder.17.layers.4.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid used inside the swish activation.
func private @encoder.17.layers.4.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Input-rank guard for the batch-norm layer: raises an exception unless
// dim(input) is 2 or 3 (i.e. raises when dim != 2 AND dim != 3).
func private @encoder.17.layers.1._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
%bytes = basicpy.bytes_constant "Exception"
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%c3_i64 = constant 3 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
// Short-circuit AND: only test dim != 3 when dim != 2 already holds.
%4 = scf.if %3 -> (!basicpy.BoolType) {
%6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %7 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
scf.if %5 {
torch.prim.RaiseException %bytes
} else {
}
return %0 : !basicpy.NoneType
}
// Sequential container for encoder block 16: chains sub-layers 0..8 in order
// (conv -> batchnorm -> relu -> dropout -> SE -> conv -> batchnorm -> relu -> dropout,
// per the sub-layer definitions in this file).
func private @encoder.16.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.16.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.16.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @encoder.16.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%3 = call @encoder.16.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%4 = call @encoder.16.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.16.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.16.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.16.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.16.layers.8.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Residual skip connection for encoder block 16:
// computes aten::add(%arg0, %arg1, alpha=1), then routes the result through the
// quantization activation observer (an identity pass-through in this export).
func private @encoder.16.skip_add.add(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%0 = torch.kernel_call "aten::add" %arg0, %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.16.skip_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization observer stub: identity pass-through.
func private @encoder.16.skip_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Grouped 1-D convolution (encoder.16 layer 0):
// aten::conv1d(input, weight, bias, stride=[1], padding=[3], dilation=[1], groups=8).
func private @encoder.16.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c3_i64 = constant 3 : i64
%c8_i64 = constant 8 : i64
%0 = torch.global_slot.get @encoder.16.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.16.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm forward (encoder.16 layer 1). Mirrors torch.nn.BatchNorm1d.forward:
// validates input rank, bumps num_batches_tracked when training, then calls
// functional.batch_norm with momentum=0.1 and eps=1e-5.
func private @encoder.16.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%c1_i64 = constant 1 : i64
// momentum (0.1) and eps (1e-5) for batch_norm.
%cst = constant 1.000000e-01 : f64
%cst_0 = constant 1.000000e-05 : f64
%0 = call @encoder.16.layers.1._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
%1 = torch.global_slot.get @encoder.16.layers.1.training : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
// Training mode only: num_batches_tracked += 1.
scf.if %2 {
%15 = torch.global_slot.get @encoder.16.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
%16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
torch.global_slot.set @encoder.16.layers.1.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
}
%3 = torch.global_slot.get @encoder.16.layers.1.training : !basicpy.BoolType
%4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
%5 = scf.if %4 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
// Running stats + affine parameters, wrapped as optionals for batch_norm.
%6 = torch.global_slot.get @encoder.16.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
%7 = torch.global_slot.get @encoder.16.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @encoder.16.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @encoder.16.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation (encoder.16 layer 2), inplace=true.
func private @encoder.16.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout (encoder.16 layer 3): p=0.1, inplace=false; gated by the module's
// `training` slot.
func private @encoder.16.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.16.layers.3.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-and-excitation block (encoder.16 layer 4):
// squeeze  = unsqueeze(mean(x, dim=[2], keepdim=false), dim=2)
// gate     = sigmoid(_se_expand(swish(_se_reduce(squeeze))))
// output   = gate * x   (via the quantization-aware f_add.mul wrapper)
func private @encoder.16.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.16.layers.4._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.16.layers.4.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.16.layers.4._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.16.layers.4.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.16.layers.4.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Pointwise 1-D convolution (encoder.16 layer 5):
// aten::conv1d(input, weight, bias, stride=[1], padding=[0], dilation=[1], groups=1).
func private @encoder.16.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.16.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.16.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// BatchNorm forward (encoder.16 layer 6). Same structure as layer 1:
// rank check, conditional num_batches_tracked increment, then
// functional.batch_norm with momentum=0.1 and eps=1e-5.
func private @encoder.16.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%c1_i64 = constant 1 : i64
// momentum (0.1) and eps (1e-5) for batch_norm.
%cst = constant 1.000000e-01 : f64
%cst_0 = constant 1.000000e-05 : f64
%0 = call @encoder.16.layers.6._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
%1 = torch.global_slot.get @encoder.16.layers.6.training : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
// Training mode only: num_batches_tracked += 1.
scf.if %2 {
%15 = torch.global_slot.get @encoder.16.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
%16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
torch.global_slot.set @encoder.16.layers.6.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
}
%3 = torch.global_slot.get @encoder.16.layers.6.training : !basicpy.BoolType
%4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
%5 = scf.if %4 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
// Running stats + affine parameters, wrapped as optionals for batch_norm.
%6 = torch.global_slot.get @encoder.16.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
%7 = torch.global_slot.get @encoder.16.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @encoder.16.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @encoder.16.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype>
%10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation (encoder.16 layer 7), inplace=true.
func private @encoder.16.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout (encoder.16 layer 8): p=0.1, inplace=false; gated by the module's
// `training` slot.
func private @encoder.16.layers.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.16.layers.8.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Input-rank guard for the batch-norm layer: raises an exception unless
// dim(input) is 2 or 3 (i.e. raises when dim != 2 AND dim != 3).
func private @encoder.16.layers.6._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
%bytes = basicpy.bytes_constant "Exception"
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%c3_i64 = constant 3 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
// Short-circuit AND: only test dim != 3 when dim != 2 already holds.
%4 = scf.if %3 -> (!basicpy.BoolType) {
%6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %7 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
scf.if %5 {
torch.prim.RaiseException %bytes
} else {
}
return %0 : !basicpy.NoneType
}
// SE-block "reduce" convolution: aten::conv1d with stride=[1], padding=[0],
// dilation=[1], groups=1. Weight/bias are fetched from this module's global
// slots (bias is optional).
func private @encoder.16.layers.4._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.16.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.16.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Swish/SiLU activation: returns x * sigmoid(x); the sigmoid is computed by
// the child sigmoid submodule.
func private @encoder.16.layers.4.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.16.layers.4.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE-block "expand" convolution: aten::conv1d with stride=[1], padding=[0],
// dilation=[1], groups=1, using this module's weight/bias global slots.
func private @encoder.16.layers.4._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.16.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.16.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid via aten::sigmoid (non-mutating).
func private @encoder.16.layers.4.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization-friendly functional multiply: aten::mul(lhs, rhs) followed by
// this module's activation post-process hook.
func private @encoder.16.layers.4.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.16.layers.4.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Activation post-process hook: pass-through (returns the input unchanged).
func private @encoder.16.layers.4.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Sigmoid submodule of the swish activation: aten::sigmoid on the input.
func private @encoder.16.layers.4.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Input-rank validation (BatchNorm1d-style): raises an exception unless the
// input tensor is 2-D or 3-D. Returns None on success.
func private @encoder.16.layers.1._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
%bytes = basicpy.bytes_constant "Exception"
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%c3_i64 = constant 3 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
// Short-circuit "dim != 2 and dim != 3": only test dim != 3 when dim != 2.
%4 = scf.if %3 -> (!basicpy.BoolType) {
%6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %7 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
scf.if %5 {
torch.prim.RaiseException %bytes
} else {
}
return %0 : !basicpy.NoneType
}
// Sequential container forward: feeds the input through child layers 0..8 in
// order, each consuming the previous layer's output.
func private @encoder.15.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.15.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.15.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @encoder.15.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%3 = call @encoder.15.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%4 = call @encoder.15.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.15.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.15.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.15.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.15.layers.8.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Residual skip connection: aten::add(lhs, rhs, alpha=1), then this module's
// activation post-process hook.
func private @encoder.15.skip_add.add(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%0 = torch.kernel_call "aten::add" %arg0, %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.15.skip_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Activation post-process hook: pass-through (returns the input unchanged).
func private @encoder.15.skip_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Grouped 1-D convolution: aten::conv1d with stride=[1], padding=[3],
// dilation=[1], groups=8; weight/bias come from this module's global slots.
func private @encoder.15.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c3_i64 = constant 3 : i64
%c8_i64 = constant 8 : i64
%0 = torch.global_slot.get @encoder.15.layers.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.15.layers.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Batch-norm forward: validates input rank, increments num_batches_tracked
// when training, then calls functional batch_norm with the running stats and
// affine parameters from this module's global slots (momentum=0.1, eps=1e-5).
func private @encoder.15.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%c1_i64 = constant 1 : i64
%cst = constant 1.000000e-01 : f64
%cst_0 = constant 1.000000e-05 : f64
%0 = call @encoder.15.layers.1._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
%1 = torch.global_slot.get @encoder.15.layers.1.training : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
// In training mode: num_batches_tracked += 1 (read-modify-write of the slot).
scf.if %2 {
%15 = torch.global_slot.get @encoder.15.layers.1.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
%16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
torch.global_slot.set @encoder.15.layers.1.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
}
%3 = torch.global_slot.get @encoder.15.layers.1.training : !basicpy.BoolType
%4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
// Select the `training` argument for functional batch_norm.
%5 = scf.if %4 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%6 = torch.global_slot.get @encoder.15.layers.1.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
%7 = torch.global_slot.get @encoder.15.layers.1.running_var : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @encoder.15.layers.1.weight : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @encoder.15.layers.1.bias : !numpy.ndarray<*:!numpy.any_dtype>
%10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation: functional relu with inplace=true.
func private @encoder.15.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout layer forward: functional dropout with p=0.1 and inplace=false.
// The training flag is read from this layer's global slot at call time.
func private @encoder.15.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.15.layers.3.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-and-Excitation block forward: average over dim 2 (keepdim=false),
// unsqueeze back to rank 3, then reduce -> swish -> expand -> sigmoid to form
// a gate, and finally multiply the gate with the original input (f_add.mul).
func private @encoder.15.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.15.layers.4._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.15.layers.4.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.15.layers.4._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.15.layers.4.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.15.layers.4.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// 1-D convolution: aten::conv1d with stride=[1], padding=[0], dilation=[1],
// groups=1, using this module's weight/bias global slots.
func private @encoder.15.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.15.layers.5.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.15.layers.5.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Batch-norm forward: validates input rank, increments num_batches_tracked
// when training, then calls functional batch_norm with the running stats and
// affine parameters from this module's global slots (momentum=0.1, eps=1e-5).
func private @encoder.15.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%bool_true = basicpy.bool_constant true
%c1_i64 = constant 1 : i64
%cst = constant 1.000000e-01 : f64
%cst_0 = constant 1.000000e-05 : f64
%0 = call @encoder.15.layers.6._check_input_dim(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType
%1 = torch.global_slot.get @encoder.15.layers.6.training : !basicpy.BoolType
%2 = basicpy.bool_cast %1 : !basicpy.BoolType -> i1
// In training mode: num_batches_tracked += 1 (read-modify-write of the slot).
scf.if %2 {
%15 = torch.global_slot.get @encoder.15.layers.6.num_batches_tracked : !numpy.ndarray<*:!numpy.any_dtype>
%16 = torch.kernel_call "aten::add" %15, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
torch.global_slot.set @encoder.15.layers.6.num_batches_tracked = %16 : !numpy.ndarray<*:!numpy.any_dtype>
} else {
}
%3 = torch.global_slot.get @encoder.15.layers.6.training : !basicpy.BoolType
%4 = basicpy.bool_cast %3 : !basicpy.BoolType -> i1
// Select the `training` argument for functional batch_norm.
%5 = scf.if %4 -> (!basicpy.BoolType) {
scf.yield %bool_true : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%6 = torch.global_slot.get @encoder.15.layers.6.running_mean : !numpy.ndarray<*:!numpy.any_dtype>
%7 = torch.global_slot.get @encoder.15.layers.6.running_var : !numpy.ndarray<*:!numpy.any_dtype>
%8 = torch.global_slot.get @encoder.15.layers.6.weight : !numpy.ndarray<*:!numpy.any_dtype>
%9 = torch.global_slot.get @encoder.15.layers.6.bias : !numpy.ndarray<*:!numpy.any_dtype>
%10 = torch.derefine %6 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%11 = torch.derefine %7 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%12 = torch.derefine %8 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%13 = torch.derefine %9 : !numpy.ndarray<*:!numpy.any_dtype> -> !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%14 = call @__torch__.torch.nn.functional.batch_norm$4(%arg0, %10, %11, %12, %13, %5, %cst, %cst_0) : (!numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.BoolType, f64, f64) -> !numpy.ndarray<*:!numpy.any_dtype>
return %14 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation: functional relu with inplace=true.
func private @encoder.15.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout layer forward: functional dropout with p=0.1 and inplace=false.
// The training flag is read from this layer's global slot at call time.
func private @encoder.15.layers.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.15.layers.8.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Input-rank validation (BatchNorm1d-style): raises an exception unless the
// input tensor is 2-D or 3-D. Returns None on success.
func private @encoder.15.layers.6._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
%bytes = basicpy.bytes_constant "Exception"
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%c3_i64 = constant 3 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
// Short-circuit "dim != 2 and dim != 3": only test dim != 3 when dim != 2.
%4 = scf.if %3 -> (!basicpy.BoolType) {
%6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %7 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
scf.if %5 {
torch.prim.RaiseException %bytes
} else {
}
return %0 : !basicpy.NoneType
}
// SE-block "reduce" convolution: aten::conv1d with stride=[1], padding=[0],
// dilation=[1], groups=1, using this module's weight/bias global slots.
func private @encoder.15.layers.4._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.15.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.15.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Swish/SiLU activation: returns x * sigmoid(x); the sigmoid is computed by
// the child sigmoid submodule.
func private @encoder.15.layers.4.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.15.layers.4.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE-block "expand" convolution: aten::conv1d with stride=[1], padding=[0],
// dilation=[1], groups=1, using this module's weight/bias global slots.
func private @encoder.15.layers.4._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.15.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.15.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid via aten::sigmoid (non-mutating).
func private @encoder.15.layers.4.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Quantization-friendly functional multiply: aten::mul(lhs, rhs) followed by
// this module's activation post-process hook.
func private @encoder.15.layers.4.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.15.layers.4.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Activation post-process hook: pass-through (returns the input unchanged).
func private @encoder.15.layers.4.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Sigmoid submodule of the swish activation: aten::sigmoid on the input.
func private @encoder.15.layers.4.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Input-rank validation (BatchNorm1d-style): raises an exception unless the
// input tensor is 2-D or 3-D. Returns None on success.
func private @encoder.15.layers.1._check_input_dim(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.NoneType {
%bytes = basicpy.bytes_constant "Exception"
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%c3_i64 = constant 3 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%2 = torch.kernel_call "aten::ne" %1, %c2_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
%3 = basicpy.bool_cast %2 : !basicpy.BoolType -> i1
// Short-circuit "dim != 2 and dim != 3": only test dim != 3 when dim != 2.
%4 = scf.if %3 -> (!basicpy.BoolType) {
%6 = torch.kernel_call "aten::dim" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> i64 {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["int"]}
%7 = torch.kernel_call "aten::ne" %6, %c3_i64 : (i64, i64) -> !basicpy.BoolType {sigArgTypes = ["int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["bool"]}
scf.yield %7 : !basicpy.BoolType
} else {
scf.yield %bool_false : !basicpy.BoolType
}
%5 = basicpy.bool_cast %4 : !basicpy.BoolType -> i1
scf.if %5 {
torch.prim.RaiseException %bytes
} else {
}
return %0 : !basicpy.NoneType
}
// Sequential container forward: feeds the input through child layers 0..7 in
// order, each consuming the previous layer's output.
func private @encoder.14.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.14.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.14.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @encoder.14.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%3 = call @encoder.14.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%4 = call @encoder.14.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.14.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.14.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.14.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %7 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Residual skip connection: aten::add(lhs, rhs, alpha=1), then this module's
// activation post-process hook.
func private @encoder.14.skip_add.add(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%0 = torch.kernel_call "aten::add" %arg0, %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.14.skip_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Activation post-process hook: pass-through (returns the input unchanged).
func private @encoder.14.skip_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Sequential pair: chains child submodule 0.0 then 0.1 on the input.
func private @encoder.14.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.14.layers.0.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.14.layers.0.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity module: pass-through (returns the input unchanged).
func private @encoder.14.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity module: pass-through (returns the input unchanged).
func private @encoder.14.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout layer forward: functional dropout with p=0.1 and inplace=false.
// The training flag is read from this layer's global slot at call time.
func private @encoder.14.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.14.layers.3.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Sequential pair: chains child submodule 4.0 then 4.1 on the input.
func private @encoder.14.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.14.layers.4.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.14.layers.4.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity module: pass-through (returns the input unchanged).
func private @encoder.14.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity module: pass-through (returns the input unchanged).
func private @encoder.14.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout layer forward: functional dropout with p=0.1 and inplace=false.
// The training flag is read from this layer's global slot at call time.
func private @encoder.14.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.14.layers.7.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// 1-D convolution: aten::conv1d with stride=[1], padding=[0], dilation=[1],
// groups=1, using this module's weight/bias global slots.
func private @encoder.14.layers.4.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.14.layers.4.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.14.layers.4.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.14.layers.4.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// aten::conv1d with weight/bias read from global slots.
// %2 = stride [2], %3 = padding [3], %4 = dilation [1], groups = 8.
// Stride 2 downsamples the time axis; groups = 8 makes this a grouped conv.
func private @encoder.14.layers.0.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c2_i64 = constant 2 : i64
%c3_i64 = constant 3 : i64
%c1_i64 = constant 1 : i64
%c8_i64 = constant 8 : i64
%0 = torch.global_slot.get @encoder.14.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.14.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU via nn.functional.relu; bool_true is presumably `inplace=True` —
// TODO confirm against relu$2's definition.
func private @encoder.14.layers.0.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Sequential forward for encoder.13: runs child layers 0..17 in order,
// each consuming the previous layer's output.
func private @encoder.13.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.13.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.13.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @encoder.13.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%3 = call @encoder.13.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%4 = call @encoder.13.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.13.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.13.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.13.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.13.layers.8.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @encoder.13.layers.9.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%10 = call @encoder.13.layers.10.forward(%9) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%11 = call @encoder.13.layers.11.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%12 = call @encoder.13.layers.12.forward(%11) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%13 = call @encoder.13.layers.13.forward(%12) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%14 = call @encoder.13.layers.14.forward(%13) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%15 = call @encoder.13.layers.15.forward(%14) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%16 = call @encoder.13.layers.16.forward(%15) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%17 = call @encoder.13.layers.17.forward(%16) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %17 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Residual/skip add: aten::add(%arg0, %arg1, alpha = 1), then routed through
// the (identity) activation post-process observer below.
func private @encoder.13.skip_add.add(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%0 = torch.kernel_call "aten::add" %arg0, %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.13.skip_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity pass-through. NOTE(review): the name suggests a quantization
// observer that has been folded to a no-op — confirm against the source model.
func private @encoder.13.skip_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Layer 0: conv (layers.0.0) followed by activation (layers.0.1).
func private @encoder.13.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.13.layers.0.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.13.layers.0.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns its input unchanged.
func private @encoder.13.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns its input unchanged.
func private @encoder.13.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1; training flag from this module's global slot.
// The trailing bool_false is presumably `inplace` — TODO confirm.
func private @encoder.13.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.13.layers.3.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-and-excitation style gate:
//   squeeze: mean over dim 2 (keepdim = false), then unsqueeze back at dim 2;
//   excite:  _se_reduce -> swish -> _se_expand -> sigmoid;
//   scale:   elementwise multiply of the gate with the original input
//            (via f_add.mul, which also runs the no-op observer).
func private @encoder.13.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.13.layers.4._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.13.layers.4.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.13.layers.4._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.13.layers.4.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.13.layers.4.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Layer 5: conv (layers.5.0) followed by activation (layers.5.1).
func private @encoder.13.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.13.layers.5.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.13.layers.5.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns its input unchanged.
func private @encoder.13.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns its input unchanged.
func private @encoder.13.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1; training flag from this module's global slot.
// The trailing bool_false is presumably `inplace` — TODO confirm.
func private @encoder.13.layers.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.13.layers.8.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Layer 9: conv (layers.9.0) followed by activation (layers.9.1).
func private @encoder.13.layers.9.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.13.layers.9.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.13.layers.9.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns its input unchanged.
func private @encoder.13.layers.10.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns its input unchanged.
func private @encoder.13.layers.11.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1; training flag from this module's global slot.
// The trailing bool_false is presumably `inplace` — TODO confirm.
func private @encoder.13.layers.12.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.13.layers.12.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-and-excitation style gate (same structure as layers.4):
//   squeeze: mean over dim 2 (keepdim = false), then unsqueeze back at dim 2;
//   excite:  _se_reduce -> swish -> _se_expand -> sigmoid;
//   scale:   elementwise multiply of the gate with the original input.
func private @encoder.13.layers.13.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.13.layers.13._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.13.layers.13.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.13.layers.13._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.13.layers.13.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.13.layers.13.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Layer 14: conv (layers.14.0) followed by activation (layers.14.1).
func private @encoder.13.layers.14.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.13.layers.14.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.13.layers.14.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns its input unchanged.
func private @encoder.13.layers.15.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns its input unchanged.
func private @encoder.13.layers.16.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout with p = 0.1; training flag from this module's global slot.
// The trailing bool_false is presumably `inplace` — TODO confirm.
func private @encoder.13.layers.17.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.13.layers.17.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// aten::conv1d with weight/bias read from global slots.
// %2 = stride [1], %3 = padding [0], %4 = dilation [1], groups = 1.
func private @encoder.13.layers.14.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.13.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.13.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU via nn.functional.relu; bool_true is presumably `inplace=True` —
// TODO confirm against relu$2's definition.
func private @encoder.13.layers.14.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE "reduce" conv: aten::conv1d with stride [1], padding [0], dilation [1],
// groups = 1; weight/bias from global slots.
func private @encoder.13.layers.13._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.13.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.13.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Swish activation: x * sigmoid(x).
func private @encoder.13.layers.13.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.13.layers.13.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE "expand" conv: aten::conv1d with stride [1], padding [0], dilation [1],
// groups = 1; weight/bias from global slots.
func private @encoder.13.layers.13._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.13.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.13.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid.
func private @encoder.13.layers.13.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise multiply, then the (identity) post-process observer.
func private @encoder.13.layers.13.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.13.layers.13.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity pass-through. NOTE(review): name suggests a quantization observer
// folded to a no-op — confirm against the source model.
func private @encoder.13.layers.13.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid (inner op of the swish activation above).
func private @encoder.13.layers.13.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// aten::conv1d with weight/bias read from global slots.
// %2 = stride [1], %3 = padding [3], %4 = dilation [1], groups = 8.
func private @encoder.13.layers.9.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c3_i64 = constant 3 : i64
%c8_i64 = constant 8 : i64
%0 = torch.global_slot.get @encoder.13.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.13.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU via nn.functional.relu; bool_true is presumably `inplace=True` —
// TODO confirm against relu$2's definition.
func private @encoder.13.layers.9.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// aten::conv1d with weight/bias read from global slots.
// %2 = stride [1], %3 = padding [0], %4 = dilation [1], groups = 1.
func private @encoder.13.layers.5.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.13.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.13.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU via nn.functional.relu; bool_true is presumably `inplace=True` —
// TODO confirm against relu$2's definition.
func private @encoder.13.layers.5.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE "reduce" conv: aten::conv1d with stride [1], padding [0], dilation [1],
// groups = 1; weight/bias from global slots.
func private @encoder.13.layers.4._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.13.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.13.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Swish activation: x * sigmoid(x).
func private @encoder.13.layers.4.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.13.layers.4.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE "expand" conv: aten::conv1d with stride [1], padding [0], dilation [1],
// groups = 1; weight/bias from global slots.
func private @encoder.13.layers.4._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.13.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.13.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid.
func private @encoder.13.layers.4.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise multiply, then the (identity) post-process observer.
func private @encoder.13.layers.4.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.13.layers.4.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity pass-through. NOTE(review): name suggests a quantization observer
// folded to a no-op — confirm against the source model.
func private @encoder.13.layers.4.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid (inner op of the swish activation above).
func private @encoder.13.layers.4.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// aten::conv1d with weight/bias read from global slots.
// %2 = stride [1], %3 = padding [3], %4 = dilation [1], groups = 8.
func private @encoder.13.layers.0.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c3_i64 = constant 3 : i64
%c8_i64 = constant 8 : i64
%0 = torch.global_slot.get @encoder.13.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.13.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU via nn.functional.relu; bool_true is presumably `inplace=True` —
// TODO confirm against relu$2's definition.
func private @encoder.13.layers.0.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Sequential forward for encoder.12: runs child layers 0..17 in order,
// each consuming the previous layer's output.
func private @encoder.12.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.12.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.12.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @encoder.12.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%3 = call @encoder.12.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%4 = call @encoder.12.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.12.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.12.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.12.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.12.layers.8.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @encoder.12.layers.9.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%10 = call @encoder.12.layers.10.forward(%9) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%11 = call @encoder.12.layers.11.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%12 = call @encoder.12.layers.12.forward(%11) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%13 = call @encoder.12.layers.13.forward(%12) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%14 = call @encoder.12.layers.14.forward(%13) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%15 = call @encoder.12.layers.15.forward(%14) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%16 = call @encoder.12.layers.16.forward(%15) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%17 = call @encoder.12.layers.17.forward(%16) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %17 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Residual/skip add: aten::add(%arg0, %arg1, alpha = 1), then routed through
// the (identity) activation post-process observer below.
func private @encoder.12.skip_add.add(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%0 = torch.kernel_call "aten::add" %arg0, %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.12.skip_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity pass-through. NOTE(review): name suggests a quantization observer
// folded to a no-op — confirm against the source model.
func private @encoder.12.skip_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.12.layers.0.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.12.layers.0.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.12.layers.3.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.12.layers.4._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.12.layers.4.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.12.layers.4._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.12.layers.4.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.12.layers.4.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.12.layers.5.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.12.layers.5.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.12.layers.8.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.9.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.12.layers.9.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.12.layers.9.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.10.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.11.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.12.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.12.layers.12.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.13.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.12.layers.13._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.12.layers.13.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.12.layers.13._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.12.layers.13.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.12.layers.13.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.14.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.12.layers.14.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.12.layers.14.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.15.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.16.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.17.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.12.layers.17.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.14.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.12.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.12.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.14.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.13._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.12.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.12.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.13.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.12.layers.13.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.13._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.12.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.12.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.13.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.13.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.12.layers.13.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.13.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.13.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.9.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c3_i64 = constant 3 : i64
%c8_i64 = constant 8 : i64
%0 = torch.global_slot.get @encoder.12.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.12.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.9.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.5.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.12.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.12.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.5.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.4._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.12.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.12.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.4.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.12.layers.4.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.4._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.12.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.12.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.4.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.4.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.12.layers.4.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.4.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.4.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.0.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c3_i64 = constant 3 : i64
%c8_i64 = constant 8 : i64
%0 = torch.global_slot.get @encoder.12.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.12.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.12.layers.0.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.11.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.11.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @encoder.11.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%3 = call @encoder.11.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%4 = call @encoder.11.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.11.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.11.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.11.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.11.layers.8.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @encoder.11.layers.9.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%10 = call @encoder.11.layers.10.forward(%9) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%11 = call @encoder.11.layers.11.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%12 = call @encoder.11.layers.12.forward(%11) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%13 = call @encoder.11.layers.13.forward(%12) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%14 = call @encoder.11.layers.14.forward(%13) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%15 = call @encoder.11.layers.15.forward(%14) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%16 = call @encoder.11.layers.16.forward(%15) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%17 = call @encoder.11.layers.17.forward(%16) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %17 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.skip_add.add(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%0 = torch.kernel_call "aten::add" %arg0, %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.11.skip_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.skip_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.11.layers.0.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.11.layers.0.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.11.layers.3.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.11.layers.4._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.11.layers.4.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.11.layers.4._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.11.layers.4.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.11.layers.4.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.11.layers.5.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.11.layers.5.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.11.layers.8.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.9.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.11.layers.9.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.11.layers.9.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.10.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.11.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.11.layers.12.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.11.layers.12.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-and-Excitation gate: mean over dim 2 (keepdim=false), unsqueeze the
// reduced dim back, then reduce-conv -> swish -> expand-conv -> sigmoid to
// produce channel gates, and finally multiply the gates into the original
// input via the f_add.mul wrapper (which also runs the post-process hook).
func private @encoder.11.layers.13.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
// Global average pool over the last axis; assumes input is (N, C, T) — TODO confirm.
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.11.layers.13._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.11.layers.13.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.11.layers.13._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.11.layers.13.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.11.layers.13.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Two-stage sequential container: sub-layer 14.0 (pointwise conv1d, see below)
// followed by sub-layer 14.1 (in-place ReLU).
func private @encoder.11.layers.14.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.11.layers.14.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.11.layers.14.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns the input unchanged (no computation in this slot).
func private @encoder.11.layers.15.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns the input unchanged (no computation in this slot).
func private @encoder.11.layers.16.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout layer, p = 0.1, inplace = false; training flag comes from the
// module's global slot (pass-through when false).
func private @encoder.11.layers.17.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.11.layers.17.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// 1-D convolution: weight/bias come from global slots; stride=[1], padding=[0],
// dilation=[1], groups=1 (kernel size is implicit in the weight tensor).
func private @encoder.11.layers.14.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.11.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.11.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation; the `true` flag requests the in-place variant.
func private @encoder.11.layers.14.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE "squeeze" 1-D convolution (channel reduction): weight/bias from global
// slots; stride=[1], padding=[0], dilation=[1], groups=1.
func private @encoder.11.layers.13._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.11.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.11.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Swish / SiLU activation: returns x * sigmoid(x), with sigmoid computed by
// the child sigmoid module.
func private @encoder.11.layers.13.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.11.layers.13.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE "excite" 1-D convolution (channel expansion): weight/bias from global
// slots; stride=[1], padding=[0], dilation=[1], groups=1.
func private @encoder.11.layers.13._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.11.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.11.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid (final gating nonlinearity of the SE block).
func private @encoder.11.layers.13.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise multiply of the two inputs, then the activation post-process
// hook (an identity here — presumably a quantization observer stub; confirm).
func private @encoder.11.layers.13.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.11.layers.13.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Activation post-process hook: identity pass-through (presumably an inert
// quantization observer — confirm against the quantized-model config).
func private @encoder.11.layers.13.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid (used by the swish module above).
func private @encoder.11.layers.13.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Grouped 1-D convolution: weight/bias from global slots; stride=[1],
// padding=[3], dilation=[1], groups=8 (same-padding for a kernel of size 7 —
// kernel size lives in the weight tensor, TODO confirm).
func private @encoder.11.layers.9.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c3_i64 = constant 3 : i64
%c8_i64 = constant 8 : i64
%0 = torch.global_slot.get @encoder.11.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.11.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation; the `true` flag requests the in-place variant.
func private @encoder.11.layers.9.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// 1-D convolution: weight/bias from global slots; stride=[1], padding=[0],
// dilation=[1], groups=1.
func private @encoder.11.layers.5.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.11.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.11.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation; the `true` flag requests the in-place variant.
func private @encoder.11.layers.5.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE "squeeze" 1-D convolution (channel reduction): weight/bias from global
// slots; stride=[1], padding=[0], dilation=[1], groups=1.
func private @encoder.11.layers.4._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.11.layers.4._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.11.layers.4._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Swish / SiLU activation: returns x * sigmoid(x), with sigmoid computed by
// the child sigmoid module.
func private @encoder.11.layers.4.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.11.layers.4.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.kernel_call "aten::mul" %arg0, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE "excite" 1-D convolution (channel expansion): weight/bias from global
// slots; stride=[1], padding=[0], dilation=[1], groups=1.
func private @encoder.11.layers.4._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.11.layers.4._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.11.layers.4._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid (final gating nonlinearity of the SE block).
func private @encoder.11.layers.4.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise multiply of the two inputs, then the activation post-process
// hook (an identity here — presumably a quantization observer stub; confirm).
func private @encoder.11.layers.4.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.11.layers.4.f_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Activation post-process hook: identity pass-through (presumably an inert
// quantization observer — confirm against the quantized-model config).
func private @encoder.11.layers.4.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Elementwise sigmoid (used by the swish module above).
func private @encoder.11.layers.4.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Grouped 1-D convolution: weight/bias from global slots; stride=[1],
// padding=[3], dilation=[1], groups=8 (kernel size is implicit in the weight).
func private @encoder.11.layers.0.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c3_i64 = constant 3 : i64
%c8_i64 = constant 8 : i64
%0 = torch.global_slot.get @encoder.11.layers.0.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.11.layers.0.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c3_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c8_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation; the `true` flag requests the in-place variant.
func private @encoder.11.layers.0.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Sequential body of encoder block 10: chains the 18 sub-layers (indices
// 0..17) in order, threading each result into the next call.
func private @encoder.10.layers.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.10.layers.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.10.layers.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%2 = call @encoder.10.layers.2.forward(%1) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%3 = call @encoder.10.layers.3.forward(%2) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%4 = call @encoder.10.layers.4.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.10.layers.5.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.10.layers.6.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.10.layers.7.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.10.layers.8.forward(%7) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%9 = call @encoder.10.layers.9.forward(%8) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%10 = call @encoder.10.layers.10.forward(%9) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%11 = call @encoder.10.layers.11.forward(%10) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%12 = call @encoder.10.layers.12.forward(%11) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%13 = call @encoder.10.layers.13.forward(%12) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%14 = call @encoder.10.layers.14.forward(%13) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%15 = call @encoder.10.layers.15.forward(%14) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%16 = call @encoder.10.layers.16.forward(%15) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%17 = call @encoder.10.layers.17.forward(%16) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %17 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Residual (skip) connection: aten::add(%arg0, %arg1, alpha=1) followed by
// the activation post-process hook (identity here).
func private @encoder.10.skip_add.add(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%0 = torch.kernel_call "aten::add" %arg0, %arg1, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%1 = call @encoder.10.skip_add.activation_post_process.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Activation post-process hook: identity pass-through (presumably an inert
// quantization observer — confirm against the quantized-model config).
func private @encoder.10.skip_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Two-stage sequential container: sub-layer 0.0 then sub-layer 0.1 (their
// bodies are defined elsewhere; presumably conv + ReLU as in the encoder.11
// twin — confirm).
func private @encoder.10.layers.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.10.layers.0.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.10.layers.0.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns the input unchanged (no computation in this slot).
func private @encoder.10.layers.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns the input unchanged (no computation in this slot).
func private @encoder.10.layers.2.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout layer, p = 0.1, inplace = false; training flag comes from the
// module's global slot (pass-through when false).
func private @encoder.10.layers.3.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.10.layers.3.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-and-Excitation gate: mean over dim 2 (keepdim=false), unsqueeze the
// reduced dim back, then reduce-conv -> swish -> expand-conv -> sigmoid to
// produce channel gates, and finally multiply the gates into the original
// input via the f_add.mul wrapper (which also runs the post-process hook).
func private @encoder.10.layers.4.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
// Global average pool over the last axis; assumes input is (N, C, T) — TODO confirm.
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.10.layers.4._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.10.layers.4.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.10.layers.4._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.10.layers.4.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.10.layers.4.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Two-stage sequential container: sub-layer 5.0 then sub-layer 5.1 (their
// bodies are defined elsewhere; presumably conv + ReLU as in the encoder.11
// twin — confirm).
func private @encoder.10.layers.5.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.10.layers.5.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.10.layers.5.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns the input unchanged (no computation in this slot).
func private @encoder.10.layers.6.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns the input unchanged (no computation in this slot).
func private @encoder.10.layers.7.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout layer, p = 0.1, inplace = false; training flag comes from the
// module's global slot (pass-through when false).
func private @encoder.10.layers.8.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.10.layers.8.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Two-stage sequential container: sub-layer 9.0 then sub-layer 9.1 (their
// bodies are defined elsewhere; presumably grouped conv + ReLU as in the
// encoder.11 twin — confirm).
func private @encoder.10.layers.9.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.10.layers.9.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.10.layers.9.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns the input unchanged (no computation in this slot).
func private @encoder.10.layers.10.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns the input unchanged (no computation in this slot).
func private @encoder.10.layers.11.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout layer, p = 0.1, inplace = false; training flag comes from the
// module's global slot (pass-through when false).
func private @encoder.10.layers.12.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.10.layers.12.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Squeeze-and-Excitation gate: mean over dim 2 (keepdim=false), unsqueeze the
// reduced dim back, then reduce-conv -> swish -> expand-conv -> sigmoid to
// produce channel gates, and finally multiply the gates into the original
// input via the f_add.mul wrapper (which also runs the post-process hook).
func private @encoder.10.layers.13.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%c2_i64 = constant 2 : i64
%0 = basicpy.singleton : !basicpy.NoneType
%1 = basicpy.build_list %c2_i64 : (i64) -> !basicpy.ListType
// Global average pool over the last axis; assumes input is (N, C, T) — TODO confirm.
%2 = torch.kernel_call "aten::mean" %arg0, %1, %bool_false, %0 : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.ListType, !basicpy.BoolType, !basicpy.NoneType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int[]", "bool", "int?"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%3 = torch.kernel_call "aten::unsqueeze" %2, %c2_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = call @encoder.10.layers.13._se_reduce.forward(%3) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%5 = call @encoder.10.layers.13.swish.forward(%4) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%6 = call @encoder.10.layers.13._se_expand.forward(%5) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%7 = call @encoder.10.layers.13.sigmoid.forward(%6) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%8 = call @encoder.10.layers.13.f_add.mul(%7, %arg0) : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %8 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Two-stage sequential container: sub-layer 14.0 (pointwise conv1d, see
// below) followed by sub-layer 14.1 (in-place ReLU).
func private @encoder.10.layers.14.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%0 = call @encoder.10.layers.14.0.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
%1 = call @encoder.10.layers.14.1.forward(%0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns the input unchanged (no computation in this slot).
func private @encoder.10.layers.15.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Identity layer: returns the input unchanged (no computation in this slot).
func private @encoder.10.layers.16.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Dropout layer, p = 0.1, inplace = false; training flag comes from the
// module's global slot (pass-through when false).
func private @encoder.10.layers.17.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%0 = torch.global_slot.get @encoder.10.layers.17.training : !basicpy.BoolType
%1 = call @__torch__.torch.nn.functional.dropout$3(%arg0, %cst, %0, %bool_false) : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %1 : !numpy.ndarray<*:!numpy.any_dtype>
}
// 1-D convolution: weight/bias from global slots; stride=[1], padding=[0],
// dilation=[1], groups=1 (kernel size is implicit in the weight tensor).
func private @encoder.10.layers.14.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.10.layers.14.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.10.layers.14.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
// ReLU activation; the `true` flag requests the in-place variant.
func private @encoder.10.layers.14.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%bool_true = basicpy.bool_constant true
%0 = call @__torch__.torch.nn.functional.relu$2(%arg0, %bool_true) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
return %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// SE "squeeze" 1-D convolution (channel reduction): weight/bias from global
// slots; stride=[1], padding=[0], dilation=[1], groups=1.
func private @encoder.10.layers.13._se_reduce.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.10.layers.13._se_reduce.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.10.layers.13._se_reduce.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
return %5 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.10.layers.13.swish.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Swish activation: x * sigmoid(x). The sigmoid is computed by the child
  // submodule's forward; the product is taken with aten::mul.
  %gate = call @encoder.10.layers.13.swish.sigmoid.forward(%arg0) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  %gated = torch.kernel_call "aten::mul" %arg0, %gate : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %gated : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.10.layers.13._se_expand.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Squeeze-and-excitation expand step: aten::conv1d over the input with the
  // module's `_se_expand` weight/bias global slots, stride 1, padding 0,
  // dilation 1, groups 1.
  %one = constant 1 : i64
  %zero = constant 0 : i64
  %weight = torch.global_slot.get @encoder.10.layers.13._se_expand.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @encoder.10.layers.13._se_expand.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %stride = basicpy.build_list %one : (i64) -> !basicpy.ListType
  %padding = basicpy.build_list %zero : (i64) -> !basicpy.ListType
  %dilation = basicpy.build_list %one : (i64) -> !basicpy.ListType
  %conv = torch.kernel_call "aten::conv1d" %arg0, %weight, %bias, %stride, %padding, %dilation, %one : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %conv : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.10.layers.13.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Elementwise sigmoid via aten::sigmoid.
  %sig = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %sig : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.10.layers.13.f_add.mul(%arg0: !numpy.ndarray<*:!numpy.any_dtype>, %arg1: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Elementwise product of the two inputs, routed through the module's
  // activation_post_process (a quantization observer hook; currently an
  // identity pass-through -- see its forward below).
  %product = torch.kernel_call "aten::mul" %arg0, %arg1 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  %observed = call @encoder.10.layers.13.f_add.activation_post_process.forward(%product) : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %observed : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.10.layers.13.f_add.activation_post_process.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Identity: the quantization observer here records nothing at inference
  // time, so the input is returned unchanged.
  return %arg0 : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.10.layers.13.swish.sigmoid.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Elementwise sigmoid via aten::sigmoid (gate input for the swish parent).
  %sig = torch.kernel_call "aten::sigmoid" %arg0 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %sig : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.10.layers.9.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Grouped aten::conv1d with the layer's weight/bias global slots:
  // stride 1, padding 3, dilation 1, groups 8.
  %one = constant 1 : i64
  %three = constant 3 : i64
  %groups = constant 8 : i64
  %weight = torch.global_slot.get @encoder.10.layers.9.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
  %bias = torch.global_slot.get @encoder.10.layers.9.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
  %stride = basicpy.build_list %one : (i64) -> !basicpy.ListType
  %padding = basicpy.build_list %three : (i64) -> !basicpy.ListType
  %dilation = basicpy.build_list %one : (i64) -> !basicpy.ListType
  %conv = torch.kernel_call "aten::conv1d" %arg0, %weight, %bias, %stride, %padding, %dilation, %groups : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor?", "int[]", "int[]", "int[]", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
  return %conv : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.10.layers.9.1.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
  // Applies torch.nn.functional.relu to the input.
  // NOTE(review): the boolean is presumably F.relu's `inplace` flag --
  // confirm against the relu$2 definition elsewhere in this file.
  %inplace = basicpy.bool_constant true
  %activated = call @__torch__.torch.nn.functional.relu$2(%arg0, %inplace) : (!numpy.ndarray<*:!numpy.any_dtype>, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype>
  return %activated : !numpy.ndarray<*:!numpy.any_dtype>
}
func private @encoder.10.layers.5.0.forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {
%c1_i64 = constant 1 : i64
%c0_i64 = constant 0 : i64
%0 = torch.global_slot.get @encoder.10.layers.5.0.weight : !numpy.ndarray<*:!numpy.any_dtype>
%1 = torch.global_slot.get @encoder.10.layers.5.0.bias : !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
%2 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%3 = basicpy.build_list %c0_i64 : (i64) -> !basicpy.ListType
%4 = basicpy.build_list %c1_i64 : (i64) -> !basicpy.ListType
%5 = torch.kernel_call "aten::conv1d" %arg0, %0, %1, %2, %3, %4, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !torch.optional<!numpy.ndarray<*:!numpy.any_dtype>>
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment