@renxida
Created January 31, 2024 04:01
This file has been truncated.
~/torch-mlir/build/bin/torch-mlir-opt --convert-torch-to-linalg --convert-torch-to-tmtensor --debug -mlir-disable-threading -mlir-print-ir-after-all ./stripped-opt-125M.fp32.onnx.torch.mlir &> /tmp/torchopt.out
/home/azureuser/torch-mlir/build/bin/torch-mlir-opt: /home/azureuser/miniconda/lib/libtinfo.so.6: no version information available (required by /home/azureuser/torch-mlir/build/bin/torch-mlir-opt)
Args: /home/azureuser/torch-mlir/build/bin/torch-mlir-opt --convert-torch-to-linalg --convert-torch-to-tmtensor --debug -mlir-disable-threading -mlir-print-ir-after-all ./stripped-opt-125M.fp32.onnx.torch.mlir
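The invocation above runs two conversion passes (--convert-torch-to-linalg and --convert-torch-to-tmtensor) with --debug tracing enabled; -mlir-disable-threading keeps the per-op trace ordered, and -mlir-print-ir-after-all dumps the module after each pass. A minimal Python sketch for scripting the same run and capturing the combined output, with each flag annotated (paths and flags are taken from the command above; the bare binary name assumes ~/torch-mlir/build/bin is on PATH):

    import subprocess

    # Reproduce the run above: send both stdout and stderr to one log file,
    # mirroring the `&> /tmp/torchopt.out` redirection in the shell command.
    cmd = [
        "torch-mlir-opt",                     # assumes ~/torch-mlir/build/bin is on PATH
        "--convert-torch-to-linalg",          # convert torch dialect ops to linalg
        "--convert-torch-to-tmtensor",        # run the TorchToTMTensor conversion
        "--debug",                            # enable LLVM/MLIR debug tracing
        "-mlir-disable-threading",            # serialize work so the trace stays readable
        "-mlir-print-ir-after-all",           # print the module after every pass
        "./stripped-opt-125M.fp32.onnx.torch.mlir",
    ]
    with open("/tmp/torchopt.out", "wb") as log:
        subprocess.run(cmd, stdout=log, stderr=subprocess.STDOUT, check=False)

The debug stream below begins with the MLIRContext lazily loading the dialects and interfaces the input module and the passes refer to.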
Load new dialect in Context builtin
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ShapedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemRefLayoutAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::TypedAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ElementsAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DistinctAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::BytecodeOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SymbolOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpAsmOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::RegionKindInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ConditionallySpeculatable)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemoryEffectOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ResourceBlobManagerDialectInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpAsmDialectInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::BytecodeDialectInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::AffineBinaryOpExprStorage)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::AffineConstantExprStorage)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::AffineDimExprStorage)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::AffineMapStorage)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::IntegerSetStorage)
Load new dialect in Context builtin
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ZeroOperands<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneRegion<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ZeroResults<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ZeroSuccessors<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NoRegionArguments<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NoTerminator<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::SingleBlock<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OpInvariants<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::BytecodeOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::AffineScope<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::IsIsolatedFromAbove<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::SymbolTable<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SymbolOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpAsmOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::RegionKindInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::HasOnlyGraphRegion<Empty>)
Load new dialect in Context func
ImplicitTypeIDRegistry::lookupOrInsert(mlir::CallOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SymbolUserOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::CallableOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::FunctionOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::RegionBranchTerminatorOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DialectInlinerInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ConvertToLLVMPatternInterface)
Load new dialect in Context cf
Load new dialect in Context arith
ImplicitTypeIDRegistry::lookupOrInsert(mlir::arith::ArithFastMathInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::VectorUnrollOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::InferTypeOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::InferIntRangeInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::CastOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::BranchOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::AutomaticAllocationScope<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::CallableOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::FunctionOpInterface::Trait<Empty>)
Load new dialect in Context torch
ImplicitTypeIDRegistry::lookupOrInsert(mlir::RegionBranchOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ZeroRegions<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneResult<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::ValueTensorType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::InferTypeOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ConstantLike<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ConditionallySpeculatable::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::AlwaysSpeculatableImplTrait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemoryEffectOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DialectResourceBlobHandle<mlir::BuiltinDialect>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::Torch::detail::ValueTensorLiteralOpGenericAdaptorBase::Properties)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::IntType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::Torch::OpTrait::AllowedInModuleInitializer<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::Type>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NOperands<3>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::Torch::OpTrait::AllowsTypeRefinement<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::Torch::OpTrait::ReadOnly<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneOperand<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::Torch::OpTrait::HasValueSemantics<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::BoolType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NOperands<2>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::ListType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::VariadicOperands<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::Torch::detail::ConstantBoolOpGenericAdaptorBase::Properties)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NOperands<4>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::NoneType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NOperands<5>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::FloatType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::Torch::detail::ConstantFloatOpGenericAdaptorBase::Properties)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NResults<3>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::HasParent<mlir::func::FuncOp>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::MemRefsNormalizable<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::RegionBranchTerminatorOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ReturnLike<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::IsTerminator<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::OpToOpPassAdaptor)
Load new dialect in Context complex
Load new dialect in Context linalg
Load new dialect in Context affine
Load new dialect in Context ub
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ub::PoisonAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::affine::AffineDmaStartOp)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::affine::AffineMapAccessInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::affine::AffineDmaWaitOp)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::LoopLikeOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::affine::AffineReadOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::affine::AffineWriteOpInterface)
Load new dialect in Context math
Load new dialect in Context memref
ImplicitTypeIDRegistry::lookupOrInsert(mlir::CopyOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::PromotableMemOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DestructurableAccessorOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::PromotableAllocationOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DestructurableAllocationOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ViewLikeOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ShapedDimOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OffsetSizeAndStrideOpInterface)
Load new dialect in Context tensor
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ReifyRankedShapedTypeOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DestinationStyleOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::AggregatedOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::TilingInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::LinalgOp)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::ContractionOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::ConvolutionOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::FillOpInterface)
Load new dialect in Context tm_tensor
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::TMTensor::TMTensorOp)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::TMTensor::ScalarLoopOpInterface)
Load new dialect in Context torch_c
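The "Load new dialect in Context ..." and ImplicitTypeIDRegistry::lookupOrInsert lines above reflect lazy, memoized registration: a dialect (or an interface's TypeID) is materialized the first time something refers to it, and later references hit the cache silently. A toy sketch of that lookup-or-insert idiom (illustrative only, not MLIR's implementation):

    class LazyRegistry:
        """Memoized lookup-or-insert, in the spirit of the registry lines above."""
        def __init__(self):
            self._loaded = {}

        def lookup_or_insert(self, name, factory):
            if name not in self._loaded:
                print(f"Load new dialect in Context {name}")  # logged on first use only
                self._loaded[name] = factory()
            return self._loaded[name]

    registry = LazyRegistry()
    registry.lookup_or_insert("func", dict)  # first use: loads and logs
    registry.lookup_or_insert("func", dict)  # cached: silent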
//===-------------------------------------------===//
Legalizing operation : 'func.func'(0x5616a31c0320) {
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31aa4b0) {
%0 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.embed_tokens.weight> : tensor<50272x768xf32>}> : () -> !torch.vtensor<[50272,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
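Each block like the one above is MLIR's dialect-conversion driver at work: the driver first asks the target whether the op is already legal (as with 'func.func', "operation marked legal by the target"), then tries the op's folder, then tries each registered pattern in registration order. When every pattern fails to match, as for torch.vtensor.literal here, a partial conversion leaves any op the target does not explicitly mark illegal in place for a later pass, which is why the run continues past these FAILURE lines. A toy Python model of that control flow, with pattern names mirroring the trace (the match predicates are stand-ins, not torch-mlir's actual logic):

    # Toy model of the per-op legalization loop visible in the trace: fold
    # first, then try each pattern in order, reporting the first success or
    # an overall failure.
    PATTERNS = [
        ("ConvertAtenScalarToTensorLike", lambda op: op.startswith("torch.aten.tensor")),
        ("ConvertElementwiseOp",          lambda op: op.startswith("torch.aten.add")),
        ("ConvertReductionOp",            lambda op: op.startswith("torch.aten.sum")),
    ]

    def legalize(op_name: str) -> bool:
        print(f"Legalizing operation : '{op_name}'")
        print("  * Fold -> FAILURE : unable to fold")  # literals fold to themselves
        for name, matches in PATTERNS:
            print(f'  Trying to match "{name}"')
            if matches(op_name):
                print(f'  "{name}" -> SUCCESS')
                return True
            print(f'  "{name}" -> FAILURE : pattern failed to match')
        print("  -> FAILURE : no matched legalization pattern")
        return False

    legalize("torch.vtensor.literal")  # three failures, as in the trace above

The same trace then repeats for every weight literal in the module: the driver visits each torch.vtensor.literal (one per dense_resource tensor) in turn, and each fails the same fold attempt and the same three patterns.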
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ab430) {
%1 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.embed_positions.weight> : tensor<2050x768xf32>}> : () -> !torch.vtensor<[2050,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ac3b0) {
%2 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31aca30) {
%3 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ad560) {
%4 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ad790) {
%5 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ad9c0) {
%6 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31adbf0) {
%7 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ae2a0) {
%8 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ae4d0) {
%9 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31aeb50) {
%10 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31afad0) {
%11 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a316c9d0) {
%12 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b06a0) {
%13 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b0d10) {
%14 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b0f40) {
%15 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.0.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b1170) {
%16 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b13a0) {
%17 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b15d0) {
%18 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b1800) {
%19 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b1a30) {
%20 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b1c60) {
%21 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b1e60) {
%22 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b24e0) {
%23 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31affa0) {
%24 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b3280) {
%25 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b3450) {
%26 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b3680) {
%27 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.1.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b38b0) {
%28 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b3ae0) {
%29 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b3d10) {
%30 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b3f40) {
%31 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b4170) {
%32 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b43a0) {
%33 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b45a0) {
%34 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b47a0) {
%35 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b49a0) {
%36 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b4ba0) {
%37 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b5250) {
%38 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b5480) {
%39 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.2.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b56b0) {
%40 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b58e0) {
%41 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b5b10) {
%42 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b6550) {
%43 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b6780) {
%44 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b69b0) {
%45 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b6bb0) {
%46 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b75c0) {
%47 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b2a00) {
%48 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b2c70) {
%49 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b2ea0) {
%50 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b30d0) {
%51 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.3.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
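This failure trace now repeats once per frozen parameter: for each remaining decoder layer, the q/k/v/out-proj biases, the self-attn layer-norm weight and bias, the fc1/fc2 weight and bias, and the final layer-norm weight and bias all walk the same three patterns below. The "not a supported elementwise op" / "not a supported reduce op" messages come from a match gate at the top of each pattern; a hedged sketch of that shape (the op list is illustrative and the lowering is elided):

#include "mlir/Transforms/DialectConversion.h"
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"

using namespace mlir;

namespace {
// A pattern registered with MatchAnyOpTypeTag() is offered every op,
// so it must bail out itself; that bail-out is exactly what prints
// '** Failure : not a supported elementwise op' under --debug.
class ConvertElementwiseLike : public ConversionPattern {
public:
  ConvertElementwiseLike(TypeConverter &typeConverter, MLIRContext *ctx)
      : ConversionPattern(typeConverter, MatchAnyOpTypeTag(), /*benefit=*/1,
                          ctx) {}
  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    if (!isa<torch::Torch::AtenTanhOp, torch::Torch::AtenReluOp>(op))
      return rewriter.notifyMatchFailure(op,
                                         "not a supported elementwise op");
    // A real pattern would build a linalg.generic here; this sketch
    // only demonstrates the gate, so it declines to rewrite.
    return rewriter.notifyMatchFailure(op, "lowering elided in sketch");
  }
};
} // namespace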
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b9fa0) {
%52 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ba1b0) {
%53 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ba3e0) {
%54 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ba610) {
%55 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ba840) {
%56 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31baa70) {
%57 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bac70) {
%58 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bae70) {
%59 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bb070) {
%60 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bb270) {
%61 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bb4a0) {
%62 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bb6d0) {
%63 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.4.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
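Note the shape of each attempt: the legalizer always tries "* Fold" before any pattern, and only when folding produces nothing (as with these dense_resource literals) does it walk the pattern list. A minimal sketch of that first step, assuming the long-standing Operation::fold(ArrayRef<Attribute>, ...) overload:

#include "mlir/IR/Operation.h"

using namespace mlir;

// Sketch of the fold-first behavior that shows up as
// '* Fold { } -> FAILURE : unable to fold' in every block above.
static bool foldedAway(Operation *op) {
  SmallVector<OpFoldResult> results;
  // These literals have no operands, so no constant operands to feed in.
  return succeeded(op->fold(/*operands=*/{}, results)) && !results.empty();
}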
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bb900) {
%64 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bbb30) {
%65 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bbd60) {
%66 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bbf90) {
%67 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bc1c0) {
%68 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bc3f0) {
%69 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bc5f0) {
%70 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bc7f0) {
%71 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bc9f0) {
%72 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bcbf0) {
%73 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bce20) {
%74 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bd050) {
%75 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.5.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bd280) {
%76 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bd4b0) {
%77 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bd6e0) {
%78 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bd910) {
%79 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bdb40) {
%80 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bdd70) {
%81 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bdf70) {
%82 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31be170) {
%83 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31be370) {
%84 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31be570) {
%85 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31be7a0) {
%86 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31be9d0) {
%87 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.6.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bec00) {
%88 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
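Only these three patterns are ever probed because each is registered to match any operation (MatchAnyOpTypeTag) rather than a fixed root op, so every op the target cannot otherwise legalize gets offered to exactly this trio. Each one opens with a gate that rejects ops outside its supported set via notifyMatchFailure, which is what produces the "** Failure : not a supported ..." lines. A sketch of that gate, modeled on the failure string in this log (the isa<> list is illustrative, not the pass's full supported set):

#include "mlir/Transforms/DialectConversion.h"
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"

using namespace mlir;
using namespace mlir::torch::Torch;

struct ConvertElementwiseOpSketch : public ConversionPattern {
  using ConversionPattern::ConversionPattern;

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Gate: bail out politely so the driver logs the failure and moves on
    // to the next pattern instead of treating this as an error.
    if (!isa<AtenTanhOp, AtenReluOp, AtenAddTensorOp>(op))
      return rewriter.notifyMatchFailure(op, "not a supported elementwise op");
    // ... lower the supported op to a linalg.generic here ...
    return success();
  }
};

A 'torch.vtensor.literal' is none of these aten ops, so all three gates reject it and the block ends in "no matched legalization pattern".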
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bee30) {
%89 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bf060) {
%90 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b5d40) {
%91 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b5f70) {
%92 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b61a0) {
%93 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c0120) {
%94 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b6db0) {
%95 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b8370) {
%96 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b8570) {
%97 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b87a0) {
%98 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b89d0) {
%99 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.7.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
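None of these FAILURE lines is an error by itself; -debug is simply showing the driver's bookkeeping. Under a partial conversion, an op that no pattern claims is left in the module untouched, and the pass would only abort if the ConversionTarget explicitly marked 'torch.vtensor.literal' illegal. A sketch of a pass tail under that assumption (the dialect list is illustrative, not ConvertTorchToLinalg's exact target):

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

void runPartialConversionTail(Operation *root, MLIRContext &ctx,
                              RewritePatternSet &&patterns) {
  ConversionTarget target(ctx);
  target.addLegalDialect<linalg::LinalgDialect, tensor::TensorDialect,
                         arith::ArithDialect>();
  // 'torch.vtensor.literal' is neither declared legal nor illegal, so
  // failing to legalize it is logged (as above) but tolerated.
  if (failed(applyPartialConversion(root, target, std::move(patterns)))) {
    // Only reached when an op the target marks illegal cannot be rewritten.
  }
}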
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b8c00) {
%100 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b8e30) {
%101 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b7050) {
%102 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b7280) {
%103 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b74b0) {
%104 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c3880) {
%105 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c3a80) {
%106 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c3c80) {
%107 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c4e90) {
%108 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c5090) {
%109 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c52c0) {
%110 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c54f0) {
%111 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.8.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
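Nothing in this trio is meant to touch a constant: the literals survive this pass and are materialized into builtin constants elsewhere in the torch-mlir pipeline. For illustration only, a direct rewrite would be a small OpConversionPattern; this is a hypothetical sketch assuming a TypeConverter that maps !torch.vtensor<[768],f32> to tensor<768xf32>, and assuming the generated accessor for the 'value' attribute is getValueAttr():

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Transforms/DialectConversion.h"
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"

using namespace mlir;
using namespace mlir::torch::Torch;

struct ConvertVtensorLiteralSketch
    : public OpConversionPattern<ValueTensorLiteralOp> {
  using OpConversionPattern::OpConversionPattern;

  LogicalResult
  matchAndRewrite(ValueTensorLiteralOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Guard: only rewrite when the result type is convertible.
    Type newType = getTypeConverter()->convertType(op.getType());
    if (!newType)
      return rewriter.notifyMatchFailure(op, "unconvertible result type");
    // The 'value' attribute (the dense_resource blobs above) already
    // carries the builtin tensor type, so it can feed arith.constant.
    auto value = cast<TypedAttr>(op.getValueAttr());
    rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, value);
    return success();
  }
};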
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c5720) {
%112 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c5950) {
%113 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c5b80) {
%114 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c5db0) {
%115 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c5fe0) {
%116 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c6210) {
%117 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c6410) {
%118 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c6610) {
%119 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c6810) {
%120 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c6a10) {
%121 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c6c40) {
%122 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c6e70) {
%123 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.9.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
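The "* Fold { } -> FAILURE : unable to fold" line on every one of these literals is expected as well: either the op exposes no useful fold hook, or folding a constant-like op can only hand back its own value attribute, and re-materializing that attribute would just recreate the same 'torch.vtensor.literal', so the driver records no progress in either case. The fold hook for such an op typically has this one-line shape (hypothetical sketch, not necessarily torch-mlir's exact definition):

// Constant-like fold hook: return the op's attribute unchanged. During
// legalization this makes no progress, hence "unable to fold" above.
OpFoldResult ValueTensorLiteralOp::fold(FoldAdaptor adaptor) {
  return getValueAttr();
}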
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c70a0) {
%124 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c72d0) {
%125 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c7500) {
%126 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c7730) {
%127 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c7960) {
%128 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c7b90) {
%129 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c7d90) {
%130 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c7f90) {
%131 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c8190) {
%132 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c8390) {
%133 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c85c0) {
%134 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c87f0) {
%135 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.10.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c8a20) {
%136 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.self_attn.k_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c8c50) {
%137 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.self_attn.v_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c8e80) {
%138 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.self_attn.q_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c90b0) {
%139 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.self_attn.out_proj.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c92e0) {
%140 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.self_attn_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c9510) {
%141 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.self_attn_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c9710) {
%142 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.fc1.weight> : tensor<3072x768xf32>}> : () -> !torch.vtensor<[3072,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c9910) {
%143 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.fc1.bias> : tensor<3072xf32>}> : () -> !torch.vtensor<[3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c9b10) {
%144 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.fc2.weight> : tensor<768x3072xf32>}> : () -> !torch.vtensor<[768,3072],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c9d10) {
%145 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.fc2.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c9f40) {
%146 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.final_layer_norm.weight> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ca170) {
%147 = "torch.vtensor.literal"() <{value = dense_resource<_model.decoder.layers.11.final_layer_norm.bias> : tensor<768xf32>}> : () -> !torch.vtensor<[768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ca340) {
%148 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2046> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ca510) {
%149 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2047> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ca6e0) {
%150 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2058> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ca8b0) {
%151 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2094> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31caa80) {
%152 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2102> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cac50) {
%153 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2103> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cae20) {
%154 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2114> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31caff0) {
%155 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2150> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cb1c0) {
%156 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2158> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cb390) {
%157 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2159> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cb560) {
%158 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2170> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cb730) {
%159 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2206> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cb900) {
%160 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2214> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cbad0) {
%161 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2215> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cbca0) {
%162 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2226> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cbe70) {
%163 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2262> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cc040) {
%164 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2270> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cd220) {
%165 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2271> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cd3f0) {
%166 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2282> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cd5c0) {
%167 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2318> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cd790) {
%168 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2326> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cd960) {
%169 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2327> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cdb30) {
%170 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2338> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cdd00) {
%171 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2374> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cded0) {
%172 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2382> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ce0a0) {
%173 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2383> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ce270) {
%174 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2394> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ce440) {
%175 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2430> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ce610) {
%176 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2438> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ce7e0) {
%177 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2439> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ce9b0) {
%178 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2450> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ceb80) {
%179 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2486> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31ced50) {
%180 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2494> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cef20) {
%181 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2495> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cf0f0) {
%182 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2506> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cf2c0) {
%183 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2542> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cf490) {
%184 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2550> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cf660) {
%185 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2551> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31cf830) {
%186 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2562> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bf230) {
%187 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2598> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bf400) {
%188 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2606> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bf5d0) {
%189 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2607> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bf7a0) {
%190 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2618> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bf970) {
%191 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2654> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bfb40) {
%192 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2662> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bfd10) {
%193 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2663> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31bfee0) {
%194 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2674> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31c00b0) {
%195 = "torch.vtensor.literal"() <{value = dense_resource<_onnx__MatMul_2710> : tensor<768x768xf32>}> : () -> !torch.vtensor<[768,768],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x5616a31b7800) {
%196 = "torch.vtensor.literal"() <{value = dense_resource<_> : tensor<2xsi64>}> : () -> !torch.vtensor<[2],si64>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.vtensor.literal -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x5616a31b7920) {
%197 = "torch.constant.int"() <{value = 0 : i64}> : () -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x5616a31b79e0) {
%198 = "torch.constant.int"() <{value = 0 : i64}> : () -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
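[Editor's note: torch.constant.int likewise matches none of the three patterns registered here, which is expected; Torch-level scalar constants are normally materialized by a separate pass (e.g. convert-torch-to-arith in the usual pipeline) rather than by this one. A minimal sketch of that later rewrite, under that assumption:

%int0 = torch.constant.int 0   // before: Torch-dialect scalar constant
%c0 = arith.constant 0 : i64   // after: the arith form a later pass would emit
]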
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.select.int'(0x5616a31b7f90) {
%199 = "torch.aten.select.int"(%196, %197, %198) : (!torch.vtensor<[2],si64>, !torch.int, !torch.int) -> !torch.vtensor<[1],si64>
* Fold {
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DialectFoldInterface)
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.select.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.select.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.select.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.item'(0x5616a31b80c0) {
%200 = "torch.aten.item"(%199) : (!torch.vtensor<[1],si64>) -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.item -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.item -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.item -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.eq.int'(0x5616a31b81b0) {
%201 = "torch.aten.eq.int"(%200, %197) : (!torch.int, !torch.int) -> !torch.bool
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.eq.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.eq.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.eq.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.Int.bool'(0x5616a31c1b50) {
%202 = "torch.aten.Int.bool"(%201) : (!torch.bool) -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.Int.bool -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.Int.bool -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.Int.bool -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x5616a31c2050) {
%203 = "torch.constant.int"() <{value = 1 : i64}> : () -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.mul.int'(0x5616a31c2180) {
%204 = "torch.aten.mul.int"(%202, %203) : (!torch.int, !torch.int) -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.mul.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.mul.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.mul.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.add.int'(0x5616a31c2290) {
%205 = "torch.aten.add.int"(%200, %204) : (!torch.int, !torch.int) -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.add.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.add.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.add.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x5616a31c23a0) {
%206 = "torch.constant.int"() <{value = 1 : i64}> : () -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.select.int'(0x5616a31c2460) {
%207 = "torch.aten.select.int"(%196, %197, %206) : (!torch.vtensor<[2],si64>, !torch.int, !torch.int) -> !torch.vtensor<[1],si64>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.select.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.select.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.select.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.item'(0x5616a31c2590) {
%208 = "torch.aten.item"(%207) : (!torch.vtensor<[1],si64>) -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.item -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.item -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.item -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.eq.int'(0x5616a31c2680) {
%209 = "torch.aten.eq.int"(%208, %197) : (!torch.int, !torch.int) -> !torch.bool
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.eq.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.eq.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.eq.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.Int.bool'(0x5616a31c2790) {
%210 = "torch.aten.Int.bool"(%209) : (!torch.bool) -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.Int.bool -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.Int.bool -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.Int.bool -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x5616a31c3110) {
%211 = "torch.constant.int"() <{value = 6 : i64}> : () -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.mul.int'(0x5616a31c3240) {
%212 = "torch.aten.mul.int"(%210, %211) : (!torch.int, !torch.int) -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.mul.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.mul.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.mul.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.add.int'(0x5616a31c3350) {
%213 = "torch.aten.add.int"(%208, %212) : (!torch.int, !torch.int) -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.add.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.add.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.add.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.prim.ListConstruct'(0x5616a31c3460) {
%214 = "torch.prim.ListConstruct"(%205, %213) : (!torch.int, !torch.int) -> !torch.list<int>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.prim.ListConstruct -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.prim.ListConstruct -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.prim.ListConstruct -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.reshape'(0x5616a31c35e0) {
%215 = "torch.aten.reshape"(%arg0, %214) : (!torch.vtensor<[1,6],si64>, !torch.list<int>) -> !torch.vtensor<[1,6],si64>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.reshape -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.reshape -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.reshape -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
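[Editor's note: the scalar chain feeding this reshape (%199 through %213) is the usual lowering of ONNX Reshape's zero convention, where a 0 in the shape tensor means "keep the input's dimension": each target dim is computed as s + (s == 0) * input_dim, and the constants 1 and 6 are the static dims of the [1,6] input. A worked sketch with a hypothetical shape payload of [0, 6]; the actual dense_resource<_> contents are not shown in this dump:

// dim0 = 0 + (0 == 0) * 1 = 1   -> copy input dim 0
// dim1 = 6 + (6 == 0) * 6 = 6   -> take the literal value
%shape = torch.prim.ListConstruct %int1, %int6 : (!torch.int, !torch.int) -> !torch.list<int>
%r = torch.aten.reshape %arg0, %shape : !torch.vtensor<[1,6],si64>, !torch.list<int> -> !torch.vtensor<[1,6],si64>
]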
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x5616a31d8980) {
%216 = "torch.constant.int"() <{value = 1 : i64}> : () -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x5616a31d8a00) {
%217 = "torch.constant.int"() <{value = 0 : i64}> : () -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.size.int'(0x5616a31d8ac0) {
%218 = "torch.aten.size.int"(%215, %217) : (!torch.vtensor<[1,6],si64>, !torch.int) -> !torch.int
* Fold {
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::Torch::detail::ConstantIntOpGenericAdaptorBase::Properties)
** Insert : 'torch.constant.int'(0x5616a321cf10)
** Replace : 'torch.aten.size.int'(0x5616a31d8ac0)
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x5616a321cf10) {
%218 = "torch.constant.int"() <{value = 1 : i64}> : () -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
} -> FAILURE : failed to legalize generated constant 'torch.constant.int'
* Pattern : 'torch.aten.size.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenSizeIntOp"
ImplicitTypeIDRegistry::lookupOrInsert(mlir::arith::detail::ConstantOpGenericAdaptorBase::Properties)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::InferIntRangeInterface::Trait<Empty>)
** Insert : 'arith.constant'(0x5616a3209ae0)
** Insert : 'arith.addi'(0x5616a3212560)
** Insert : 'arith.constant'(0x5616a31f2cb0)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::arith::detail::CmpIOpGenericAdaptorBase::Properties)
** Insert : 'arith.cmpi'(0x5616a31eb720)
** Insert : 'arith.select'(0x5616a31f53c0)
** Insert : 'arith.constant'(0x5616a32966a0)
** Insert : 'arith.cmpi'(0x5616a3294100)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::cf::detail::AssertOpGenericAdaptorBase::Properties)
** Insert : 'cf.assert'(0x5616a31b3910)
** Insert : 'arith.cmpi'(0x5616a32a3a90)
** Insert : 'cf.assert'(0x5616a3320440)
** Insert : 'arith.index_cast'(0x5616a326ba70)
** Insert : 'tensor.dim'(0x5616a327f710)
** Insert : 'arith.index_cast'(0x5616a32967e0)
** Replace : 'torch.aten.size.int'(0x5616a31d8ac0)
"(anonymous namespace)::ConvertAtenSizeIntOp" result 1
//===-------------------------------------------===//
Legalizing operation : 'arith.constant'(0x5616a3209ae0) {
%220 = "arith.constant"() <{value = 2 : i64}> : () -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.addi'(0x5616a3212560) {
%221 = "arith.addi"(%219, %220) : (i64, i64) -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.constant'(0x5616a31f2cb0) {
%222 = "arith.constant"() <{value = 0 : i64}> : () -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.cmpi'(0x5616a31eb720) {
%223 = "arith.cmpi"(%219, %222) <{predicate = 5 : i64}> : (i64, i64) -> i1
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.select'(0x5616a31f53c0) {
%224 = "arith.select"(%223, %219, %221) : (i1, i64, i64) -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.constant'(0x5616a32966a0) {
%225 = "arith.constant"() <{value = 0 : i64}> : () -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.cmpi'(0x5616a3294100) {
%226 = "arith.cmpi"(%224, %225) <{predicate = 5 : i64}> : (i64, i64) -> i1
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'cf.assert'(0x5616a31b3910) {
"cf.assert"(%226) <{msg = "dim must be greater or equal to zero"}> : (i1) -> ()
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.cmpi'(0x5616a32a3a90) {
%227 = "arith.cmpi"(%224, %220) <{predicate = 2 : i64}> : (i64, i64) -> i1
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'cf.assert'(0x5616a3320440) {
"cf.assert"(%227) <{msg = "dim must be smaller than inputRank"}> : (i1) -> ()
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.index_cast'(0x5616a326ba70) {
%228 = "arith.index_cast"(%224) : (i64) -> index
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'tensor.dim'(0x5616a327f710) {
%229 = "tensor.dim"(%216, %228) : (tensor<1x6xi64>, index) -> index
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.index_cast'(0x5616a32967e0) {
%230 = "arith.index_cast"(%229) : (index) -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
} -> SUCCESS : pattern applied successfully
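[Editor's note: two things happened in this block. First, the fold actually succeeded locally: dim 0 of the statically shaped [1,6] tensor is known, so the folder produced torch.constant.int 1, but that generated constant could not itself be legalized under this target, so the driver rolled the fold back. Second, ConvertAtenSizeIntOp then matched and lowered the query to a rank-normalized, bounds-checked tensor.dim. Condensed from the trace above, with illustrative SSA names:

// before:
%size = torch.aten.size.int %t, %dim : !torch.vtensor<[1,6],si64>, !torch.int -> !torch.int
// after:
%c2 = arith.constant 2 : i64                  // input rank
%norm = arith.addi %dim_i64, %c2 : i64        // dim + rank, to normalize a negative dim
%c0 = arith.constant 0 : i64
%pos = arith.cmpi sge, %dim_i64, %c0 : i64    // predicate 5 == sge
%sel = arith.select %pos, %dim_i64, %norm : i64
%ge0 = arith.cmpi sge, %sel, %c0 : i64
cf.assert %ge0, "dim must be greater or equal to zero"
%lt = arith.cmpi slt, %sel, %c2 : i64         // predicate 2 == slt
cf.assert %lt, "dim must be smaller than inputRank"
%idx = arith.index_cast %sel : i64 to index
%d = tensor.dim %t_builtin, %idx : tensor<1x6xi64>
%res = arith.index_cast %d : index to i64
]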
// *** IR Dump After Pattern Application ***
mlir-asm-printer: Verifying operation: func.func
func.func @main_graph(%arg0: !torch.vtensor<[1,6],si64>) -> !torch.vtensor<[1,6,768],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "2.3.0"} {
%0 = torch.vtensor.literal(dense_resource<_model.decoder.embed_tokens.weight> : tensor<50272x768xf32>) : !torch.vtensor<[50272,768],f32>
%1 = torch.vtensor.literal(dense_resource<_model.decoder.embed_positions.weight> : tensor<2050x768xf32>) : !torch.vtensor<[2050,768],f32>
%2 = torch.vtensor.literal(dense_resource<_model.decoder.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%3 = torch.vtensor.literal(dense_resource<_model.decoder.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%4 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%5 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%6 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%7 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%8 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%9 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%10 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%11 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%12 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%13 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%14 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%15 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%16 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%17 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%18 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%19 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%20 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%21 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%22 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%23 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%24 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%25 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%26 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%27 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%28 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%29 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%30 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%31 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%32 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%33 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%34 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%35 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%36 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%37 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%38 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%39 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%40 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%41 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%42 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%43 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%44 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%45 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%46 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%47 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%48 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%49 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%50 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%51 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%52 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%53 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%54 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%55 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%56 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%57 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%58 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%59 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%60 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%61 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%62 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%63 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%64 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%65 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%66 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%67 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%68 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%69 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%70 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%71 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%72 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%73 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%74 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%75 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%76 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%77 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%78 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%79 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%80 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%81 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%82 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%83 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%84 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%85 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%86 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%87 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%88 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%89 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%90 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%91 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%92 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%93 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%94 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%95 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%96 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%97 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%98 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%99 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%100 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%101 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%102 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%103 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%104 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%105 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%106 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%107 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%108 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%109 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%110 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%111 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%112 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%113 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%114 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%115 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%116 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%117 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%118 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%119 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%120 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%121 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%122 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%123 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%124 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%125 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%126 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%127 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%128 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%129 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%130 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%131 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%132 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%133 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%134 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%135 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%136 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%137 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%138 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%139 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%140 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%141 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%142 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%143 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%144 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%145 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%146 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%147 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
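    // The literals above are the per-layer bias and LayerNorm parameters for decoder
    // layers 6-11 of OPT-125M (hidden size 768, FFN size 3072), named directly by their
    // dense_resource keys. The _onnx__MatMul_* literals below are the ONNX-exported
    // 768x768 projection weights, four per decoder layer (presumably q/k/v/out_proj,
    // going by how they are consumed later in the function).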
%148 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2046> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%149 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2047> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%150 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2058> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%151 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2094> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%152 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2102> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%153 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2103> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%154 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2114> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%155 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2150> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%156 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2158> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%157 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2159> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%158 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2170> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%159 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2206> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%160 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2214> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%161 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2215> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%162 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2226> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%163 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2262> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%164 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2270> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%165 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2271> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%166 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2282> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%167 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2318> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%168 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2326> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%169 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2327> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%170 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2338> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%171 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2374> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%172 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2382> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%173 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2383> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%174 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2394> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%175 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2430> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%176 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2438> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%177 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2439> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%178 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2450> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%179 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2486> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%180 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2494> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%181 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2495> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%182 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2506> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%183 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2542> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%184 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2550> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%185 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2551> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%186 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2562> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%187 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2598> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%188 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2606> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%189 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2607> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%190 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2618> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%191 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2654> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%192 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2662> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%193 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2663> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%194 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2674> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%195 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2710> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
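    // Reshape of %arg0 (the [1,6] input_ids) driven by a constant shape tensor (%196).
    // ONNX Reshape treats a 0 entry as "keep the input dimension at that position", so
    // each select/item/eq.int/Int.bool/mul/add run below computes
    //   dim_i = shape[i] + (shape[i] == 0) * input_size_i
    // with the input sizes 1 and 6 folded in as %int1 / %int6.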
%196 = torch.vtensor.literal(dense_resource<_> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0 = torch.constant.int 0
%int0_0 = torch.constant.int 0
%197 = torch.aten.select.int %196, %int0, %int0_0 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%198 = torch.aten.item %197 : !torch.vtensor<[1],si64> -> !torch.int
%199 = torch.aten.eq.int %198, %int0 : !torch.int, !torch.int -> !torch.bool
%200 = torch.aten.Int.bool %199 : !torch.bool -> !torch.int
%int1 = torch.constant.int 1
%201 = torch.aten.mul.int %200, %int1 : !torch.int, !torch.int -> !torch.int
%202 = torch.aten.add.int %198, %201 : !torch.int, !torch.int -> !torch.int
%int1_1 = torch.constant.int 1
%203 = torch.aten.select.int %196, %int0, %int1_1 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%204 = torch.aten.item %203 : !torch.vtensor<[1],si64> -> !torch.int
%205 = torch.aten.eq.int %204, %int0 : !torch.int, !torch.int -> !torch.bool
%206 = torch.aten.Int.bool %205 : !torch.bool -> !torch.int
%int6 = torch.constant.int 6
%207 = torch.aten.mul.int %206, %int6 : !torch.int, !torch.int -> !torch.int
%208 = torch.aten.add.int %204, %207 : !torch.int, !torch.int -> !torch.int
%209 = torch.prim.ListConstruct %202, %208 : (!torch.int, !torch.int) -> !torch.list<int>
%210 = torch.aten.reshape %arg0, %209 : !torch.vtensor<[1,6],si64>, !torch.list<int> -> !torch.vtensor<[1,6],si64>
%211 = builtin.unrealized_conversion_cast %210 : !torch.vtensor<[1,6],si64> to tensor<1x6xi64>
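    // Mixed torch/arith/cf IR from the in-flight --convert-torch-to-linalg run (this
    // dump is printed mid-pipeline by -mlir-print-ir-after-all): aten.size.int is
    // lowered to negative-dim normalization (dim < 0 ? dim + rank : dim), runtime
    // bounds asserts, and tensor.dim, with unrealized_conversion_cast bridging the
    // torch and builtin types until the conversion completes.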
%int1_2 = torch.constant.int 1
%int0_3 = torch.constant.int 0
%212 = builtin.unrealized_conversion_cast %int0_3 : !torch.int to i64
%c2_i64 = arith.constant 2 : i64
%213 = arith.addi %212, %c2_i64 : i64
%c0_i64 = arith.constant 0 : i64
%214 = arith.cmpi sge, %212, %c0_i64 : i64
%215 = arith.select %214, %212, %213 : i64
%c0_i64_4 = arith.constant 0 : i64
%216 = arith.cmpi sge, %215, %c0_i64_4 : i64
cf.assert %216, "dim must be greater or equal to zero"
%217 = arith.cmpi slt, %215, %c2_i64 : i64
cf.assert %217, "dim must be smaller than inputRank"
%218 = arith.index_cast %215 : i64 to index
%dim = tensor.dim %211, %218 : tensor<1x6xi64>
%219 = arith.index_cast %dim : index to i64
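    // Token-embedding lookup: the ids are viewed as [6,1], rows are gathered along
    // dim 0 of the 50272x768 embedding table (%0; 50272 is the OPT vocabulary size),
    // then the result is expanded to [6,768] and viewed back to [1,6,768].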
%220 = torch.aten.size.int %210, %int0_3 : !torch.vtensor<[1,6],si64>, !torch.int -> !torch.int
%221 = torch.aten.mul.int %int1_2, %220 : !torch.int, !torch.int -> !torch.int
%int1_5 = torch.constant.int 1
%222 = torch.aten.size.int %210, %int1_5 : !torch.vtensor<[1,6],si64>, !torch.int -> !torch.int
%223 = torch.aten.mul.int %221, %222 : !torch.int, !torch.int -> !torch.int
%int0_6 = torch.constant.int 0
%224 = torch.aten.size.int %0, %int0_6 : !torch.vtensor<[50272,768],f32>, !torch.int -> !torch.int
%int1_7 = torch.constant.int 1
%225 = torch.aten.size.int %0, %int1_7 : !torch.vtensor<[50272,768],f32>, !torch.int -> !torch.int
%226 = torch.prim.ListConstruct %223, %int1_2 : (!torch.int, !torch.int) -> !torch.list<int>
%227 = torch.aten.view %210, %226 : !torch.vtensor<[1,6],si64>, !torch.list<int> -> !torch.vtensor<[6,1],si64>
%int0_8 = torch.constant.int 0
%false = torch.constant.bool false
%228 = torch.aten.gather %0, %int0_8, %227, %false : !torch.vtensor<[50272,768],f32>, !torch.int, !torch.vtensor<[6,1],si64>, !torch.bool -> !torch.vtensor<[6,1],f32>
%229 = torch.prim.ListConstruct %223, %225 : (!torch.int, !torch.int) -> !torch.list<int>
%230 = torch.aten.expand %228, %229, %false : !torch.vtensor<[6,1],f32>, !torch.list<int>, !torch.bool -> !torch.vtensor<[6,768],f32>
%231 = torch.prim.ListConstruct %220, %222, %225 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%232 = torch.aten.view %230, %231 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
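    // Attention-mask construction begins here. %233 is a precomputed 1x1x6x6 constant
    // (presumably the causal mask) broadcast to a rank-4 target shape; the
    // mul/eq.Tensor/where.self sequence on the shape tensors appears to resolve the
    // broadcast target shape for an ONNX Expand.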
%233 = torch.vtensor.literal(dense_resource<__1> : tensor<1x1x6x6xf32>) : !torch.vtensor<[1,1,6,6],f32>
%234 = torch.vtensor.literal(dense_resource<__2> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%235 = torch.vtensor.literal(dense_resource<__3> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%236 = torch.vtensor.literal(dense<1> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%237 = torch.vtensor.literal(dense_resource<__4> : tensor<si64>) : !torch.vtensor<[],si64>
%238 = torch.aten.mul.Tensor %236, %237 : !torch.vtensor<[4],si64>, !torch.vtensor<[],si64> -> !torch.vtensor<[4],si64>
%239 = torch.aten.eq.Tensor %234, %238 : !torch.vtensor<[4],si64>, !torch.vtensor<[4],si64> -> !torch.vtensor<[4],i1>
%240 = torch.aten.where.self %239, %236, %234 : !torch.vtensor<[4],i1>, !torch.vtensor<[4],si64>, !torch.vtensor<[4],si64> -> !torch.vtensor<[4],si64>
%int0_9 = torch.constant.int 0
%int0_10 = torch.constant.int 0
%241 = torch.aten.select.int %240, %int0_9, %int0_10 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%242 = torch.aten.item %241 : !torch.vtensor<[1],si64> -> !torch.int
%int1_11 = torch.constant.int 1
%243 = torch.aten.select.int %240, %int0_9, %int1_11 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%244 = torch.aten.item %243 : !torch.vtensor<[1],si64> -> !torch.int
%int2 = torch.constant.int 2
%245 = torch.aten.select.int %240, %int0_9, %int2 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%246 = torch.aten.item %245 : !torch.vtensor<[1],si64> -> !torch.int
%int3 = torch.constant.int 3
%247 = torch.aten.select.int %240, %int0_9, %int3 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%248 = torch.aten.item %247 : !torch.vtensor<[1],si64> -> !torch.int
%249 = torch.prim.ListConstruct %242, %244, %246, %248 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%250 = torch.aten.broadcast_to %233, %249 : !torch.vtensor<[1,1,6,6],f32>, !torch.list<int> -> !torch.vtensor<[?,?,6,6],f32>
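    // Padding-mask branch: the 1x1x1x6 mask (%251) is broadcast, inverted as
    // %270 - mask (%270 is likely the scalar 1.0), cast to i1, and masked positions
    // are filled via where.self with %274 (likely the most-negative f32); the result
    // is then added to the broadcast causal mask at %277.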
%251 = torch.vtensor.literal(dense_resource<__5> : tensor<1x1x1x6xf32>) : !torch.vtensor<[1,1,1,6],f32>
%252 = torch.vtensor.literal(dense_resource<__6> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%253 = torch.vtensor.literal(dense_resource<__7> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%254 = torch.vtensor.literal(dense<1> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%255 = torch.vtensor.literal(dense_resource<__8> : tensor<si64>) : !torch.vtensor<[],si64>
%256 = torch.aten.mul.Tensor %254, %255 : !torch.vtensor<[4],si64>, !torch.vtensor<[],si64> -> !torch.vtensor<[4],si64>
%257 = torch.aten.eq.Tensor %252, %256 : !torch.vtensor<[4],si64>, !torch.vtensor<[4],si64> -> !torch.vtensor<[4],i1>
%258 = torch.aten.where.self %257, %254, %252 : !torch.vtensor<[4],i1>, !torch.vtensor<[4],si64>, !torch.vtensor<[4],si64> -> !torch.vtensor<[4],si64>
%int0_12 = torch.constant.int 0
%int0_13 = torch.constant.int 0
%259 = torch.aten.select.int %258, %int0_12, %int0_13 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%260 = torch.aten.item %259 : !torch.vtensor<[1],si64> -> !torch.int
%int1_14 = torch.constant.int 1
%261 = torch.aten.select.int %258, %int0_12, %int1_14 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%262 = torch.aten.item %261 : !torch.vtensor<[1],si64> -> !torch.int
%int2_15 = torch.constant.int 2
%263 = torch.aten.select.int %258, %int0_12, %int2_15 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%264 = torch.aten.item %263 : !torch.vtensor<[1],si64> -> !torch.int
%int3_16 = torch.constant.int 3
%265 = torch.aten.select.int %258, %int0_12, %int3_16 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%266 = torch.aten.item %265 : !torch.vtensor<[1],si64> -> !torch.int
%267 = torch.prim.ListConstruct %260, %262, %264, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%268 = torch.aten.broadcast_to %251, %267 : !torch.vtensor<[1,1,1,6],f32>, !torch.list<int> -> !torch.vtensor<[?,?,?,6],f32>
%int6_17 = torch.constant.int 6
%none = torch.constant.none
%false_18 = torch.constant.bool false
%269 = torch.aten.to.dtype %268, %int6_17, %false_18, %false_18, %none : !torch.vtensor<[?,?,?,6],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?,6],f32>
%270 = torch.vtensor.literal(dense_resource<__9> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_19 = torch.constant.int 1
%271 = torch.aten.sub.Tensor %270, %269, %int1_19 : !torch.vtensor<[],f32>, !torch.vtensor<[?,?,?,6],f32>, !torch.int -> !torch.vtensor<[?,?,?,6],f32>
%int11 = torch.constant.int 11
%none_20 = torch.constant.none
%false_21 = torch.constant.bool false
%272 = torch.aten.to.dtype %271, %int11, %false_21, %false_21, %none_20 : !torch.vtensor<[?,?,?,6],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?,6],i1>
%int11_22 = torch.constant.int 11
%none_23 = torch.constant.none
%false_24 = torch.constant.bool false
%273 = torch.aten.to.dtype %272, %int11_22, %false_24, %false_24, %none_23 : !torch.vtensor<[?,?,?,6],i1>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?,6],i1>
%274 = torch.vtensor.literal(dense_resource<__10> : tensor<f32>) : !torch.vtensor<[],f32>
%275 = torch.aten.where.self %273, %274, %271 : !torch.vtensor<[?,?,?,6],i1>, !torch.vtensor<[],f32>, !torch.vtensor<[?,?,?,6],f32> -> !torch.vtensor<[?,?,?,6],f32>
%int6_25 = torch.constant.int 6
%none_26 = torch.constant.none
%false_27 = torch.constant.bool false
%276 = torch.aten.to.dtype %275, %int6_25, %false_27, %false_27, %none_26 : !torch.vtensor<[?,?,?,6],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?,6],f32>
%int1_28 = torch.constant.int 1
%277 = torch.aten.add.Tensor %276, %250, %int1_28 : !torch.vtensor<[?,?,?,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,?,6,6],f32>
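    // Position ids and positional embeddings: cumsum of the attention mask (%278)
    // along the normalized axis, multiplied by the mask, then sub %289 / add %291 -
    // consistent with OPT's "cumsum(mask) - 1 + offset 2" position scheme (the table
    // %1 has 2050 = 2048 + 2 rows) - followed by the same gather/expand/view lookup
    // pattern used for the token embeddings.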
%278 = torch.vtensor.literal(dense_resource<__11> : tensor<1x6xsi64>) : !torch.vtensor<[1,6],si64>
%279 = torch.vtensor.literal(dense_resource<__12> : tensor<si32>) : !torch.vtensor<[],si32>
%int2_29 = torch.constant.int 2
%int0_30 = torch.constant.int 0
%280 = torch.aten.item %279 : !torch.vtensor<[],si32> -> !torch.int
%281 = torch.aten.lt.int %280, %int0_30 : !torch.int, !torch.int -> !torch.bool
%282 = torch.aten.Int.bool %281 : !torch.bool -> !torch.int
%283 = torch.aten.mul.int %282, %int2_29 : !torch.int, !torch.int -> !torch.int
%284 = torch.aten.add.int %280, %283 : !torch.int, !torch.int -> !torch.int
%int4 = torch.constant.int 4
%285 = torch.aten.cumsum %278, %284, %int4 : !torch.vtensor<[1,6],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,6],si64>
%286 = torch.vtensor.literal(dense_resource<__13> : tensor<1x6xsi64>) : !torch.vtensor<[1,6],si64>
%287 = torch.aten.mul.Tensor %285, %286 : !torch.vtensor<[1,6],si64>, !torch.vtensor<[1,6],si64> -> !torch.vtensor<[1,6],si64>
%int5 = torch.constant.int 5
%none_31 = torch.constant.none
%false_32 = torch.constant.bool false
%288 = torch.aten.to.dtype %287, %int5, %false_32, %false_32, %none_31 : !torch.vtensor<[1,6],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,6],si64>
%289 = torch.vtensor.literal(dense_resource<__14> : tensor<si64>) : !torch.vtensor<[],si64>
%int1_33 = torch.constant.int 1
%290 = torch.aten.sub.Tensor %288, %289, %int1_33 : !torch.vtensor<[1,6],si64>, !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1,6],si64>
%291 = torch.vtensor.literal(dense_resource<__15> : tensor<si64>) : !torch.vtensor<[],si64>
%int1_34 = torch.constant.int 1
%292 = torch.aten.add.Tensor %290, %291, %int1_34 : !torch.vtensor<[1,6],si64>, !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1,6],si64>
%int1_35 = torch.constant.int 1
%int0_36 = torch.constant.int 0
%293 = torch.aten.size.int %292, %int0_36 : !torch.vtensor<[1,6],si64>, !torch.int -> !torch.int
%294 = torch.aten.mul.int %int1_35, %293 : !torch.int, !torch.int -> !torch.int
%int1_37 = torch.constant.int 1
%295 = torch.aten.size.int %292, %int1_37 : !torch.vtensor<[1,6],si64>, !torch.int -> !torch.int
%296 = torch.aten.mul.int %294, %295 : !torch.int, !torch.int -> !torch.int
%int0_38 = torch.constant.int 0
%297 = torch.aten.size.int %1, %int0_38 : !torch.vtensor<[2050,768],f32>, !torch.int -> !torch.int
%int1_39 = torch.constant.int 1
%298 = torch.aten.size.int %1, %int1_39 : !torch.vtensor<[2050,768],f32>, !torch.int -> !torch.int
%299 = torch.prim.ListConstruct %296, %int1_35 : (!torch.int, !torch.int) -> !torch.list<int>
%300 = torch.aten.view %292, %299 : !torch.vtensor<[1,6],si64>, !torch.list<int> -> !torch.vtensor<[6,1],si64>
%int0_40 = torch.constant.int 0
%false_41 = torch.constant.bool false
%301 = torch.aten.gather %1, %int0_40, %300, %false_41 : !torch.vtensor<[2050,768],f32>, !torch.int, !torch.vtensor<[6,1],si64>, !torch.bool -> !torch.vtensor<[6,1],f32>
%302 = torch.prim.ListConstruct %296, %298 : (!torch.int, !torch.int) -> !torch.list<int>
%303 = torch.aten.expand %301, %302, %false_41 : !torch.vtensor<[6,1],f32>, !torch.list<int>, !torch.bool -> !torch.vtensor<[6,768],f32>
%304 = torch.prim.ListConstruct %293, %295, %298 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%305 = torch.aten.view %303, %304 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%int1_42 = torch.constant.int 1
%306 = torch.aten.add.Tensor %232, %305, %int1_42 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
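    // %306 = token embeddings + positional embeddings. Decoder layer 0 starts here:
    // pre-attention LayerNorm (eps ~ 1e-5), then the Q projection (matmul %148 +
    // bias, scaled by %310 - likely 1/sqrt(64) = 0.125 for head_dim 64) and the K
    // projection (matmul %149 + bias).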
%float9.999990e-06 = torch.constant.float 9.9999997473787516E-6
%int768 = torch.constant.int 768
%307 = torch.prim.ListConstruct %int768 : (!torch.int) -> !torch.list<int>
%result0, %result1, %result2 = torch.aten.native_layer_norm %306, %307, %8, %9, %float9.999990e-06 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
%308 = torch.aten.matmul %result0, %148 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_43 = torch.constant.int 1
%309 = torch.aten.add.Tensor %6, %308, %int1_43 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%310 = torch.vtensor.literal(dense_resource<__16> : tensor<f32>) : !torch.vtensor<[],f32>
%311 = torch.aten.mul.Tensor %309, %310 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%312 = torch.aten.matmul %result0, %149 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_44 = torch.constant.int 1
%313 = torch.aten.add.Tensor %4, %312, %int1_44 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
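    // K, V (the V projection, matmul %150 + bias, appears mid-block), and the scaled
    // Q are each reshaped from [1,6,768] to [1,6,12,64] (12 heads x 64) and
    // transposed to [1,12,6,64]; the long select/eq/mul/add runs are the same
    // ONNX-Reshape zero-dim handling as above, applied to constant shape tensors.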
%314 = torch.vtensor.literal(dense_resource<__17> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%315 = torch.vtensor.literal(dense_resource<__18> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_45 = torch.constant.int 0
%int0_46 = torch.constant.int 0
%316 = torch.aten.select.int %314, %int0_45, %int0_46 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%317 = torch.aten.item %316 : !torch.vtensor<[1],si64> -> !torch.int
%318 = torch.aten.eq.int %317, %int0_45 : !torch.int, !torch.int -> !torch.bool
%319 = torch.aten.Int.bool %318 : !torch.bool -> !torch.int
%int1_47 = torch.constant.int 1
%320 = torch.aten.mul.int %319, %int1_47 : !torch.int, !torch.int -> !torch.int
%321 = torch.aten.add.int %317, %320 : !torch.int, !torch.int -> !torch.int
%int1_48 = torch.constant.int 1
%322 = torch.aten.select.int %314, %int0_45, %int1_48 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%323 = torch.aten.item %322 : !torch.vtensor<[1],si64> -> !torch.int
%324 = torch.aten.eq.int %323, %int0_45 : !torch.int, !torch.int -> !torch.bool
%325 = torch.aten.Int.bool %324 : !torch.bool -> !torch.int
%int6_49 = torch.constant.int 6
%326 = torch.aten.mul.int %325, %int6_49 : !torch.int, !torch.int -> !torch.int
%327 = torch.aten.add.int %323, %326 : !torch.int, !torch.int -> !torch.int
%int2_50 = torch.constant.int 2
%328 = torch.aten.select.int %314, %int0_45, %int2_50 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%329 = torch.aten.item %328 : !torch.vtensor<[1],si64> -> !torch.int
%330 = torch.aten.eq.int %329, %int0_45 : !torch.int, !torch.int -> !torch.bool
%331 = torch.aten.Int.bool %330 : !torch.bool -> !torch.int
%int768_51 = torch.constant.int 768
%332 = torch.aten.mul.int %331, %int768_51 : !torch.int, !torch.int -> !torch.int
%333 = torch.aten.add.int %329, %332 : !torch.int, !torch.int -> !torch.int
%int3_52 = torch.constant.int 3
%334 = torch.aten.select.int %314, %int0_45, %int3_52 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%335 = torch.aten.item %334 : !torch.vtensor<[1],si64> -> !torch.int
%336 = torch.aten.eq.int %335, %int0_45 : !torch.int, !torch.int -> !torch.bool
%337 = torch.aten.Int.bool %336 : !torch.bool -> !torch.int
%338 = torch.aten.mul.int %337, %int0_45 : !torch.int, !torch.int -> !torch.int
%339 = torch.aten.add.int %335, %338 : !torch.int, !torch.int -> !torch.int
%340 = torch.prim.ListConstruct %321, %327, %333, %339 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%341 = torch.aten.reshape %313, %340 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_53 = torch.constant.int 1
%int2_54 = torch.constant.int 2
%342 = torch.aten.transpose.int %341, %int1_53, %int2_54 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%343 = torch.aten.matmul %result0, %150 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_55 = torch.constant.int 1
%344 = torch.aten.add.Tensor %5, %343, %int1_55 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_56 = torch.constant.int 0
%int0_57 = torch.constant.int 0
%345 = torch.aten.select.int %315, %int0_56, %int0_57 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%346 = torch.aten.item %345 : !torch.vtensor<[1],si64> -> !torch.int
%347 = torch.aten.eq.int %346, %int0_56 : !torch.int, !torch.int -> !torch.bool
%348 = torch.aten.Int.bool %347 : !torch.bool -> !torch.int
%int1_58 = torch.constant.int 1
%349 = torch.aten.mul.int %348, %int1_58 : !torch.int, !torch.int -> !torch.int
%350 = torch.aten.add.int %346, %349 : !torch.int, !torch.int -> !torch.int
%int1_59 = torch.constant.int 1
%351 = torch.aten.select.int %315, %int0_56, %int1_59 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%352 = torch.aten.item %351 : !torch.vtensor<[1],si64> -> !torch.int
%353 = torch.aten.eq.int %352, %int0_56 : !torch.int, !torch.int -> !torch.bool
%354 = torch.aten.Int.bool %353 : !torch.bool -> !torch.int
%int6_60 = torch.constant.int 6
%355 = torch.aten.mul.int %354, %int6_60 : !torch.int, !torch.int -> !torch.int
%356 = torch.aten.add.int %352, %355 : !torch.int, !torch.int -> !torch.int
%int2_61 = torch.constant.int 2
%357 = torch.aten.select.int %315, %int0_56, %int2_61 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%358 = torch.aten.item %357 : !torch.vtensor<[1],si64> -> !torch.int
%359 = torch.aten.eq.int %358, %int0_56 : !torch.int, !torch.int -> !torch.bool
%360 = torch.aten.Int.bool %359 : !torch.bool -> !torch.int
%int768_62 = torch.constant.int 768
%361 = torch.aten.mul.int %360, %int768_62 : !torch.int, !torch.int -> !torch.int
%362 = torch.aten.add.int %358, %361 : !torch.int, !torch.int -> !torch.int
%int3_63 = torch.constant.int 3
%363 = torch.aten.select.int %315, %int0_56, %int3_63 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%364 = torch.aten.item %363 : !torch.vtensor<[1],si64> -> !torch.int
%365 = torch.aten.eq.int %364, %int0_56 : !torch.int, !torch.int -> !torch.bool
%366 = torch.aten.Int.bool %365 : !torch.bool -> !torch.int
%367 = torch.aten.mul.int %366, %int0_56 : !torch.int, !torch.int -> !torch.int
%368 = torch.aten.add.int %364, %367 : !torch.int, !torch.int -> !torch.int
%369 = torch.prim.ListConstruct %350, %356, %362, %368 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%370 = torch.aten.reshape %344, %369 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_64 = torch.constant.int 1
%int2_65 = torch.constant.int 2
%371 = torch.aten.transpose.int %370, %int1_64, %int2_65 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%372 = torch.vtensor.literal(dense_resource<__19> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_66 = torch.constant.int 0
%int0_67 = torch.constant.int 0
%373 = torch.aten.select.int %372, %int0_66, %int0_67 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%374 = torch.aten.item %373 : !torch.vtensor<[1],si64> -> !torch.int
%375 = torch.aten.eq.int %374, %int0_66 : !torch.int, !torch.int -> !torch.bool
%376 = torch.aten.Int.bool %375 : !torch.bool -> !torch.int
%int1_68 = torch.constant.int 1
%377 = torch.aten.mul.int %376, %int1_68 : !torch.int, !torch.int -> !torch.int
%378 = torch.aten.add.int %374, %377 : !torch.int, !torch.int -> !torch.int
%int1_69 = torch.constant.int 1
%379 = torch.aten.select.int %372, %int0_66, %int1_69 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%380 = torch.aten.item %379 : !torch.vtensor<[1],si64> -> !torch.int
%381 = torch.aten.eq.int %380, %int0_66 : !torch.int, !torch.int -> !torch.bool
%382 = torch.aten.Int.bool %381 : !torch.bool -> !torch.int
%int6_70 = torch.constant.int 6
%383 = torch.aten.mul.int %382, %int6_70 : !torch.int, !torch.int -> !torch.int
%384 = torch.aten.add.int %380, %383 : !torch.int, !torch.int -> !torch.int
%int2_71 = torch.constant.int 2
%385 = torch.aten.select.int %372, %int0_66, %int2_71 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%386 = torch.aten.item %385 : !torch.vtensor<[1],si64> -> !torch.int
%387 = torch.aten.eq.int %386, %int0_66 : !torch.int, !torch.int -> !torch.bool
%388 = torch.aten.Int.bool %387 : !torch.bool -> !torch.int
%int768_72 = torch.constant.int 768
%389 = torch.aten.mul.int %388, %int768_72 : !torch.int, !torch.int -> !torch.int
%390 = torch.aten.add.int %386, %389 : !torch.int, !torch.int -> !torch.int
%int3_73 = torch.constant.int 3
%391 = torch.aten.select.int %372, %int0_66, %int3_73 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%392 = torch.aten.item %391 : !torch.vtensor<[1],si64> -> !torch.int
%393 = torch.aten.eq.int %392, %int0_66 : !torch.int, !torch.int -> !torch.bool
%394 = torch.aten.Int.bool %393 : !torch.bool -> !torch.int
%395 = torch.aten.mul.int %394, %int0_66 : !torch.int, !torch.int -> !torch.int
%396 = torch.aten.add.int %392, %395 : !torch.int, !torch.int -> !torch.int
%397 = torch.prim.ListConstruct %378, %384, %390, %396 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%398 = torch.aten.reshape %311, %397 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_74 = torch.constant.int 1
%int2_75 = torch.constant.int 2
%399 = torch.aten.transpose.int %398, %int1_74, %int2_75 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
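    // Heads are folded into the batch dimension: Q, K, and V go from [1,12,6,64] to
    // [12,6,64] so attention can run as plain batched matmuls.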
%400 = torch.vtensor.literal(dense_resource<__20> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%401 = torch.vtensor.literal(dense_resource<__21> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%402 = torch.vtensor.literal(dense_resource<__22> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_76 = torch.constant.int 0
%int0_77 = torch.constant.int 0
%403 = torch.aten.select.int %400, %int0_76, %int0_77 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%404 = torch.aten.item %403 : !torch.vtensor<[1],si64> -> !torch.int
%405 = torch.aten.eq.int %404, %int0_76 : !torch.int, !torch.int -> !torch.bool
%406 = torch.aten.Int.bool %405 : !torch.bool -> !torch.int
%int1_78 = torch.constant.int 1
%407 = torch.aten.mul.int %406, %int1_78 : !torch.int, !torch.int -> !torch.int
%408 = torch.aten.add.int %404, %407 : !torch.int, !torch.int -> !torch.int
%int1_79 = torch.constant.int 1
%409 = torch.aten.select.int %400, %int0_76, %int1_79 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%410 = torch.aten.item %409 : !torch.vtensor<[1],si64> -> !torch.int
%411 = torch.aten.eq.int %410, %int0_76 : !torch.int, !torch.int -> !torch.bool
%412 = torch.aten.Int.bool %411 : !torch.bool -> !torch.int
%int12 = torch.constant.int 12
%413 = torch.aten.mul.int %412, %int12 : !torch.int, !torch.int -> !torch.int
%414 = torch.aten.add.int %410, %413 : !torch.int, !torch.int -> !torch.int
%int2_80 = torch.constant.int 2
%415 = torch.aten.select.int %400, %int0_76, %int2_80 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%416 = torch.aten.item %415 : !torch.vtensor<[1],si64> -> !torch.int
%417 = torch.aten.eq.int %416, %int0_76 : !torch.int, !torch.int -> !torch.bool
%418 = torch.aten.Int.bool %417 : !torch.bool -> !torch.int
%int6_81 = torch.constant.int 6
%419 = torch.aten.mul.int %418, %int6_81 : !torch.int, !torch.int -> !torch.int
%420 = torch.aten.add.int %416, %419 : !torch.int, !torch.int -> !torch.int
%421 = torch.prim.ListConstruct %408, %414, %420 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%422 = torch.aten.reshape %399, %421 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_82 = torch.constant.int 0
%int0_83 = torch.constant.int 0
%423 = torch.aten.select.int %401, %int0_82, %int0_83 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%424 = torch.aten.item %423 : !torch.vtensor<[1],si64> -> !torch.int
%425 = torch.aten.eq.int %424, %int0_82 : !torch.int, !torch.int -> !torch.bool
%426 = torch.aten.Int.bool %425 : !torch.bool -> !torch.int
%int1_84 = torch.constant.int 1
%427 = torch.aten.mul.int %426, %int1_84 : !torch.int, !torch.int -> !torch.int
%428 = torch.aten.add.int %424, %427 : !torch.int, !torch.int -> !torch.int
%int1_85 = torch.constant.int 1
%429 = torch.aten.select.int %401, %int0_82, %int1_85 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%430 = torch.aten.item %429 : !torch.vtensor<[1],si64> -> !torch.int
%431 = torch.aten.eq.int %430, %int0_82 : !torch.int, !torch.int -> !torch.bool
%432 = torch.aten.Int.bool %431 : !torch.bool -> !torch.int
%int12_86 = torch.constant.int 12
%433 = torch.aten.mul.int %432, %int12_86 : !torch.int, !torch.int -> !torch.int
%434 = torch.aten.add.int %430, %433 : !torch.int, !torch.int -> !torch.int
%int2_87 = torch.constant.int 2
%435 = torch.aten.select.int %401, %int0_82, %int2_87 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%436 = torch.aten.item %435 : !torch.vtensor<[1],si64> -> !torch.int
%437 = torch.aten.eq.int %436, %int0_82 : !torch.int, !torch.int -> !torch.bool
%438 = torch.aten.Int.bool %437 : !torch.bool -> !torch.int
%int6_88 = torch.constant.int 6
%439 = torch.aten.mul.int %438, %int6_88 : !torch.int, !torch.int -> !torch.int
%440 = torch.aten.add.int %436, %439 : !torch.int, !torch.int -> !torch.int
%441 = torch.prim.ListConstruct %428, %434, %440 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%442 = torch.aten.reshape %342, %441 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_89 = torch.constant.int 0
%int0_90 = torch.constant.int 0
%443 = torch.aten.select.int %402, %int0_89, %int0_90 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%444 = torch.aten.item %443 : !torch.vtensor<[1],si64> -> !torch.int
%445 = torch.aten.eq.int %444, %int0_89 : !torch.int, !torch.int -> !torch.bool
%446 = torch.aten.Int.bool %445 : !torch.bool -> !torch.int
%int1_91 = torch.constant.int 1
%447 = torch.aten.mul.int %446, %int1_91 : !torch.int, !torch.int -> !torch.int
%448 = torch.aten.add.int %444, %447 : !torch.int, !torch.int -> !torch.int
%int1_92 = torch.constant.int 1
%449 = torch.aten.select.int %402, %int0_89, %int1_92 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%450 = torch.aten.item %449 : !torch.vtensor<[1],si64> -> !torch.int
%451 = torch.aten.eq.int %450, %int0_89 : !torch.int, !torch.int -> !torch.bool
%452 = torch.aten.Int.bool %451 : !torch.bool -> !torch.int
%int12_93 = torch.constant.int 12
%453 = torch.aten.mul.int %452, %int12_93 : !torch.int, !torch.int -> !torch.int
%454 = torch.aten.add.int %450, %453 : !torch.int, !torch.int -> !torch.int
%int2_94 = torch.constant.int 2
%455 = torch.aten.select.int %402, %int0_89, %int2_94 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%456 = torch.aten.item %455 : !torch.vtensor<[1],si64> -> !torch.int
%457 = torch.aten.eq.int %456, %int0_89 : !torch.int, !torch.int -> !torch.bool
%458 = torch.aten.Int.bool %457 : !torch.bool -> !torch.int
%int6_95 = torch.constant.int 6
%459 = torch.aten.mul.int %458, %int6_95 : !torch.int, !torch.int -> !torch.int
%460 = torch.aten.add.int %456, %459 : !torch.int, !torch.int -> !torch.int
%461 = torch.prim.ListConstruct %448, %454, %460 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%462 = torch.aten.reshape %371, %461 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
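    // Scaled dot-product attention: K is transposed to [12,64,6] and multiplied with
    // Q to give [12,6,6] scores; the scores are reshaped to [1,12,6,6], the combined
    // mask %277 is added, and a maximum with %493 (likely finfo(f32).min, matching
    // the Hugging Face OPT clamp) guards fully-masked rows; then softmax over the
    // last dim and a matmul with V.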
%int1_96 = torch.constant.int 1
%int2_97 = torch.constant.int 2
%463 = torch.aten.transpose.int %442, %int1_96, %int2_97 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%464 = torch.aten.matmul %422, %463 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
%465 = torch.vtensor.literal(dense_resource<__23> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_98 = torch.constant.int 0
%int0_99 = torch.constant.int 0
%466 = torch.aten.select.int %465, %int0_98, %int0_99 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%467 = torch.aten.item %466 : !torch.vtensor<[1],si64> -> !torch.int
%468 = torch.aten.eq.int %467, %int0_98 : !torch.int, !torch.int -> !torch.bool
%469 = torch.aten.Int.bool %468 : !torch.bool -> !torch.int
%int12_100 = torch.constant.int 12
%470 = torch.aten.mul.int %469, %int12_100 : !torch.int, !torch.int -> !torch.int
%471 = torch.aten.add.int %467, %470 : !torch.int, !torch.int -> !torch.int
%int1_101 = torch.constant.int 1
%472 = torch.aten.select.int %465, %int0_98, %int1_101 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%473 = torch.aten.item %472 : !torch.vtensor<[1],si64> -> !torch.int
%474 = torch.aten.eq.int %473, %int0_98 : !torch.int, !torch.int -> !torch.bool
%475 = torch.aten.Int.bool %474 : !torch.bool -> !torch.int
%int6_102 = torch.constant.int 6
%476 = torch.aten.mul.int %475, %int6_102 : !torch.int, !torch.int -> !torch.int
%477 = torch.aten.add.int %473, %476 : !torch.int, !torch.int -> !torch.int
%int2_103 = torch.constant.int 2
%478 = torch.aten.select.int %465, %int0_98, %int2_103 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%479 = torch.aten.item %478 : !torch.vtensor<[1],si64> -> !torch.int
%480 = torch.aten.eq.int %479, %int0_98 : !torch.int, !torch.int -> !torch.bool
%481 = torch.aten.Int.bool %480 : !torch.bool -> !torch.int
%int6_104 = torch.constant.int 6
%482 = torch.aten.mul.int %481, %int6_104 : !torch.int, !torch.int -> !torch.int
%483 = torch.aten.add.int %479, %482 : !torch.int, !torch.int -> !torch.int
%int3_105 = torch.constant.int 3
%484 = torch.aten.select.int %465, %int0_98, %int3_105 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%485 = torch.aten.item %484 : !torch.vtensor<[1],si64> -> !torch.int
%486 = torch.aten.eq.int %485, %int0_98 : !torch.int, !torch.int -> !torch.bool
%487 = torch.aten.Int.bool %486 : !torch.bool -> !torch.int
%488 = torch.aten.mul.int %487, %int0_98 : !torch.int, !torch.int -> !torch.int
%489 = torch.aten.add.int %485, %488 : !torch.int, !torch.int -> !torch.int
%490 = torch.prim.ListConstruct %471, %477, %483, %489 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%491 = torch.aten.reshape %464, %490 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
%int1_106 = torch.constant.int 1
%492 = torch.aten.add.Tensor %491, %277, %int1_106 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%493 = torch.vtensor.literal(dense_resource<__24> : tensor<f32>) : !torch.vtensor<[],f32>
%494 = torch.aten.maximum %492, %493 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
%495 = torch.vtensor.literal(dense_resource<__25> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_107 = torch.constant.int 0
%int0_108 = torch.constant.int 0
%496 = torch.aten.select.int %495, %int0_107, %int0_108 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%497 = torch.aten.item %496 : !torch.vtensor<[1],si64> -> !torch.int
%498 = torch.aten.eq.int %497, %int0_107 : !torch.int, !torch.int -> !torch.bool
%499 = torch.aten.Int.bool %498 : !torch.bool -> !torch.int
%int-1 = torch.constant.int -1
%500 = torch.aten.mul.int %499, %int-1 : !torch.int, !torch.int -> !torch.int
%501 = torch.aten.add.int %497, %500 : !torch.int, !torch.int -> !torch.int
%int1_109 = torch.constant.int 1
%502 = torch.aten.select.int %495, %int0_107, %int1_109 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%503 = torch.aten.item %502 : !torch.vtensor<[1],si64> -> !torch.int
%504 = torch.aten.eq.int %503, %int0_107 : !torch.int, !torch.int -> !torch.bool
%505 = torch.aten.Int.bool %504 : !torch.bool -> !torch.int
%int12_110 = torch.constant.int 12
%506 = torch.aten.mul.int %505, %int12_110 : !torch.int, !torch.int -> !torch.int
%507 = torch.aten.add.int %503, %506 : !torch.int, !torch.int -> !torch.int
%int2_111 = torch.constant.int 2
%508 = torch.aten.select.int %495, %int0_107, %int2_111 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%509 = torch.aten.item %508 : !torch.vtensor<[1],si64> -> !torch.int
%510 = torch.aten.eq.int %509, %int0_107 : !torch.int, !torch.int -> !torch.bool
%511 = torch.aten.Int.bool %510 : !torch.bool -> !torch.int
%int6_112 = torch.constant.int 6
%512 = torch.aten.mul.int %511, %int6_112 : !torch.int, !torch.int -> !torch.int
%513 = torch.aten.add.int %509, %512 : !torch.int, !torch.int -> !torch.int
%514 = torch.prim.ListConstruct %501, %507, %513 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%515 = torch.aten.reshape %494, %514 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
%int2_113 = torch.constant.int 2
%none_114 = torch.constant.none
%516 = torch.aten.softmax.int %515, %int2_113, %none_114 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%517 = torch.aten.matmul %516, %462 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
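    // Attention output: [12,6,64] is restored to [1,12,6,64], transposed to
    // [1,6,12,64], and the heads are merged back to [1,6,768]; out_proj (matmul %151
    // + bias %7) is applied, followed by the residual add with %306.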
%518 = torch.vtensor.literal(dense_resource<__26> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_115 = torch.constant.int 0
%int0_116 = torch.constant.int 0
%519 = torch.aten.select.int %518, %int0_115, %int0_116 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%520 = torch.aten.item %519 : !torch.vtensor<[1],si64> -> !torch.int
%521 = torch.aten.eq.int %520, %int0_115 : !torch.int, !torch.int -> !torch.bool
%522 = torch.aten.Int.bool %521 : !torch.bool -> !torch.int
%int12_117 = torch.constant.int 12
%523 = torch.aten.mul.int %522, %int12_117 : !torch.int, !torch.int -> !torch.int
%524 = torch.aten.add.int %520, %523 : !torch.int, !torch.int -> !torch.int
%int1_118 = torch.constant.int 1
%525 = torch.aten.select.int %518, %int0_115, %int1_118 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%526 = torch.aten.item %525 : !torch.vtensor<[1],si64> -> !torch.int
%527 = torch.aten.eq.int %526, %int0_115 : !torch.int, !torch.int -> !torch.bool
%528 = torch.aten.Int.bool %527 : !torch.bool -> !torch.int
%int6_119 = torch.constant.int 6
%529 = torch.aten.mul.int %528, %int6_119 : !torch.int, !torch.int -> !torch.int
%530 = torch.aten.add.int %526, %529 : !torch.int, !torch.int -> !torch.int
%int2_120 = torch.constant.int 2
%531 = torch.aten.select.int %518, %int0_115, %int2_120 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%532 = torch.aten.item %531 : !torch.vtensor<[1],si64> -> !torch.int
%533 = torch.aten.eq.int %532, %int0_115 : !torch.int, !torch.int -> !torch.bool
%534 = torch.aten.Int.bool %533 : !torch.bool -> !torch.int
%int64 = torch.constant.int 64
%535 = torch.aten.mul.int %534, %int64 : !torch.int, !torch.int -> !torch.int
%536 = torch.aten.add.int %532, %535 : !torch.int, !torch.int -> !torch.int
%int3_121 = torch.constant.int 3
%537 = torch.aten.select.int %518, %int0_115, %int3_121 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%538 = torch.aten.item %537 : !torch.vtensor<[1],si64> -> !torch.int
%539 = torch.aten.eq.int %538, %int0_115 : !torch.int, !torch.int -> !torch.bool
%540 = torch.aten.Int.bool %539 : !torch.bool -> !torch.int
%541 = torch.aten.mul.int %540, %int0_115 : !torch.int, !torch.int -> !torch.int
%542 = torch.aten.add.int %538, %541 : !torch.int, !torch.int -> !torch.int
%543 = torch.prim.ListConstruct %524, %530, %536, %542 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%544 = torch.aten.reshape %517, %543 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_122 = torch.constant.int 1
%int2_123 = torch.constant.int 2
%545 = torch.aten.transpose.int %544, %int1_122, %int2_123 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%546 = torch.vtensor.literal(dense_resource<__27> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_124 = torch.constant.int 0
%int0_125 = torch.constant.int 0
%547 = torch.aten.select.int %546, %int0_124, %int0_125 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%548 = torch.aten.item %547 : !torch.vtensor<[1],si64> -> !torch.int
%549 = torch.aten.eq.int %548, %int0_124 : !torch.int, !torch.int -> !torch.bool
%550 = torch.aten.Int.bool %549 : !torch.bool -> !torch.int
%int1_126 = torch.constant.int 1
%551 = torch.aten.mul.int %550, %int1_126 : !torch.int, !torch.int -> !torch.int
%552 = torch.aten.add.int %548, %551 : !torch.int, !torch.int -> !torch.int
%int1_127 = torch.constant.int 1
%553 = torch.aten.select.int %546, %int0_124, %int1_127 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%554 = torch.aten.item %553 : !torch.vtensor<[1],si64> -> !torch.int
%555 = torch.aten.eq.int %554, %int0_124 : !torch.int, !torch.int -> !torch.bool
%556 = torch.aten.Int.bool %555 : !torch.bool -> !torch.int
%int6_128 = torch.constant.int 6
%557 = torch.aten.mul.int %556, %int6_128 : !torch.int, !torch.int -> !torch.int
%558 = torch.aten.add.int %554, %557 : !torch.int, !torch.int -> !torch.int
%int2_129 = torch.constant.int 2
%559 = torch.aten.select.int %546, %int0_124, %int2_129 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%560 = torch.aten.item %559 : !torch.vtensor<[1],si64> -> !torch.int
%561 = torch.aten.eq.int %560, %int0_124 : !torch.int, !torch.int -> !torch.bool
%562 = torch.aten.Int.bool %561 : !torch.bool -> !torch.int
%int12_130 = torch.constant.int 12
%563 = torch.aten.mul.int %562, %int12_130 : !torch.int, !torch.int -> !torch.int
%564 = torch.aten.add.int %560, %563 : !torch.int, !torch.int -> !torch.int
%565 = torch.prim.ListConstruct %552, %558, %564 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%566 = torch.aten.reshape %545, %565 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%567 = torch.aten.matmul %566, %151 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_131 = torch.constant.int 1
%568 = torch.aten.add.Tensor %7, %567, %int1_131 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_132 = torch.constant.int 1
%569 = torch.aten.add.Tensor %306, %568, %int1_132 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
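    // Layer-0 feed-forward block: flatten to [6,768], LayerNorm (presumably the
    // layer's final_layer_norm, OPT's pre-FFN norm), fc1 (768 -> 3072, transposed
    // weight + mm + bias) with ReLU, fc2 (3072 -> 768), then the residual add.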
%570 = torch.vtensor.literal(dense_resource<__28> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_133 = torch.constant.int 0
%int0_134 = torch.constant.int 0
%571 = torch.aten.select.int %570, %int0_133, %int0_134 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%572 = torch.aten.item %571 : !torch.vtensor<[1],si64> -> !torch.int
%573 = torch.aten.eq.int %572, %int0_133 : !torch.int, !torch.int -> !torch.bool
%574 = torch.aten.Int.bool %573 : !torch.bool -> !torch.int
%int1_135 = torch.constant.int 1
%575 = torch.aten.mul.int %574, %int1_135 : !torch.int, !torch.int -> !torch.int
%576 = torch.aten.add.int %572, %575 : !torch.int, !torch.int -> !torch.int
%int1_136 = torch.constant.int 1
%577 = torch.aten.select.int %570, %int0_133, %int1_136 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%578 = torch.aten.item %577 : !torch.vtensor<[1],si64> -> !torch.int
%579 = torch.aten.eq.int %578, %int0_133 : !torch.int, !torch.int -> !torch.bool
%580 = torch.aten.Int.bool %579 : !torch.bool -> !torch.int
%int6_137 = torch.constant.int 6
%581 = torch.aten.mul.int %580, %int6_137 : !torch.int, !torch.int -> !torch.int
%582 = torch.aten.add.int %578, %581 : !torch.int, !torch.int -> !torch.int
%583 = torch.prim.ListConstruct %576, %582 : (!torch.int, !torch.int) -> !torch.list<int>
%584 = torch.aten.reshape %569, %583 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
%float9.999990e-06_138 = torch.constant.float 9.9999997473787516E-6
%int768_139 = torch.constant.int 768
%585 = torch.prim.ListConstruct %int768_139 : (!torch.int) -> !torch.list<int>
%result0_140, %result1_141, %result2_142 = torch.aten.native_layer_norm %584, %585, %14, %15, %float9.999990e-06_138 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_143 = torch.constant.int 0
%int1_144 = torch.constant.int 1
%586 = torch.aten.transpose.int %10, %int0_143, %int1_144 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%587 = torch.aten.mm %result0_140, %586 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%588 = torch.aten.add.Tensor %587, %11, %int1_144 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%589 = torch.aten.relu %588 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_145 = torch.constant.int 0
%int1_146 = torch.constant.int 1
%590 = torch.aten.transpose.int %12, %int0_145, %int1_146 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%591 = torch.aten.mm %589, %590 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%592 = torch.aten.add.Tensor %591, %13, %int1_146 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_147 = torch.constant.int 1
%593 = torch.aten.add.Tensor %584, %592, %int1_147 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%594 = torch.vtensor.literal(dense_resource<__29> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_148 = torch.constant.int 0
%int0_149 = torch.constant.int 0
%595 = torch.aten.select.int %594, %int0_148, %int0_149 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%596 = torch.aten.item %595 : !torch.vtensor<[1],si64> -> !torch.int
%597 = torch.aten.eq.int %596, %int0_148 : !torch.int, !torch.int -> !torch.bool
%598 = torch.aten.Int.bool %597 : !torch.bool -> !torch.int
%int6_150 = torch.constant.int 6
%599 = torch.aten.mul.int %598, %int6_150 : !torch.int, !torch.int -> !torch.int
%600 = torch.aten.add.int %596, %599 : !torch.int, !torch.int -> !torch.int
%int1_151 = torch.constant.int 1
%601 = torch.aten.select.int %594, %int0_148, %int1_151 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%602 = torch.aten.item %601 : !torch.vtensor<[1],si64> -> !torch.int
%603 = torch.aten.eq.int %602, %int0_148 : !torch.int, !torch.int -> !torch.bool
%604 = torch.aten.Int.bool %603 : !torch.bool -> !torch.int
%int768_152 = torch.constant.int 768
%605 = torch.aten.mul.int %604, %int768_152 : !torch.int, !torch.int -> !torch.int
%606 = torch.aten.add.int %602, %605 : !torch.int, !torch.int -> !torch.int
%int2_153 = torch.constant.int 2
%607 = torch.aten.select.int %594, %int0_148, %int2_153 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%608 = torch.aten.item %607 : !torch.vtensor<[1],si64> -> !torch.int
%609 = torch.aten.eq.int %608, %int0_148 : !torch.int, !torch.int -> !torch.bool
%610 = torch.aten.Int.bool %609 : !torch.bool -> !torch.int
%611 = torch.aten.mul.int %610, %int0_148 : !torch.int, !torch.int -> !torch.int
%612 = torch.aten.add.int %608, %611 : !torch.int, !torch.int -> !torch.int
%613 = torch.prim.ListConstruct %600, %606, %612 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%614 = torch.aten.reshape %593, %613 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
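    // %614 restores [1,6,768]; decoder layer 0 is complete. Decoder layer 1 appears
    // to begin below, repeating the same attention pattern with its own LayerNorm
    // parameters and _onnx__MatMul_* projection weights (%152, %153, ...).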
%float9.999990e-06_154 = torch.constant.float 9.9999997473787516E-6
%int768_155 = torch.constant.int 768
%615 = torch.prim.ListConstruct %int768_155 : (!torch.int) -> !torch.list<int>
%result0_156, %result1_157, %result2_158 = torch.aten.native_layer_norm %614, %615, %20, %21, %float9.999990e-06_154 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
%616 = torch.aten.matmul %result0_156, %152 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_159 = torch.constant.int 1
%617 = torch.aten.add.Tensor %18, %616, %int1_159 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%618 = torch.vtensor.literal(dense_resource<__30> : tensor<f32>) : !torch.vtensor<[],f32>
%619 = torch.aten.mul.Tensor %617, %618 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
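// Key projection (x @ %153 + bias %16), then reshape to [1,6,12,64] via the
// shape idiom above and transpose dims 1<->2 into per-head layout
// [1,12,6,64].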
%620 = torch.aten.matmul %result0_156, %153 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_160 = torch.constant.int 1
%621 = torch.aten.add.Tensor %16, %620, %int1_160 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%622 = torch.vtensor.literal(dense_resource<__31> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%623 = torch.vtensor.literal(dense_resource<__32> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_161 = torch.constant.int 0
%int0_162 = torch.constant.int 0
%624 = torch.aten.select.int %622, %int0_161, %int0_162 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%625 = torch.aten.item %624 : !torch.vtensor<[1],si64> -> !torch.int
%626 = torch.aten.eq.int %625, %int0_161 : !torch.int, !torch.int -> !torch.bool
%627 = torch.aten.Int.bool %626 : !torch.bool -> !torch.int
%int1_163 = torch.constant.int 1
%628 = torch.aten.mul.int %627, %int1_163 : !torch.int, !torch.int -> !torch.int
%629 = torch.aten.add.int %625, %628 : !torch.int, !torch.int -> !torch.int
%int1_164 = torch.constant.int 1
%630 = torch.aten.select.int %622, %int0_161, %int1_164 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%631 = torch.aten.item %630 : !torch.vtensor<[1],si64> -> !torch.int
%632 = torch.aten.eq.int %631, %int0_161 : !torch.int, !torch.int -> !torch.bool
%633 = torch.aten.Int.bool %632 : !torch.bool -> !torch.int
%int6_165 = torch.constant.int 6
%634 = torch.aten.mul.int %633, %int6_165 : !torch.int, !torch.int -> !torch.int
%635 = torch.aten.add.int %631, %634 : !torch.int, !torch.int -> !torch.int
%int2_166 = torch.constant.int 2
%636 = torch.aten.select.int %622, %int0_161, %int2_166 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%637 = torch.aten.item %636 : !torch.vtensor<[1],si64> -> !torch.int
%638 = torch.aten.eq.int %637, %int0_161 : !torch.int, !torch.int -> !torch.bool
%639 = torch.aten.Int.bool %638 : !torch.bool -> !torch.int
%int768_167 = torch.constant.int 768
%640 = torch.aten.mul.int %639, %int768_167 : !torch.int, !torch.int -> !torch.int
%641 = torch.aten.add.int %637, %640 : !torch.int, !torch.int -> !torch.int
%int3_168 = torch.constant.int 3
%642 = torch.aten.select.int %622, %int0_161, %int3_168 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%643 = torch.aten.item %642 : !torch.vtensor<[1],si64> -> !torch.int
%644 = torch.aten.eq.int %643, %int0_161 : !torch.int, !torch.int -> !torch.bool
%645 = torch.aten.Int.bool %644 : !torch.bool -> !torch.int
%646 = torch.aten.mul.int %645, %int0_161 : !torch.int, !torch.int -> !torch.int
%647 = torch.aten.add.int %643, %646 : !torch.int, !torch.int -> !torch.int
%648 = torch.prim.ListConstruct %629, %635, %641, %647 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%649 = torch.aten.reshape %621, %648 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_169 = torch.constant.int 1
%int2_170 = torch.constant.int 2
%650 = torch.aten.transpose.int %649, %int1_169, %int2_170 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
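// Value projection (x @ %154 + bias %17), reshaped and transposed to
// [1,12,6,64] the same way.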
%651 = torch.aten.matmul %result0_156, %154 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_171 = torch.constant.int 1
%652 = torch.aten.add.Tensor %17, %651, %int1_171 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_172 = torch.constant.int 0
%int0_173 = torch.constant.int 0
%653 = torch.aten.select.int %623, %int0_172, %int0_173 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%654 = torch.aten.item %653 : !torch.vtensor<[1],si64> -> !torch.int
%655 = torch.aten.eq.int %654, %int0_172 : !torch.int, !torch.int -> !torch.bool
%656 = torch.aten.Int.bool %655 : !torch.bool -> !torch.int
%int1_174 = torch.constant.int 1
%657 = torch.aten.mul.int %656, %int1_174 : !torch.int, !torch.int -> !torch.int
%658 = torch.aten.add.int %654, %657 : !torch.int, !torch.int -> !torch.int
%int1_175 = torch.constant.int 1
%659 = torch.aten.select.int %623, %int0_172, %int1_175 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%660 = torch.aten.item %659 : !torch.vtensor<[1],si64> -> !torch.int
%661 = torch.aten.eq.int %660, %int0_172 : !torch.int, !torch.int -> !torch.bool
%662 = torch.aten.Int.bool %661 : !torch.bool -> !torch.int
%int6_176 = torch.constant.int 6
%663 = torch.aten.mul.int %662, %int6_176 : !torch.int, !torch.int -> !torch.int
%664 = torch.aten.add.int %660, %663 : !torch.int, !torch.int -> !torch.int
%int2_177 = torch.constant.int 2
%665 = torch.aten.select.int %623, %int0_172, %int2_177 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%666 = torch.aten.item %665 : !torch.vtensor<[1],si64> -> !torch.int
%667 = torch.aten.eq.int %666, %int0_172 : !torch.int, !torch.int -> !torch.bool
%668 = torch.aten.Int.bool %667 : !torch.bool -> !torch.int
%int768_178 = torch.constant.int 768
%669 = torch.aten.mul.int %668, %int768_178 : !torch.int, !torch.int -> !torch.int
%670 = torch.aten.add.int %666, %669 : !torch.int, !torch.int -> !torch.int
%int3_179 = torch.constant.int 3
%671 = torch.aten.select.int %623, %int0_172, %int3_179 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%672 = torch.aten.item %671 : !torch.vtensor<[1],si64> -> !torch.int
%673 = torch.aten.eq.int %672, %int0_172 : !torch.int, !torch.int -> !torch.bool
%674 = torch.aten.Int.bool %673 : !torch.bool -> !torch.int
%675 = torch.aten.mul.int %674, %int0_172 : !torch.int, !torch.int -> !torch.int
%676 = torch.aten.add.int %672, %675 : !torch.int, !torch.int -> !torch.int
%677 = torch.prim.ListConstruct %658, %664, %670, %676 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%678 = torch.aten.reshape %652, %677 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_180 = torch.constant.int 1
%int2_181 = torch.constant.int 2
%679 = torch.aten.transpose.int %678, %int1_180, %int2_181 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
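// The scaled queries (%619) get the same split into 12 heads of 64.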
%680 = torch.vtensor.literal(dense_resource<__33> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_182 = torch.constant.int 0
%int0_183 = torch.constant.int 0
%681 = torch.aten.select.int %680, %int0_182, %int0_183 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%682 = torch.aten.item %681 : !torch.vtensor<[1],si64> -> !torch.int
%683 = torch.aten.eq.int %682, %int0_182 : !torch.int, !torch.int -> !torch.bool
%684 = torch.aten.Int.bool %683 : !torch.bool -> !torch.int
%int1_184 = torch.constant.int 1
%685 = torch.aten.mul.int %684, %int1_184 : !torch.int, !torch.int -> !torch.int
%686 = torch.aten.add.int %682, %685 : !torch.int, !torch.int -> !torch.int
%int1_185 = torch.constant.int 1
%687 = torch.aten.select.int %680, %int0_182, %int1_185 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%688 = torch.aten.item %687 : !torch.vtensor<[1],si64> -> !torch.int
%689 = torch.aten.eq.int %688, %int0_182 : !torch.int, !torch.int -> !torch.bool
%690 = torch.aten.Int.bool %689 : !torch.bool -> !torch.int
%int6_186 = torch.constant.int 6
%691 = torch.aten.mul.int %690, %int6_186 : !torch.int, !torch.int -> !torch.int
%692 = torch.aten.add.int %688, %691 : !torch.int, !torch.int -> !torch.int
%int2_187 = torch.constant.int 2
%693 = torch.aten.select.int %680, %int0_182, %int2_187 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%694 = torch.aten.item %693 : !torch.vtensor<[1],si64> -> !torch.int
%695 = torch.aten.eq.int %694, %int0_182 : !torch.int, !torch.int -> !torch.bool
%696 = torch.aten.Int.bool %695 : !torch.bool -> !torch.int
%int768_188 = torch.constant.int 768
%697 = torch.aten.mul.int %696, %int768_188 : !torch.int, !torch.int -> !torch.int
%698 = torch.aten.add.int %694, %697 : !torch.int, !torch.int -> !torch.int
%int3_189 = torch.constant.int 3
%699 = torch.aten.select.int %680, %int0_182, %int3_189 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%700 = torch.aten.item %699 : !torch.vtensor<[1],si64> -> !torch.int
%701 = torch.aten.eq.int %700, %int0_182 : !torch.int, !torch.int -> !torch.bool
%702 = torch.aten.Int.bool %701 : !torch.bool -> !torch.int
%703 = torch.aten.mul.int %702, %int0_182 : !torch.int, !torch.int -> !torch.int
%704 = torch.aten.add.int %700, %703 : !torch.int, !torch.int -> !torch.int
%705 = torch.prim.ListConstruct %686, %692, %698, %704 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%706 = torch.aten.reshape %619, %705 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_190 = torch.constant.int 1
%int2_191 = torch.constant.int 2
%707 = torch.aten.transpose.int %706, %int1_190, %int2_191 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
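// Collapse batch and head dims: Q, K, and V are each reshaped from
// [1,12,6,64] to [12,6,64] (shape tensors __34/__35/__36 respectively) so
// the attention matmuls below run as plain batched matmuls.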
%708 = torch.vtensor.literal(dense_resource<__34> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%709 = torch.vtensor.literal(dense_resource<__35> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%710 = torch.vtensor.literal(dense_resource<__36> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_192 = torch.constant.int 0
%int0_193 = torch.constant.int 0
%711 = torch.aten.select.int %708, %int0_192, %int0_193 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%712 = torch.aten.item %711 : !torch.vtensor<[1],si64> -> !torch.int
%713 = torch.aten.eq.int %712, %int0_192 : !torch.int, !torch.int -> !torch.bool
%714 = torch.aten.Int.bool %713 : !torch.bool -> !torch.int
%int1_194 = torch.constant.int 1
%715 = torch.aten.mul.int %714, %int1_194 : !torch.int, !torch.int -> !torch.int
%716 = torch.aten.add.int %712, %715 : !torch.int, !torch.int -> !torch.int
%int1_195 = torch.constant.int 1
%717 = torch.aten.select.int %708, %int0_192, %int1_195 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%718 = torch.aten.item %717 : !torch.vtensor<[1],si64> -> !torch.int
%719 = torch.aten.eq.int %718, %int0_192 : !torch.int, !torch.int -> !torch.bool
%720 = torch.aten.Int.bool %719 : !torch.bool -> !torch.int
%int12_196 = torch.constant.int 12
%721 = torch.aten.mul.int %720, %int12_196 : !torch.int, !torch.int -> !torch.int
%722 = torch.aten.add.int %718, %721 : !torch.int, !torch.int -> !torch.int
%int2_197 = torch.constant.int 2
%723 = torch.aten.select.int %708, %int0_192, %int2_197 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%724 = torch.aten.item %723 : !torch.vtensor<[1],si64> -> !torch.int
%725 = torch.aten.eq.int %724, %int0_192 : !torch.int, !torch.int -> !torch.bool
%726 = torch.aten.Int.bool %725 : !torch.bool -> !torch.int
%int6_198 = torch.constant.int 6
%727 = torch.aten.mul.int %726, %int6_198 : !torch.int, !torch.int -> !torch.int
%728 = torch.aten.add.int %724, %727 : !torch.int, !torch.int -> !torch.int
%729 = torch.prim.ListConstruct %716, %722, %728 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%730 = torch.aten.reshape %707, %729 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_199 = torch.constant.int 0
%int0_200 = torch.constant.int 0
%731 = torch.aten.select.int %709, %int0_199, %int0_200 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%732 = torch.aten.item %731 : !torch.vtensor<[1],si64> -> !torch.int
%733 = torch.aten.eq.int %732, %int0_199 : !torch.int, !torch.int -> !torch.bool
%734 = torch.aten.Int.bool %733 : !torch.bool -> !torch.int
%int1_201 = torch.constant.int 1
%735 = torch.aten.mul.int %734, %int1_201 : !torch.int, !torch.int -> !torch.int
%736 = torch.aten.add.int %732, %735 : !torch.int, !torch.int -> !torch.int
%int1_202 = torch.constant.int 1
%737 = torch.aten.select.int %709, %int0_199, %int1_202 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%738 = torch.aten.item %737 : !torch.vtensor<[1],si64> -> !torch.int
%739 = torch.aten.eq.int %738, %int0_199 : !torch.int, !torch.int -> !torch.bool
%740 = torch.aten.Int.bool %739 : !torch.bool -> !torch.int
%int12_203 = torch.constant.int 12
%741 = torch.aten.mul.int %740, %int12_203 : !torch.int, !torch.int -> !torch.int
%742 = torch.aten.add.int %738, %741 : !torch.int, !torch.int -> !torch.int
%int2_204 = torch.constant.int 2
%743 = torch.aten.select.int %709, %int0_199, %int2_204 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%744 = torch.aten.item %743 : !torch.vtensor<[1],si64> -> !torch.int
%745 = torch.aten.eq.int %744, %int0_199 : !torch.int, !torch.int -> !torch.bool
%746 = torch.aten.Int.bool %745 : !torch.bool -> !torch.int
%int6_205 = torch.constant.int 6
%747 = torch.aten.mul.int %746, %int6_205 : !torch.int, !torch.int -> !torch.int
%748 = torch.aten.add.int %744, %747 : !torch.int, !torch.int -> !torch.int
%749 = torch.prim.ListConstruct %736, %742, %748 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%750 = torch.aten.reshape %650, %749 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_206 = torch.constant.int 0
%int0_207 = torch.constant.int 0
%751 = torch.aten.select.int %710, %int0_206, %int0_207 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%752 = torch.aten.item %751 : !torch.vtensor<[1],si64> -> !torch.int
%753 = torch.aten.eq.int %752, %int0_206 : !torch.int, !torch.int -> !torch.bool
%754 = torch.aten.Int.bool %753 : !torch.bool -> !torch.int
%int1_208 = torch.constant.int 1
%755 = torch.aten.mul.int %754, %int1_208 : !torch.int, !torch.int -> !torch.int
%756 = torch.aten.add.int %752, %755 : !torch.int, !torch.int -> !torch.int
%int1_209 = torch.constant.int 1
%757 = torch.aten.select.int %710, %int0_206, %int1_209 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%758 = torch.aten.item %757 : !torch.vtensor<[1],si64> -> !torch.int
%759 = torch.aten.eq.int %758, %int0_206 : !torch.int, !torch.int -> !torch.bool
%760 = torch.aten.Int.bool %759 : !torch.bool -> !torch.int
%int12_210 = torch.constant.int 12
%761 = torch.aten.mul.int %760, %int12_210 : !torch.int, !torch.int -> !torch.int
%762 = torch.aten.add.int %758, %761 : !torch.int, !torch.int -> !torch.int
%int2_211 = torch.constant.int 2
%763 = torch.aten.select.int %710, %int0_206, %int2_211 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%764 = torch.aten.item %763 : !torch.vtensor<[1],si64> -> !torch.int
%765 = torch.aten.eq.int %764, %int0_206 : !torch.int, !torch.int -> !torch.bool
%766 = torch.aten.Int.bool %765 : !torch.bool -> !torch.int
%int6_212 = torch.constant.int 6
%767 = torch.aten.mul.int %766, %int6_212 : !torch.int, !torch.int -> !torch.int
%768 = torch.aten.add.int %764, %767 : !torch.int, !torch.int -> !torch.int
%769 = torch.prim.ListConstruct %756, %762, %768 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%770 = torch.aten.reshape %679, %769 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
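// Attention scores: K is transposed to [12,64,6] and Q @ K^T yields [12,6,6]
// raw scores per head.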
%int1_213 = torch.constant.int 1
%int2_214 = torch.constant.int 2
%771 = torch.aten.transpose.int %750, %int1_213, %int2_214 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%772 = torch.aten.matmul %730, %771 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
%773 = torch.vtensor.literal(dense_resource<__37> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_215 = torch.constant.int 0
%int0_216 = torch.constant.int 0
%774 = torch.aten.select.int %773, %int0_215, %int0_216 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%775 = torch.aten.item %774 : !torch.vtensor<[1],si64> -> !torch.int
%776 = torch.aten.eq.int %775, %int0_215 : !torch.int, !torch.int -> !torch.bool
%777 = torch.aten.Int.bool %776 : !torch.bool -> !torch.int
%int12_217 = torch.constant.int 12
%778 = torch.aten.mul.int %777, %int12_217 : !torch.int, !torch.int -> !torch.int
%779 = torch.aten.add.int %775, %778 : !torch.int, !torch.int -> !torch.int
%int1_218 = torch.constant.int 1
%780 = torch.aten.select.int %773, %int0_215, %int1_218 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%781 = torch.aten.item %780 : !torch.vtensor<[1],si64> -> !torch.int
%782 = torch.aten.eq.int %781, %int0_215 : !torch.int, !torch.int -> !torch.bool
%783 = torch.aten.Int.bool %782 : !torch.bool -> !torch.int
%int6_219 = torch.constant.int 6
%784 = torch.aten.mul.int %783, %int6_219 : !torch.int, !torch.int -> !torch.int
%785 = torch.aten.add.int %781, %784 : !torch.int, !torch.int -> !torch.int
%int2_220 = torch.constant.int 2
%786 = torch.aten.select.int %773, %int0_215, %int2_220 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%787 = torch.aten.item %786 : !torch.vtensor<[1],si64> -> !torch.int
%788 = torch.aten.eq.int %787, %int0_215 : !torch.int, !torch.int -> !torch.bool
%789 = torch.aten.Int.bool %788 : !torch.bool -> !torch.int
%int6_221 = torch.constant.int 6
%790 = torch.aten.mul.int %789, %int6_221 : !torch.int, !torch.int -> !torch.int
%791 = torch.aten.add.int %787, %790 : !torch.int, !torch.int -> !torch.int
%int3_222 = torch.constant.int 3
%792 = torch.aten.select.int %773, %int0_215, %int3_222 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%793 = torch.aten.item %792 : !torch.vtensor<[1],si64> -> !torch.int
%794 = torch.aten.eq.int %793, %int0_215 : !torch.int, !torch.int -> !torch.bool
%795 = torch.aten.Int.bool %794 : !torch.bool -> !torch.int
%796 = torch.aten.mul.int %795, %int0_215 : !torch.int, !torch.int -> !torch.int
%797 = torch.aten.add.int %793, %796 : !torch.int, !torch.int -> !torch.int
%798 = torch.prim.ListConstruct %779, %785, %791, %797 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%799 = torch.aten.reshape %772, %798 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
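// Add the broadcast attention mask %277; its leading dims are dynamic, so
// the result's batch dim becomes ?. The maximum against scalar __38 then
// floors masked positions (presumably at float32's minimum, matching OPT's
// clamp of masked scores).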
%int1_223 = torch.constant.int 1
%800 = torch.aten.add.Tensor %799, %277, %int1_223 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%801 = torch.vtensor.literal(dense_resource<__38> : tensor<f32>) : !torch.vtensor<[],f32>
%802 = torch.aten.maximum %800, %801 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
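// Reshape back to [12,6,6]. Note the -1 multiplier for dim 0: the input's
// batch dim is dynamic, so a 0 entry in shape tensor __39 would resolve to
// -1 and let aten.reshape infer that dimension.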
%803 = torch.vtensor.literal(dense_resource<__39> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_224 = torch.constant.int 0
%int0_225 = torch.constant.int 0
%804 = torch.aten.select.int %803, %int0_224, %int0_225 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%805 = torch.aten.item %804 : !torch.vtensor<[1],si64> -> !torch.int
%806 = torch.aten.eq.int %805, %int0_224 : !torch.int, !torch.int -> !torch.bool
%807 = torch.aten.Int.bool %806 : !torch.bool -> !torch.int
%int-1_226 = torch.constant.int -1
%808 = torch.aten.mul.int %807, %int-1_226 : !torch.int, !torch.int -> !torch.int
%809 = torch.aten.add.int %805, %808 : !torch.int, !torch.int -> !torch.int
%int1_227 = torch.constant.int 1
%810 = torch.aten.select.int %803, %int0_224, %int1_227 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%811 = torch.aten.item %810 : !torch.vtensor<[1],si64> -> !torch.int
%812 = torch.aten.eq.int %811, %int0_224 : !torch.int, !torch.int -> !torch.bool
%813 = torch.aten.Int.bool %812 : !torch.bool -> !torch.int
%int12_228 = torch.constant.int 12
%814 = torch.aten.mul.int %813, %int12_228 : !torch.int, !torch.int -> !torch.int
%815 = torch.aten.add.int %811, %814 : !torch.int, !torch.int -> !torch.int
%int2_229 = torch.constant.int 2
%816 = torch.aten.select.int %803, %int0_224, %int2_229 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%817 = torch.aten.item %816 : !torch.vtensor<[1],si64> -> !torch.int
%818 = torch.aten.eq.int %817, %int0_224 : !torch.int, !torch.int -> !torch.bool
%819 = torch.aten.Int.bool %818 : !torch.bool -> !torch.int
%int6_230 = torch.constant.int 6
%820 = torch.aten.mul.int %819, %int6_230 : !torch.int, !torch.int -> !torch.int
%821 = torch.aten.add.int %817, %820 : !torch.int, !torch.int -> !torch.int
%822 = torch.prim.ListConstruct %809, %815, %821 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%823 = torch.aten.reshape %802, %822 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
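// Softmax over the last dim of the [12,6,6] scores, then attn @ V gives the
// per-head context [12,6,64].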
%int2_231 = torch.constant.int 2
%none_232 = torch.constant.none
%824 = torch.aten.softmax.int %823, %int2_231, %none_232 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%825 = torch.aten.matmul %824, %770 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
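// Un-flatten the heads: context goes back to [1,12,6,64] (shape __40), is
// transposed to [1,6,12,64], and the heads are merged into [1,6,768]
// (shape __41).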
%826 = torch.vtensor.literal(dense_resource<__40> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_233 = torch.constant.int 0
%int0_234 = torch.constant.int 0
%827 = torch.aten.select.int %826, %int0_233, %int0_234 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%828 = torch.aten.item %827 : !torch.vtensor<[1],si64> -> !torch.int
%829 = torch.aten.eq.int %828, %int0_233 : !torch.int, !torch.int -> !torch.bool
%830 = torch.aten.Int.bool %829 : !torch.bool -> !torch.int
%int12_235 = torch.constant.int 12
%831 = torch.aten.mul.int %830, %int12_235 : !torch.int, !torch.int -> !torch.int
%832 = torch.aten.add.int %828, %831 : !torch.int, !torch.int -> !torch.int
%int1_236 = torch.constant.int 1
%833 = torch.aten.select.int %826, %int0_233, %int1_236 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%834 = torch.aten.item %833 : !torch.vtensor<[1],si64> -> !torch.int
%835 = torch.aten.eq.int %834, %int0_233 : !torch.int, !torch.int -> !torch.bool
%836 = torch.aten.Int.bool %835 : !torch.bool -> !torch.int
%int6_237 = torch.constant.int 6
%837 = torch.aten.mul.int %836, %int6_237 : !torch.int, !torch.int -> !torch.int
%838 = torch.aten.add.int %834, %837 : !torch.int, !torch.int -> !torch.int
%int2_238 = torch.constant.int 2
%839 = torch.aten.select.int %826, %int0_233, %int2_238 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%840 = torch.aten.item %839 : !torch.vtensor<[1],si64> -> !torch.int
%841 = torch.aten.eq.int %840, %int0_233 : !torch.int, !torch.int -> !torch.bool
%842 = torch.aten.Int.bool %841 : !torch.bool -> !torch.int
%int64_239 = torch.constant.int 64
%843 = torch.aten.mul.int %842, %int64_239 : !torch.int, !torch.int -> !torch.int
%844 = torch.aten.add.int %840, %843 : !torch.int, !torch.int -> !torch.int
%int3_240 = torch.constant.int 3
%845 = torch.aten.select.int %826, %int0_233, %int3_240 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%846 = torch.aten.item %845 : !torch.vtensor<[1],si64> -> !torch.int
%847 = torch.aten.eq.int %846, %int0_233 : !torch.int, !torch.int -> !torch.bool
%848 = torch.aten.Int.bool %847 : !torch.bool -> !torch.int
%849 = torch.aten.mul.int %848, %int0_233 : !torch.int, !torch.int -> !torch.int
%850 = torch.aten.add.int %846, %849 : !torch.int, !torch.int -> !torch.int
%851 = torch.prim.ListConstruct %832, %838, %844, %850 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%852 = torch.aten.reshape %825, %851 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_241 = torch.constant.int 1
%int2_242 = torch.constant.int 2
%853 = torch.aten.transpose.int %852, %int1_241, %int2_242 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%854 = torch.vtensor.literal(dense_resource<__41> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_243 = torch.constant.int 0
%int0_244 = torch.constant.int 0
%855 = torch.aten.select.int %854, %int0_243, %int0_244 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%856 = torch.aten.item %855 : !torch.vtensor<[1],si64> -> !torch.int
%857 = torch.aten.eq.int %856, %int0_243 : !torch.int, !torch.int -> !torch.bool
%858 = torch.aten.Int.bool %857 : !torch.bool -> !torch.int
%int1_245 = torch.constant.int 1
%859 = torch.aten.mul.int %858, %int1_245 : !torch.int, !torch.int -> !torch.int
%860 = torch.aten.add.int %856, %859 : !torch.int, !torch.int -> !torch.int
%int1_246 = torch.constant.int 1
%861 = torch.aten.select.int %854, %int0_243, %int1_246 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%862 = torch.aten.item %861 : !torch.vtensor<[1],si64> -> !torch.int
%863 = torch.aten.eq.int %862, %int0_243 : !torch.int, !torch.int -> !torch.bool
%864 = torch.aten.Int.bool %863 : !torch.bool -> !torch.int
%int6_247 = torch.constant.int 6
%865 = torch.aten.mul.int %864, %int6_247 : !torch.int, !torch.int -> !torch.int
%866 = torch.aten.add.int %862, %865 : !torch.int, !torch.int -> !torch.int
%int2_248 = torch.constant.int 2
%867 = torch.aten.select.int %854, %int0_243, %int2_248 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%868 = torch.aten.item %867 : !torch.vtensor<[1],si64> -> !torch.int
%869 = torch.aten.eq.int %868, %int0_243 : !torch.int, !torch.int -> !torch.bool
%870 = torch.aten.Int.bool %869 : !torch.bool -> !torch.int
%int12_249 = torch.constant.int 12
%871 = torch.aten.mul.int %870, %int12_249 : !torch.int, !torch.int -> !torch.int
%872 = torch.aten.add.int %868, %871 : !torch.int, !torch.int -> !torch.int
%873 = torch.prim.ListConstruct %860, %866, %872 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%874 = torch.aten.reshape %853, %873 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
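// Attention output projection (x @ %155 + bias %19) and residual add with
// the layer input %614.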
%875 = torch.aten.matmul %874, %155 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_250 = torch.constant.int 1
%876 = torch.aten.add.Tensor %19, %875, %int1_250 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_251 = torch.constant.int 1
%877 = torch.aten.add.Tensor %614, %876, %int1_251 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
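// Flatten [1,6,768] to [6,768] (shape __42) so the MLP below can use 2-D
// torch.aten.mm.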
%878 = torch.vtensor.literal(dense_resource<__42> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_252 = torch.constant.int 0
%int0_253 = torch.constant.int 0
%879 = torch.aten.select.int %878, %int0_252, %int0_253 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%880 = torch.aten.item %879 : !torch.vtensor<[1],si64> -> !torch.int
%881 = torch.aten.eq.int %880, %int0_252 : !torch.int, !torch.int -> !torch.bool
%882 = torch.aten.Int.bool %881 : !torch.bool -> !torch.int
%int1_254 = torch.constant.int 1
%883 = torch.aten.mul.int %882, %int1_254 : !torch.int, !torch.int -> !torch.int
%884 = torch.aten.add.int %880, %883 : !torch.int, !torch.int -> !torch.int
%int1_255 = torch.constant.int 1
%885 = torch.aten.select.int %878, %int0_252, %int1_255 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%886 = torch.aten.item %885 : !torch.vtensor<[1],si64> -> !torch.int
%887 = torch.aten.eq.int %886, %int0_252 : !torch.int, !torch.int -> !torch.bool
%888 = torch.aten.Int.bool %887 : !torch.bool -> !torch.int
%int6_256 = torch.constant.int 6
%889 = torch.aten.mul.int %888, %int6_256 : !torch.int, !torch.int -> !torch.int
%890 = torch.aten.add.int %886, %889 : !torch.int, !torch.int -> !torch.int
%891 = torch.prim.ListConstruct %884, %890 : (!torch.int, !torch.int) -> !torch.list<int>
%892 = torch.aten.reshape %877, %891 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
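// final_layer_norm (%26/%27), then the MLP: fc1 = x @ %22^T + %23, ReLU,
// fc2 = h @ %24^T + %25, and the residual add -- closing this decoder layer.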
%float9.999990e-06_257 = torch.constant.float 9.9999997473787516E-6
%int768_258 = torch.constant.int 768
%893 = torch.prim.ListConstruct %int768_258 : (!torch.int) -> !torch.list<int>
%result0_259, %result1_260, %result2_261 = torch.aten.native_layer_norm %892, %893, %26, %27, %float9.999990e-06_257 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_262 = torch.constant.int 0
%int1_263 = torch.constant.int 1
%894 = torch.aten.transpose.int %22, %int0_262, %int1_263 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%895 = torch.aten.mm %result0_259, %894 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%896 = torch.aten.add.Tensor %895, %23, %int1_263 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%897 = torch.aten.relu %896 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_264 = torch.constant.int 0
%int1_265 = torch.constant.int 1
%898 = torch.aten.transpose.int %24, %int0_264, %int1_265 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%899 = torch.aten.mm %897, %898 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%900 = torch.aten.add.Tensor %899, %25, %int1_265 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_266 = torch.constant.int 1
%901 = torch.aten.add.Tensor %892, %900, %int1_266 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
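// Reshape back to [1,6,768] (shape __43): the next decoder layer starts here
// and repeats the whole sequence with its own weights (%156-%159, norms
// %32/%33, resources __44-__56).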
%902 = torch.vtensor.literal(dense_resource<__43> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_267 = torch.constant.int 0
%int0_268 = torch.constant.int 0
%903 = torch.aten.select.int %902, %int0_267, %int0_268 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%904 = torch.aten.item %903 : !torch.vtensor<[1],si64> -> !torch.int
%905 = torch.aten.eq.int %904, %int0_267 : !torch.int, !torch.int -> !torch.bool
%906 = torch.aten.Int.bool %905 : !torch.bool -> !torch.int
%int6_269 = torch.constant.int 6
%907 = torch.aten.mul.int %906, %int6_269 : !torch.int, !torch.int -> !torch.int
%908 = torch.aten.add.int %904, %907 : !torch.int, !torch.int -> !torch.int
%int1_270 = torch.constant.int 1
%909 = torch.aten.select.int %902, %int0_267, %int1_270 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%910 = torch.aten.item %909 : !torch.vtensor<[1],si64> -> !torch.int
%911 = torch.aten.eq.int %910, %int0_267 : !torch.int, !torch.int -> !torch.bool
%912 = torch.aten.Int.bool %911 : !torch.bool -> !torch.int
%int768_271 = torch.constant.int 768
%913 = torch.aten.mul.int %912, %int768_271 : !torch.int, !torch.int -> !torch.int
%914 = torch.aten.add.int %910, %913 : !torch.int, !torch.int -> !torch.int
%int2_272 = torch.constant.int 2
%915 = torch.aten.select.int %902, %int0_267, %int2_272 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%916 = torch.aten.item %915 : !torch.vtensor<[1],si64> -> !torch.int
%917 = torch.aten.eq.int %916, %int0_267 : !torch.int, !torch.int -> !torch.bool
%918 = torch.aten.Int.bool %917 : !torch.bool -> !torch.int
%919 = torch.aten.mul.int %918, %int0_267 : !torch.int, !torch.int -> !torch.int
%920 = torch.aten.add.int %916, %919 : !torch.int, !torch.int -> !torch.int
%921 = torch.prim.ListConstruct %908, %914, %920 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%922 = torch.aten.reshape %901, %921 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%float9.999990e-06_273 = torch.constant.float 9.9999997473787516E-6
%int768_274 = torch.constant.int 768
%923 = torch.prim.ListConstruct %int768_274 : (!torch.int) -> !torch.list<int>
%result0_275, %result1_276, %result2_277 = torch.aten.native_layer_norm %922, %923, %32, %33, %float9.999990e-06_273 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
%924 = torch.aten.matmul %result0_275, %156 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_278 = torch.constant.int 1
%925 = torch.aten.add.Tensor %30, %924, %int1_278 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%926 = torch.vtensor.literal(dense_resource<__44> : tensor<f32>) : !torch.vtensor<[],f32>
%927 = torch.aten.mul.Tensor %925, %926 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%928 = torch.aten.matmul %result0_275, %157 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_279 = torch.constant.int 1
%929 = torch.aten.add.Tensor %28, %928, %int1_279 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%930 = torch.vtensor.literal(dense_resource<__45> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%931 = torch.vtensor.literal(dense_resource<__46> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_280 = torch.constant.int 0
%int0_281 = torch.constant.int 0
%932 = torch.aten.select.int %930, %int0_280, %int0_281 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%933 = torch.aten.item %932 : !torch.vtensor<[1],si64> -> !torch.int
%934 = torch.aten.eq.int %933, %int0_280 : !torch.int, !torch.int -> !torch.bool
%935 = torch.aten.Int.bool %934 : !torch.bool -> !torch.int
%int1_282 = torch.constant.int 1
%936 = torch.aten.mul.int %935, %int1_282 : !torch.int, !torch.int -> !torch.int
%937 = torch.aten.add.int %933, %936 : !torch.int, !torch.int -> !torch.int
%int1_283 = torch.constant.int 1
%938 = torch.aten.select.int %930, %int0_280, %int1_283 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%939 = torch.aten.item %938 : !torch.vtensor<[1],si64> -> !torch.int
%940 = torch.aten.eq.int %939, %int0_280 : !torch.int, !torch.int -> !torch.bool
%941 = torch.aten.Int.bool %940 : !torch.bool -> !torch.int
%int6_284 = torch.constant.int 6
%942 = torch.aten.mul.int %941, %int6_284 : !torch.int, !torch.int -> !torch.int
%943 = torch.aten.add.int %939, %942 : !torch.int, !torch.int -> !torch.int
%int2_285 = torch.constant.int 2
%944 = torch.aten.select.int %930, %int0_280, %int2_285 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%945 = torch.aten.item %944 : !torch.vtensor<[1],si64> -> !torch.int
%946 = torch.aten.eq.int %945, %int0_280 : !torch.int, !torch.int -> !torch.bool
%947 = torch.aten.Int.bool %946 : !torch.bool -> !torch.int
%int768_286 = torch.constant.int 768
%948 = torch.aten.mul.int %947, %int768_286 : !torch.int, !torch.int -> !torch.int
%949 = torch.aten.add.int %945, %948 : !torch.int, !torch.int -> !torch.int
%int3_287 = torch.constant.int 3
%950 = torch.aten.select.int %930, %int0_280, %int3_287 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%951 = torch.aten.item %950 : !torch.vtensor<[1],si64> -> !torch.int
%952 = torch.aten.eq.int %951, %int0_280 : !torch.int, !torch.int -> !torch.bool
%953 = torch.aten.Int.bool %952 : !torch.bool -> !torch.int
%954 = torch.aten.mul.int %953, %int0_280 : !torch.int, !torch.int -> !torch.int
%955 = torch.aten.add.int %951, %954 : !torch.int, !torch.int -> !torch.int
%956 = torch.prim.ListConstruct %937, %943, %949, %955 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%957 = torch.aten.reshape %929, %956 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_288 = torch.constant.int 1
%int2_289 = torch.constant.int 2
%958 = torch.aten.transpose.int %957, %int1_288, %int2_289 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%959 = torch.aten.matmul %result0_275, %158 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_290 = torch.constant.int 1
%960 = torch.aten.add.Tensor %29, %959, %int1_290 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_291 = torch.constant.int 0
%int0_292 = torch.constant.int 0
%961 = torch.aten.select.int %931, %int0_291, %int0_292 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%962 = torch.aten.item %961 : !torch.vtensor<[1],si64> -> !torch.int
%963 = torch.aten.eq.int %962, %int0_291 : !torch.int, !torch.int -> !torch.bool
%964 = torch.aten.Int.bool %963 : !torch.bool -> !torch.int
%int1_293 = torch.constant.int 1
%965 = torch.aten.mul.int %964, %int1_293 : !torch.int, !torch.int -> !torch.int
%966 = torch.aten.add.int %962, %965 : !torch.int, !torch.int -> !torch.int
%int1_294 = torch.constant.int 1
%967 = torch.aten.select.int %931, %int0_291, %int1_294 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%968 = torch.aten.item %967 : !torch.vtensor<[1],si64> -> !torch.int
%969 = torch.aten.eq.int %968, %int0_291 : !torch.int, !torch.int -> !torch.bool
%970 = torch.aten.Int.bool %969 : !torch.bool -> !torch.int
%int6_295 = torch.constant.int 6
%971 = torch.aten.mul.int %970, %int6_295 : !torch.int, !torch.int -> !torch.int
%972 = torch.aten.add.int %968, %971 : !torch.int, !torch.int -> !torch.int
%int2_296 = torch.constant.int 2
%973 = torch.aten.select.int %931, %int0_291, %int2_296 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%974 = torch.aten.item %973 : !torch.vtensor<[1],si64> -> !torch.int
%975 = torch.aten.eq.int %974, %int0_291 : !torch.int, !torch.int -> !torch.bool
%976 = torch.aten.Int.bool %975 : !torch.bool -> !torch.int
%int768_297 = torch.constant.int 768
%977 = torch.aten.mul.int %976, %int768_297 : !torch.int, !torch.int -> !torch.int
%978 = torch.aten.add.int %974, %977 : !torch.int, !torch.int -> !torch.int
%int3_298 = torch.constant.int 3
%979 = torch.aten.select.int %931, %int0_291, %int3_298 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%980 = torch.aten.item %979 : !torch.vtensor<[1],si64> -> !torch.int
%981 = torch.aten.eq.int %980, %int0_291 : !torch.int, !torch.int -> !torch.bool
%982 = torch.aten.Int.bool %981 : !torch.bool -> !torch.int
%983 = torch.aten.mul.int %982, %int0_291 : !torch.int, !torch.int -> !torch.int
%984 = torch.aten.add.int %980, %983 : !torch.int, !torch.int -> !torch.int
%985 = torch.prim.ListConstruct %966, %972, %978, %984 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%986 = torch.aten.reshape %960, %985 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_299 = torch.constant.int 1
%int2_300 = torch.constant.int 2
%987 = torch.aten.transpose.int %986, %int1_299, %int2_300 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%988 = torch.vtensor.literal(dense_resource<__47> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_301 = torch.constant.int 0
%int0_302 = torch.constant.int 0
%989 = torch.aten.select.int %988, %int0_301, %int0_302 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%990 = torch.aten.item %989 : !torch.vtensor<[1],si64> -> !torch.int
%991 = torch.aten.eq.int %990, %int0_301 : !torch.int, !torch.int -> !torch.bool
%992 = torch.aten.Int.bool %991 : !torch.bool -> !torch.int
%int1_303 = torch.constant.int 1
%993 = torch.aten.mul.int %992, %int1_303 : !torch.int, !torch.int -> !torch.int
%994 = torch.aten.add.int %990, %993 : !torch.int, !torch.int -> !torch.int
%int1_304 = torch.constant.int 1
%995 = torch.aten.select.int %988, %int0_301, %int1_304 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%996 = torch.aten.item %995 : !torch.vtensor<[1],si64> -> !torch.int
%997 = torch.aten.eq.int %996, %int0_301 : !torch.int, !torch.int -> !torch.bool
%998 = torch.aten.Int.bool %997 : !torch.bool -> !torch.int
%int6_305 = torch.constant.int 6
%999 = torch.aten.mul.int %998, %int6_305 : !torch.int, !torch.int -> !torch.int
%1000 = torch.aten.add.int %996, %999 : !torch.int, !torch.int -> !torch.int
%int2_306 = torch.constant.int 2
%1001 = torch.aten.select.int %988, %int0_301, %int2_306 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1002 = torch.aten.item %1001 : !torch.vtensor<[1],si64> -> !torch.int
%1003 = torch.aten.eq.int %1002, %int0_301 : !torch.int, !torch.int -> !torch.bool
%1004 = torch.aten.Int.bool %1003 : !torch.bool -> !torch.int
%int768_307 = torch.constant.int 768
%1005 = torch.aten.mul.int %1004, %int768_307 : !torch.int, !torch.int -> !torch.int
%1006 = torch.aten.add.int %1002, %1005 : !torch.int, !torch.int -> !torch.int
%int3_308 = torch.constant.int 3
%1007 = torch.aten.select.int %988, %int0_301, %int3_308 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1008 = torch.aten.item %1007 : !torch.vtensor<[1],si64> -> !torch.int
%1009 = torch.aten.eq.int %1008, %int0_301 : !torch.int, !torch.int -> !torch.bool
%1010 = torch.aten.Int.bool %1009 : !torch.bool -> !torch.int
%1011 = torch.aten.mul.int %1010, %int0_301 : !torch.int, !torch.int -> !torch.int
%1012 = torch.aten.add.int %1008, %1011 : !torch.int, !torch.int -> !torch.int
%1013 = torch.prim.ListConstruct %994, %1000, %1006, %1012 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1014 = torch.aten.reshape %927, %1013 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_309 = torch.constant.int 1
%int2_310 = torch.constant.int 2
%1015 = torch.aten.transpose.int %1014, %int1_309, %int2_310 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1016 = torch.vtensor.literal(dense_resource<__48> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1017 = torch.vtensor.literal(dense_resource<__49> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1018 = torch.vtensor.literal(dense_resource<__50> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_311 = torch.constant.int 0
%int0_312 = torch.constant.int 0
%1019 = torch.aten.select.int %1016, %int0_311, %int0_312 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1020 = torch.aten.item %1019 : !torch.vtensor<[1],si64> -> !torch.int
%1021 = torch.aten.eq.int %1020, %int0_311 : !torch.int, !torch.int -> !torch.bool
%1022 = torch.aten.Int.bool %1021 : !torch.bool -> !torch.int
%int1_313 = torch.constant.int 1
%1023 = torch.aten.mul.int %1022, %int1_313 : !torch.int, !torch.int -> !torch.int
%1024 = torch.aten.add.int %1020, %1023 : !torch.int, !torch.int -> !torch.int
%int1_314 = torch.constant.int 1
%1025 = torch.aten.select.int %1016, %int0_311, %int1_314 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1026 = torch.aten.item %1025 : !torch.vtensor<[1],si64> -> !torch.int
%1027 = torch.aten.eq.int %1026, %int0_311 : !torch.int, !torch.int -> !torch.bool
%1028 = torch.aten.Int.bool %1027 : !torch.bool -> !torch.int
%int12_315 = torch.constant.int 12
%1029 = torch.aten.mul.int %1028, %int12_315 : !torch.int, !torch.int -> !torch.int
%1030 = torch.aten.add.int %1026, %1029 : !torch.int, !torch.int -> !torch.int
%int2_316 = torch.constant.int 2
%1031 = torch.aten.select.int %1016, %int0_311, %int2_316 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1032 = torch.aten.item %1031 : !torch.vtensor<[1],si64> -> !torch.int
%1033 = torch.aten.eq.int %1032, %int0_311 : !torch.int, !torch.int -> !torch.bool
%1034 = torch.aten.Int.bool %1033 : !torch.bool -> !torch.int
%int6_317 = torch.constant.int 6
%1035 = torch.aten.mul.int %1034, %int6_317 : !torch.int, !torch.int -> !torch.int
%1036 = torch.aten.add.int %1032, %1035 : !torch.int, !torch.int -> !torch.int
%1037 = torch.prim.ListConstruct %1024, %1030, %1036 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1038 = torch.aten.reshape %1015, %1037 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_318 = torch.constant.int 0
%int0_319 = torch.constant.int 0
%1039 = torch.aten.select.int %1017, %int0_318, %int0_319 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1040 = torch.aten.item %1039 : !torch.vtensor<[1],si64> -> !torch.int
%1041 = torch.aten.eq.int %1040, %int0_318 : !torch.int, !torch.int -> !torch.bool
%1042 = torch.aten.Int.bool %1041 : !torch.bool -> !torch.int
%int1_320 = torch.constant.int 1
%1043 = torch.aten.mul.int %1042, %int1_320 : !torch.int, !torch.int -> !torch.int
%1044 = torch.aten.add.int %1040, %1043 : !torch.int, !torch.int -> !torch.int
%int1_321 = torch.constant.int 1
%1045 = torch.aten.select.int %1017, %int0_318, %int1_321 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1046 = torch.aten.item %1045 : !torch.vtensor<[1],si64> -> !torch.int
%1047 = torch.aten.eq.int %1046, %int0_318 : !torch.int, !torch.int -> !torch.bool
%1048 = torch.aten.Int.bool %1047 : !torch.bool -> !torch.int
%int12_322 = torch.constant.int 12
%1049 = torch.aten.mul.int %1048, %int12_322 : !torch.int, !torch.int -> !torch.int
%1050 = torch.aten.add.int %1046, %1049 : !torch.int, !torch.int -> !torch.int
%int2_323 = torch.constant.int 2
%1051 = torch.aten.select.int %1017, %int0_318, %int2_323 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1052 = torch.aten.item %1051 : !torch.vtensor<[1],si64> -> !torch.int
%1053 = torch.aten.eq.int %1052, %int0_318 : !torch.int, !torch.int -> !torch.bool
%1054 = torch.aten.Int.bool %1053 : !torch.bool -> !torch.int
%int6_324 = torch.constant.int 6
%1055 = torch.aten.mul.int %1054, %int6_324 : !torch.int, !torch.int -> !torch.int
%1056 = torch.aten.add.int %1052, %1055 : !torch.int, !torch.int -> !torch.int
%1057 = torch.prim.ListConstruct %1044, %1050, %1056 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1058 = torch.aten.reshape %958, %1057 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_325 = torch.constant.int 0
%int0_326 = torch.constant.int 0
%1059 = torch.aten.select.int %1018, %int0_325, %int0_326 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1060 = torch.aten.item %1059 : !torch.vtensor<[1],si64> -> !torch.int
%1061 = torch.aten.eq.int %1060, %int0_325 : !torch.int, !torch.int -> !torch.bool
%1062 = torch.aten.Int.bool %1061 : !torch.bool -> !torch.int
%int1_327 = torch.constant.int 1
%1063 = torch.aten.mul.int %1062, %int1_327 : !torch.int, !torch.int -> !torch.int
%1064 = torch.aten.add.int %1060, %1063 : !torch.int, !torch.int -> !torch.int
%int1_328 = torch.constant.int 1
%1065 = torch.aten.select.int %1018, %int0_325, %int1_328 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1066 = torch.aten.item %1065 : !torch.vtensor<[1],si64> -> !torch.int
%1067 = torch.aten.eq.int %1066, %int0_325 : !torch.int, !torch.int -> !torch.bool
%1068 = torch.aten.Int.bool %1067 : !torch.bool -> !torch.int
%int12_329 = torch.constant.int 12
%1069 = torch.aten.mul.int %1068, %int12_329 : !torch.int, !torch.int -> !torch.int
%1070 = torch.aten.add.int %1066, %1069 : !torch.int, !torch.int -> !torch.int
%int2_330 = torch.constant.int 2
%1071 = torch.aten.select.int %1018, %int0_325, %int2_330 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1072 = torch.aten.item %1071 : !torch.vtensor<[1],si64> -> !torch.int
%1073 = torch.aten.eq.int %1072, %int0_325 : !torch.int, !torch.int -> !torch.bool
%1074 = torch.aten.Int.bool %1073 : !torch.bool -> !torch.int
%int6_331 = torch.constant.int 6
%1075 = torch.aten.mul.int %1074, %int6_331 : !torch.int, !torch.int -> !torch.int
%1076 = torch.aten.add.int %1072, %1075 : !torch.int, !torch.int -> !torch.int
%1077 = torch.prim.ListConstruct %1064, %1070, %1076 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1078 = torch.aten.reshape %987, %1077 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int1_332 = torch.constant.int 1
%int2_333 = torch.constant.int 2
%1079 = torch.aten.transpose.int %1058, %int1_332, %int2_333 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%1080 = torch.aten.matmul %1038, %1079 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
%1081 = torch.vtensor.literal(dense_resource<__51> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_334 = torch.constant.int 0
%int0_335 = torch.constant.int 0
%1082 = torch.aten.select.int %1081, %int0_334, %int0_335 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1083 = torch.aten.item %1082 : !torch.vtensor<[1],si64> -> !torch.int
%1084 = torch.aten.eq.int %1083, %int0_334 : !torch.int, !torch.int -> !torch.bool
%1085 = torch.aten.Int.bool %1084 : !torch.bool -> !torch.int
%int12_336 = torch.constant.int 12
%1086 = torch.aten.mul.int %1085, %int12_336 : !torch.int, !torch.int -> !torch.int
%1087 = torch.aten.add.int %1083, %1086 : !torch.int, !torch.int -> !torch.int
%int1_337 = torch.constant.int 1
%1088 = torch.aten.select.int %1081, %int0_334, %int1_337 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1089 = torch.aten.item %1088 : !torch.vtensor<[1],si64> -> !torch.int
%1090 = torch.aten.eq.int %1089, %int0_334 : !torch.int, !torch.int -> !torch.bool
%1091 = torch.aten.Int.bool %1090 : !torch.bool -> !torch.int
%int6_338 = torch.constant.int 6
%1092 = torch.aten.mul.int %1091, %int6_338 : !torch.int, !torch.int -> !torch.int
%1093 = torch.aten.add.int %1089, %1092 : !torch.int, !torch.int -> !torch.int
%int2_339 = torch.constant.int 2
%1094 = torch.aten.select.int %1081, %int0_334, %int2_339 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1095 = torch.aten.item %1094 : !torch.vtensor<[1],si64> -> !torch.int
%1096 = torch.aten.eq.int %1095, %int0_334 : !torch.int, !torch.int -> !torch.bool
%1097 = torch.aten.Int.bool %1096 : !torch.bool -> !torch.int
%int6_340 = torch.constant.int 6
%1098 = torch.aten.mul.int %1097, %int6_340 : !torch.int, !torch.int -> !torch.int
%1099 = torch.aten.add.int %1095, %1098 : !torch.int, !torch.int -> !torch.int
%int3_341 = torch.constant.int 3
%1100 = torch.aten.select.int %1081, %int0_334, %int3_341 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1101 = torch.aten.item %1100 : !torch.vtensor<[1],si64> -> !torch.int
%1102 = torch.aten.eq.int %1101, %int0_334 : !torch.int, !torch.int -> !torch.bool
%1103 = torch.aten.Int.bool %1102 : !torch.bool -> !torch.int
%1104 = torch.aten.mul.int %1103, %int0_334 : !torch.int, !torch.int -> !torch.int
%1105 = torch.aten.add.int %1101, %1104 : !torch.int, !torch.int -> !torch.int
%1106 = torch.prim.ListConstruct %1087, %1093, %1099, %1105 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1107 = torch.aten.reshape %1080, %1106 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
%int1_342 = torch.constant.int 1
%1108 = torch.aten.add.Tensor %1107, %277, %int1_342 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%1109 = torch.vtensor.literal(dense_resource<__52> : tensor<f32>) : !torch.vtensor<[],f32>
%1110 = torch.aten.maximum %1108, %1109 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
%1111 = torch.vtensor.literal(dense_resource<__53> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_343 = torch.constant.int 0
%int0_344 = torch.constant.int 0
%1112 = torch.aten.select.int %1111, %int0_343, %int0_344 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1113 = torch.aten.item %1112 : !torch.vtensor<[1],si64> -> !torch.int
%1114 = torch.aten.eq.int %1113, %int0_343 : !torch.int, !torch.int -> !torch.bool
%1115 = torch.aten.Int.bool %1114 : !torch.bool -> !torch.int
%int-1_345 = torch.constant.int -1
%1116 = torch.aten.mul.int %1115, %int-1_345 : !torch.int, !torch.int -> !torch.int
%1117 = torch.aten.add.int %1113, %1116 : !torch.int, !torch.int -> !torch.int
%int1_346 = torch.constant.int 1
%1118 = torch.aten.select.int %1111, %int0_343, %int1_346 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1119 = torch.aten.item %1118 : !torch.vtensor<[1],si64> -> !torch.int
%1120 = torch.aten.eq.int %1119, %int0_343 : !torch.int, !torch.int -> !torch.bool
%1121 = torch.aten.Int.bool %1120 : !torch.bool -> !torch.int
%int12_347 = torch.constant.int 12
%1122 = torch.aten.mul.int %1121, %int12_347 : !torch.int, !torch.int -> !torch.int
%1123 = torch.aten.add.int %1119, %1122 : !torch.int, !torch.int -> !torch.int
%int2_348 = torch.constant.int 2
%1124 = torch.aten.select.int %1111, %int0_343, %int2_348 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1125 = torch.aten.item %1124 : !torch.vtensor<[1],si64> -> !torch.int
%1126 = torch.aten.eq.int %1125, %int0_343 : !torch.int, !torch.int -> !torch.bool
%1127 = torch.aten.Int.bool %1126 : !torch.bool -> !torch.int
%int6_349 = torch.constant.int 6
%1128 = torch.aten.mul.int %1127, %int6_349 : !torch.int, !torch.int -> !torch.int
%1129 = torch.aten.add.int %1125, %1128 : !torch.int, !torch.int -> !torch.int
%1130 = torch.prim.ListConstruct %1117, %1123, %1129 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1131 = torch.aten.reshape %1110, %1130 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
%int2_350 = torch.constant.int 2
%none_351 = torch.constant.none
%1132 = torch.aten.softmax.int %1131, %int2_350, %none_351 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%1133 = torch.aten.matmul %1132, %1078 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
%1134 = torch.vtensor.literal(dense_resource<__54> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_352 = torch.constant.int 0
%int0_353 = torch.constant.int 0
%1135 = torch.aten.select.int %1134, %int0_352, %int0_353 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1136 = torch.aten.item %1135 : !torch.vtensor<[1],si64> -> !torch.int
%1137 = torch.aten.eq.int %1136, %int0_352 : !torch.int, !torch.int -> !torch.bool
%1138 = torch.aten.Int.bool %1137 : !torch.bool -> !torch.int
%int12_354 = torch.constant.int 12
%1139 = torch.aten.mul.int %1138, %int12_354 : !torch.int, !torch.int -> !torch.int
%1140 = torch.aten.add.int %1136, %1139 : !torch.int, !torch.int -> !torch.int
%int1_355 = torch.constant.int 1
%1141 = torch.aten.select.int %1134, %int0_352, %int1_355 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1142 = torch.aten.item %1141 : !torch.vtensor<[1],si64> -> !torch.int
%1143 = torch.aten.eq.int %1142, %int0_352 : !torch.int, !torch.int -> !torch.bool
%1144 = torch.aten.Int.bool %1143 : !torch.bool -> !torch.int
%int6_356 = torch.constant.int 6
%1145 = torch.aten.mul.int %1144, %int6_356 : !torch.int, !torch.int -> !torch.int
%1146 = torch.aten.add.int %1142, %1145 : !torch.int, !torch.int -> !torch.int
%int2_357 = torch.constant.int 2
%1147 = torch.aten.select.int %1134, %int0_352, %int2_357 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1148 = torch.aten.item %1147 : !torch.vtensor<[1],si64> -> !torch.int
%1149 = torch.aten.eq.int %1148, %int0_352 : !torch.int, !torch.int -> !torch.bool
%1150 = torch.aten.Int.bool %1149 : !torch.bool -> !torch.int
%int64_358 = torch.constant.int 64
%1151 = torch.aten.mul.int %1150, %int64_358 : !torch.int, !torch.int -> !torch.int
%1152 = torch.aten.add.int %1148, %1151 : !torch.int, !torch.int -> !torch.int
%int3_359 = torch.constant.int 3
%1153 = torch.aten.select.int %1134, %int0_352, %int3_359 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1154 = torch.aten.item %1153 : !torch.vtensor<[1],si64> -> !torch.int
%1155 = torch.aten.eq.int %1154, %int0_352 : !torch.int, !torch.int -> !torch.bool
%1156 = torch.aten.Int.bool %1155 : !torch.bool -> !torch.int
%1157 = torch.aten.mul.int %1156, %int0_352 : !torch.int, !torch.int -> !torch.int
%1158 = torch.aten.add.int %1154, %1157 : !torch.int, !torch.int -> !torch.int
%1159 = torch.prim.ListConstruct %1140, %1146, %1152, %1158 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1160 = torch.aten.reshape %1133, %1159 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_360 = torch.constant.int 1
%int2_361 = torch.constant.int 2
%1161 = torch.aten.transpose.int %1160, %int1_360, %int2_361 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
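// The transpose moves heads back next to the sequence dim
// ([1,12,6,64] -> [1,6,12,64]); the reshape below merges the 12 heads of
// 64 back into the 768 hidden size.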
%1162 = torch.vtensor.literal(dense_resource<__55> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_362 = torch.constant.int 0
%int0_363 = torch.constant.int 0
%1163 = torch.aten.select.int %1162, %int0_362, %int0_363 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1164 = torch.aten.item %1163 : !torch.vtensor<[1],si64> -> !torch.int
%1165 = torch.aten.eq.int %1164, %int0_362 : !torch.int, !torch.int -> !torch.bool
%1166 = torch.aten.Int.bool %1165 : !torch.bool -> !torch.int
%int1_364 = torch.constant.int 1
%1167 = torch.aten.mul.int %1166, %int1_364 : !torch.int, !torch.int -> !torch.int
%1168 = torch.aten.add.int %1164, %1167 : !torch.int, !torch.int -> !torch.int
%int1_365 = torch.constant.int 1
%1169 = torch.aten.select.int %1162, %int0_362, %int1_365 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1170 = torch.aten.item %1169 : !torch.vtensor<[1],si64> -> !torch.int
%1171 = torch.aten.eq.int %1170, %int0_362 : !torch.int, !torch.int -> !torch.bool
%1172 = torch.aten.Int.bool %1171 : !torch.bool -> !torch.int
%int6_366 = torch.constant.int 6
%1173 = torch.aten.mul.int %1172, %int6_366 : !torch.int, !torch.int -> !torch.int
%1174 = torch.aten.add.int %1170, %1173 : !torch.int, !torch.int -> !torch.int
%int2_367 = torch.constant.int 2
%1175 = torch.aten.select.int %1162, %int0_362, %int2_367 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1176 = torch.aten.item %1175 : !torch.vtensor<[1],si64> -> !torch.int
%1177 = torch.aten.eq.int %1176, %int0_362 : !torch.int, !torch.int -> !torch.bool
%1178 = torch.aten.Int.bool %1177 : !torch.bool -> !torch.int
%int12_368 = torch.constant.int 12
%1179 = torch.aten.mul.int %1178, %int12_368 : !torch.int, !torch.int -> !torch.int
%1180 = torch.aten.add.int %1176, %1179 : !torch.int, !torch.int -> !torch.int
%1181 = torch.prim.ListConstruct %1168, %1174, %1180 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1182 = torch.aten.reshape %1161, %1181 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%1183 = torch.aten.matmul %1182, %159 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_369 = torch.constant.int 1
%1184 = torch.aten.add.Tensor %31, %1183, %int1_369 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_370 = torch.constant.int 1
%1185 = torch.aten.add.Tensor %922, %1184, %int1_370 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
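// Attention output projection (weights %159, bias %31), then the residual
// add with %922, which appears to be the input to this attention block.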
%1186 = torch.vtensor.literal(dense_resource<__56> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_371 = torch.constant.int 0
%int0_372 = torch.constant.int 0
%1187 = torch.aten.select.int %1186, %int0_371, %int0_372 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1188 = torch.aten.item %1187 : !torch.vtensor<[1],si64> -> !torch.int
%1189 = torch.aten.eq.int %1188, %int0_371 : !torch.int, !torch.int -> !torch.bool
%1190 = torch.aten.Int.bool %1189 : !torch.bool -> !torch.int
%int1_373 = torch.constant.int 1
%1191 = torch.aten.mul.int %1190, %int1_373 : !torch.int, !torch.int -> !torch.int
%1192 = torch.aten.add.int %1188, %1191 : !torch.int, !torch.int -> !torch.int
%int1_374 = torch.constant.int 1
%1193 = torch.aten.select.int %1186, %int0_371, %int1_374 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1194 = torch.aten.item %1193 : !torch.vtensor<[1],si64> -> !torch.int
%1195 = torch.aten.eq.int %1194, %int0_371 : !torch.int, !torch.int -> !torch.bool
%1196 = torch.aten.Int.bool %1195 : !torch.bool -> !torch.int
%int6_375 = torch.constant.int 6
%1197 = torch.aten.mul.int %1196, %int6_375 : !torch.int, !torch.int -> !torch.int
%1198 = torch.aten.add.int %1194, %1197 : !torch.int, !torch.int -> !torch.int
%1199 = torch.prim.ListConstruct %1192, %1198 : (!torch.int, !torch.int) -> !torch.list<int>
%1200 = torch.aten.reshape %1185, %1199 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
%float9.999990e-06_376 = torch.constant.float 9.9999997473787516E-6
%int768_377 = torch.constant.int 768
%1201 = torch.prim.ListConstruct %int768_377 : (!torch.int) -> !torch.list<int>
%result0_378, %result1_379, %result2_380 = torch.aten.native_layer_norm %1200, %1201, %38, %39, %float9.999990e-06_376 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
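// Post-attention LayerNorm over the 768 features (eps ~= 1e-5) on the
// sequence flattened to [6,768]; %result0_378 feeds the feed-forward
// block that follows.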
%int0_381 = torch.constant.int 0
%int1_382 = torch.constant.int 1
%1202 = torch.aten.transpose.int %34, %int0_381, %int1_382 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%1203 = torch.aten.mm %result0_378, %1202 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%1204 = torch.aten.add.Tensor %1203, %35, %int1_382 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%1205 = torch.aten.relu %1204 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
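// Feed-forward up-projection: [6,768] @ [768,3072] plus bias, then ReLU
// (OPT uses ReLU rather than GELU as its activation).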
%int0_383 = torch.constant.int 0
%int1_384 = torch.constant.int 1
%1206 = torch.aten.transpose.int %36, %int0_383, %int1_384 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%1207 = torch.aten.mm %1205, %1206 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%1208 = torch.aten.add.Tensor %1207, %37, %int1_384 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_385 = torch.constant.int 1
%1209 = torch.aten.add.Tensor %1200, %1208, %int1_385 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
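// Down-projection back to 768 and the second residual add close out this
// decoder layer.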
%1210 = torch.vtensor.literal(dense_resource<__57> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_386 = torch.constant.int 0
%int0_387 = torch.constant.int 0
%1211 = torch.aten.select.int %1210, %int0_386, %int0_387 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1212 = torch.aten.item %1211 : !torch.vtensor<[1],si64> -> !torch.int
%1213 = torch.aten.eq.int %1212, %int0_386 : !torch.int, !torch.int -> !torch.bool
%1214 = torch.aten.Int.bool %1213 : !torch.bool -> !torch.int
%int6_388 = torch.constant.int 6
%1215 = torch.aten.mul.int %1214, %int6_388 : !torch.int, !torch.int -> !torch.int
%1216 = torch.aten.add.int %1212, %1215 : !torch.int, !torch.int -> !torch.int
%int1_389 = torch.constant.int 1
%1217 = torch.aten.select.int %1210, %int0_386, %int1_389 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1218 = torch.aten.item %1217 : !torch.vtensor<[1],si64> -> !torch.int
%1219 = torch.aten.eq.int %1218, %int0_386 : !torch.int, !torch.int -> !torch.bool
%1220 = torch.aten.Int.bool %1219 : !torch.bool -> !torch.int
%int768_390 = torch.constant.int 768
%1221 = torch.aten.mul.int %1220, %int768_390 : !torch.int, !torch.int -> !torch.int
%1222 = torch.aten.add.int %1218, %1221 : !torch.int, !torch.int -> !torch.int
%int2_391 = torch.constant.int 2
%1223 = torch.aten.select.int %1210, %int0_386, %int2_391 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1224 = torch.aten.item %1223 : !torch.vtensor<[1],si64> -> !torch.int
%1225 = torch.aten.eq.int %1224, %int0_386 : !torch.int, !torch.int -> !torch.bool
%1226 = torch.aten.Int.bool %1225 : !torch.bool -> !torch.int
%1227 = torch.aten.mul.int %1226, %int0_386 : !torch.int, !torch.int -> !torch.int
%1228 = torch.aten.add.int %1224, %1227 : !torch.int, !torch.int -> !torch.int
%1229 = torch.prim.ListConstruct %1216, %1222, %1228 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1230 = torch.aten.reshape %1209, %1229 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%float9.999990e-06_392 = torch.constant.float 9.9999997473787516E-6
%int768_393 = torch.constant.int 768
%1231 = torch.prim.ListConstruct %int768_393 : (!torch.int) -> !torch.list<int>
%result0_394, %result1_395, %result2_396 = torch.aten.native_layer_norm %1230, %1231, %44, %45, %float9.999990e-06_392 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
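// Next decoder layer: this LayerNorm is applied before attention (pre-LN,
// as expected for OPT-125M); %result0_394 feeds the Q/K/V projections below.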
%1232 = torch.aten.matmul %result0_394, %160 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_397 = torch.constant.int 1
%1233 = torch.aten.add.Tensor %42, %1232, %int1_397 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1234 = torch.vtensor.literal(dense_resource<__58> : tensor<f32>) : !torch.vtensor<[],f32>
%1235 = torch.aten.mul.Tensor %1233, %1234 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
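// Query projection plus bias, scaled by the scalar resource __58
// (presumably 1/sqrt(head_dim) = 0.125; the resource payload is not shown
// in this dump).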
%1236 = torch.aten.matmul %result0_394, %161 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_398 = torch.constant.int 1
%1237 = torch.aten.add.Tensor %40, %1236, %int1_398 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1238 = torch.vtensor.literal(dense_resource<__59> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%1239 = torch.vtensor.literal(dense_resource<__60> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_399 = torch.constant.int 0
%int0_400 = torch.constant.int 0
%1240 = torch.aten.select.int %1238, %int0_399, %int0_400 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1241 = torch.aten.item %1240 : !torch.vtensor<[1],si64> -> !torch.int
%1242 = torch.aten.eq.int %1241, %int0_399 : !torch.int, !torch.int -> !torch.bool
%1243 = torch.aten.Int.bool %1242 : !torch.bool -> !torch.int
%int1_401 = torch.constant.int 1
%1244 = torch.aten.mul.int %1243, %int1_401 : !torch.int, !torch.int -> !torch.int
%1245 = torch.aten.add.int %1241, %1244 : !torch.int, !torch.int -> !torch.int
%int1_402 = torch.constant.int 1
%1246 = torch.aten.select.int %1238, %int0_399, %int1_402 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1247 = torch.aten.item %1246 : !torch.vtensor<[1],si64> -> !torch.int
%1248 = torch.aten.eq.int %1247, %int0_399 : !torch.int, !torch.int -> !torch.bool
%1249 = torch.aten.Int.bool %1248 : !torch.bool -> !torch.int
%int6_403 = torch.constant.int 6
%1250 = torch.aten.mul.int %1249, %int6_403 : !torch.int, !torch.int -> !torch.int
%1251 = torch.aten.add.int %1247, %1250 : !torch.int, !torch.int -> !torch.int
%int2_404 = torch.constant.int 2
%1252 = torch.aten.select.int %1238, %int0_399, %int2_404 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1253 = torch.aten.item %1252 : !torch.vtensor<[1],si64> -> !torch.int
%1254 = torch.aten.eq.int %1253, %int0_399 : !torch.int, !torch.int -> !torch.bool
%1255 = torch.aten.Int.bool %1254 : !torch.bool -> !torch.int
%int768_405 = torch.constant.int 768
%1256 = torch.aten.mul.int %1255, %int768_405 : !torch.int, !torch.int -> !torch.int
%1257 = torch.aten.add.int %1253, %1256 : !torch.int, !torch.int -> !torch.int
%int3_406 = torch.constant.int 3
%1258 = torch.aten.select.int %1238, %int0_399, %int3_406 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1259 = torch.aten.item %1258 : !torch.vtensor<[1],si64> -> !torch.int
%1260 = torch.aten.eq.int %1259, %int0_399 : !torch.int, !torch.int -> !torch.bool
%1261 = torch.aten.Int.bool %1260 : !torch.bool -> !torch.int
%1262 = torch.aten.mul.int %1261, %int0_399 : !torch.int, !torch.int -> !torch.int
%1263 = torch.aten.add.int %1259, %1262 : !torch.int, !torch.int -> !torch.int
%1264 = torch.prim.ListConstruct %1245, %1251, %1257, %1263 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1265 = torch.aten.reshape %1237, %1264 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_407 = torch.constant.int 1
%int2_408 = torch.constant.int 2
%1266 = torch.aten.transpose.int %1265, %int1_407, %int2_408 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1267 = torch.aten.matmul %result0_394, %162 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_409 = torch.constant.int 1
%1268 = torch.aten.add.Tensor %41, %1267, %int1_409 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_410 = torch.constant.int 0
%int0_411 = torch.constant.int 0
%1269 = torch.aten.select.int %1239, %int0_410, %int0_411 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1270 = torch.aten.item %1269 : !torch.vtensor<[1],si64> -> !torch.int
%1271 = torch.aten.eq.int %1270, %int0_410 : !torch.int, !torch.int -> !torch.bool
%1272 = torch.aten.Int.bool %1271 : !torch.bool -> !torch.int
%int1_412 = torch.constant.int 1
%1273 = torch.aten.mul.int %1272, %int1_412 : !torch.int, !torch.int -> !torch.int
%1274 = torch.aten.add.int %1270, %1273 : !torch.int, !torch.int -> !torch.int
%int1_413 = torch.constant.int 1
%1275 = torch.aten.select.int %1239, %int0_410, %int1_413 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1276 = torch.aten.item %1275 : !torch.vtensor<[1],si64> -> !torch.int
%1277 = torch.aten.eq.int %1276, %int0_410 : !torch.int, !torch.int -> !torch.bool
%1278 = torch.aten.Int.bool %1277 : !torch.bool -> !torch.int
%int6_414 = torch.constant.int 6
%1279 = torch.aten.mul.int %1278, %int6_414 : !torch.int, !torch.int -> !torch.int
%1280 = torch.aten.add.int %1276, %1279 : !torch.int, !torch.int -> !torch.int
%int2_415 = torch.constant.int 2
%1281 = torch.aten.select.int %1239, %int0_410, %int2_415 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1282 = torch.aten.item %1281 : !torch.vtensor<[1],si64> -> !torch.int
%1283 = torch.aten.eq.int %1282, %int0_410 : !torch.int, !torch.int -> !torch.bool
%1284 = torch.aten.Int.bool %1283 : !torch.bool -> !torch.int
%int768_416 = torch.constant.int 768
%1285 = torch.aten.mul.int %1284, %int768_416 : !torch.int, !torch.int -> !torch.int
%1286 = torch.aten.add.int %1282, %1285 : !torch.int, !torch.int -> !torch.int
%int3_417 = torch.constant.int 3
%1287 = torch.aten.select.int %1239, %int0_410, %int3_417 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1288 = torch.aten.item %1287 : !torch.vtensor<[1],si64> -> !torch.int
%1289 = torch.aten.eq.int %1288, %int0_410 : !torch.int, !torch.int -> !torch.bool
%1290 = torch.aten.Int.bool %1289 : !torch.bool -> !torch.int
%1291 = torch.aten.mul.int %1290, %int0_410 : !torch.int, !torch.int -> !torch.int
%1292 = torch.aten.add.int %1288, %1291 : !torch.int, !torch.int -> !torch.int
%1293 = torch.prim.ListConstruct %1274, %1280, %1286, %1292 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1294 = torch.aten.reshape %1268, %1293 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_418 = torch.constant.int 1
%int2_419 = torch.constant.int 2
%1295 = torch.aten.transpose.int %1294, %int1_418, %int2_419 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1296 = torch.vtensor.literal(dense_resource<__61> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_420 = torch.constant.int 0
%int0_421 = torch.constant.int 0
%1297 = torch.aten.select.int %1296, %int0_420, %int0_421 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1298 = torch.aten.item %1297 : !torch.vtensor<[1],si64> -> !torch.int
%1299 = torch.aten.eq.int %1298, %int0_420 : !torch.int, !torch.int -> !torch.bool
%1300 = torch.aten.Int.bool %1299 : !torch.bool -> !torch.int
%int1_422 = torch.constant.int 1
%1301 = torch.aten.mul.int %1300, %int1_422 : !torch.int, !torch.int -> !torch.int
%1302 = torch.aten.add.int %1298, %1301 : !torch.int, !torch.int -> !torch.int
%int1_423 = torch.constant.int 1
%1303 = torch.aten.select.int %1296, %int0_420, %int1_423 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1304 = torch.aten.item %1303 : !torch.vtensor<[1],si64> -> !torch.int
%1305 = torch.aten.eq.int %1304, %int0_420 : !torch.int, !torch.int -> !torch.bool
%1306 = torch.aten.Int.bool %1305 : !torch.bool -> !torch.int
%int6_424 = torch.constant.int 6
%1307 = torch.aten.mul.int %1306, %int6_424 : !torch.int, !torch.int -> !torch.int
%1308 = torch.aten.add.int %1304, %1307 : !torch.int, !torch.int -> !torch.int
%int2_425 = torch.constant.int 2
%1309 = torch.aten.select.int %1296, %int0_420, %int2_425 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1310 = torch.aten.item %1309 : !torch.vtensor<[1],si64> -> !torch.int
%1311 = torch.aten.eq.int %1310, %int0_420 : !torch.int, !torch.int -> !torch.bool
%1312 = torch.aten.Int.bool %1311 : !torch.bool -> !torch.int
%int768_426 = torch.constant.int 768
%1313 = torch.aten.mul.int %1312, %int768_426 : !torch.int, !torch.int -> !torch.int
%1314 = torch.aten.add.int %1310, %1313 : !torch.int, !torch.int -> !torch.int
%int3_427 = torch.constant.int 3
%1315 = torch.aten.select.int %1296, %int0_420, %int3_427 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1316 = torch.aten.item %1315 : !torch.vtensor<[1],si64> -> !torch.int
%1317 = torch.aten.eq.int %1316, %int0_420 : !torch.int, !torch.int -> !torch.bool
%1318 = torch.aten.Int.bool %1317 : !torch.bool -> !torch.int
%1319 = torch.aten.mul.int %1318, %int0_420 : !torch.int, !torch.int -> !torch.int
%1320 = torch.aten.add.int %1316, %1319 : !torch.int, !torch.int -> !torch.int
%1321 = torch.prim.ListConstruct %1302, %1308, %1314, %1320 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1322 = torch.aten.reshape %1235, %1321 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_428 = torch.constant.int 1
%int2_429 = torch.constant.int 2
%1323 = torch.aten.transpose.int %1322, %int1_428, %int2_429 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
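// Q, K, and V are now all [1,12,6,64]; the next three reshape blocks
// collapse the batch and head dims to [12,6,64] so the attention matmuls
// can run as a single batched matmul over the 12 heads.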
%1324 = torch.vtensor.literal(dense_resource<__62> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1325 = torch.vtensor.literal(dense_resource<__63> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1326 = torch.vtensor.literal(dense_resource<__64> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_430 = torch.constant.int 0
%int0_431 = torch.constant.int 0
%1327 = torch.aten.select.int %1324, %int0_430, %int0_431 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1328 = torch.aten.item %1327 : !torch.vtensor<[1],si64> -> !torch.int
%1329 = torch.aten.eq.int %1328, %int0_430 : !torch.int, !torch.int -> !torch.bool
%1330 = torch.aten.Int.bool %1329 : !torch.bool -> !torch.int
%int1_432 = torch.constant.int 1
%1331 = torch.aten.mul.int %1330, %int1_432 : !torch.int, !torch.int -> !torch.int
%1332 = torch.aten.add.int %1328, %1331 : !torch.int, !torch.int -> !torch.int
%int1_433 = torch.constant.int 1
%1333 = torch.aten.select.int %1324, %int0_430, %int1_433 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1334 = torch.aten.item %1333 : !torch.vtensor<[1],si64> -> !torch.int
%1335 = torch.aten.eq.int %1334, %int0_430 : !torch.int, !torch.int -> !torch.bool
%1336 = torch.aten.Int.bool %1335 : !torch.bool -> !torch.int
%int12_434 = torch.constant.int 12
%1337 = torch.aten.mul.int %1336, %int12_434 : !torch.int, !torch.int -> !torch.int
%1338 = torch.aten.add.int %1334, %1337 : !torch.int, !torch.int -> !torch.int
%int2_435 = torch.constant.int 2
%1339 = torch.aten.select.int %1324, %int0_430, %int2_435 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1340 = torch.aten.item %1339 : !torch.vtensor<[1],si64> -> !torch.int
%1341 = torch.aten.eq.int %1340, %int0_430 : !torch.int, !torch.int -> !torch.bool
%1342 = torch.aten.Int.bool %1341 : !torch.bool -> !torch.int
%int6_436 = torch.constant.int 6
%1343 = torch.aten.mul.int %1342, %int6_436 : !torch.int, !torch.int -> !torch.int
%1344 = torch.aten.add.int %1340, %1343 : !torch.int, !torch.int -> !torch.int
%1345 = torch.prim.ListConstruct %1332, %1338, %1344 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1346 = torch.aten.reshape %1323, %1345 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_437 = torch.constant.int 0
%int0_438 = torch.constant.int 0
%1347 = torch.aten.select.int %1325, %int0_437, %int0_438 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1348 = torch.aten.item %1347 : !torch.vtensor<[1],si64> -> !torch.int
%1349 = torch.aten.eq.int %1348, %int0_437 : !torch.int, !torch.int -> !torch.bool
%1350 = torch.aten.Int.bool %1349 : !torch.bool -> !torch.int
%int1_439 = torch.constant.int 1
%1351 = torch.aten.mul.int %1350, %int1_439 : !torch.int, !torch.int -> !torch.int
%1352 = torch.aten.add.int %1348, %1351 : !torch.int, !torch.int -> !torch.int
%int1_440 = torch.constant.int 1
%1353 = torch.aten.select.int %1325, %int0_437, %int1_440 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1354 = torch.aten.item %1353 : !torch.vtensor<[1],si64> -> !torch.int
%1355 = torch.aten.eq.int %1354, %int0_437 : !torch.int, !torch.int -> !torch.bool
%1356 = torch.aten.Int.bool %1355 : !torch.bool -> !torch.int
%int12_441 = torch.constant.int 12
%1357 = torch.aten.mul.int %1356, %int12_441 : !torch.int, !torch.int -> !torch.int
%1358 = torch.aten.add.int %1354, %1357 : !torch.int, !torch.int -> !torch.int
%int2_442 = torch.constant.int 2
%1359 = torch.aten.select.int %1325, %int0_437, %int2_442 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1360 = torch.aten.item %1359 : !torch.vtensor<[1],si64> -> !torch.int
%1361 = torch.aten.eq.int %1360, %int0_437 : !torch.int, !torch.int -> !torch.bool
%1362 = torch.aten.Int.bool %1361 : !torch.bool -> !torch.int
%int6_443 = torch.constant.int 6
%1363 = torch.aten.mul.int %1362, %int6_443 : !torch.int, !torch.int -> !torch.int
%1364 = torch.aten.add.int %1360, %1363 : !torch.int, !torch.int -> !torch.int
%1365 = torch.prim.ListConstruct %1352, %1358, %1364 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1366 = torch.aten.reshape %1266, %1365 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_444 = torch.constant.int 0
%int0_445 = torch.constant.int 0
%1367 = torch.aten.select.int %1326, %int0_444, %int0_445 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1368 = torch.aten.item %1367 : !torch.vtensor<[1],si64> -> !torch.int
%1369 = torch.aten.eq.int %1368, %int0_444 : !torch.int, !torch.int -> !torch.bool
%1370 = torch.aten.Int.bool %1369 : !torch.bool -> !torch.int
%int1_446 = torch.constant.int 1
%1371 = torch.aten.mul.int %1370, %int1_446 : !torch.int, !torch.int -> !torch.int
%1372 = torch.aten.add.int %1368, %1371 : !torch.int, !torch.int -> !torch.int
%int1_447 = torch.constant.int 1
%1373 = torch.aten.select.int %1326, %int0_444, %int1_447 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1374 = torch.aten.item %1373 : !torch.vtensor<[1],si64> -> !torch.int
%1375 = torch.aten.eq.int %1374, %int0_444 : !torch.int, !torch.int -> !torch.bool
%1376 = torch.aten.Int.bool %1375 : !torch.bool -> !torch.int
%int12_448 = torch.constant.int 12
%1377 = torch.aten.mul.int %1376, %int12_448 : !torch.int, !torch.int -> !torch.int
%1378 = torch.aten.add.int %1374, %1377 : !torch.int, !torch.int -> !torch.int
%int2_449 = torch.constant.int 2
%1379 = torch.aten.select.int %1326, %int0_444, %int2_449 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1380 = torch.aten.item %1379 : !torch.vtensor<[1],si64> -> !torch.int
%1381 = torch.aten.eq.int %1380, %int0_444 : !torch.int, !torch.int -> !torch.bool
%1382 = torch.aten.Int.bool %1381 : !torch.bool -> !torch.int
%int6_450 = torch.constant.int 6
%1383 = torch.aten.mul.int %1382, %int6_450 : !torch.int, !torch.int -> !torch.int
%1384 = torch.aten.add.int %1380, %1383 : !torch.int, !torch.int -> !torch.int
%1385 = torch.prim.ListConstruct %1372, %1378, %1384 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1386 = torch.aten.reshape %1295, %1385 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int1_451 = torch.constant.int 1
%int2_452 = torch.constant.int 2
%1387 = torch.aten.transpose.int %1366, %int1_451, %int2_452 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%1388 = torch.aten.matmul %1346, %1387 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
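// With K transposed to [12,64,6], the batched matmul Q @ K^T produces the
// raw attention scores [12,6,6] for this layer.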
%1389 = torch.vtensor.literal(dense_resource<__65> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_453 = torch.constant.int 0
%int0_454 = torch.constant.int 0
%1390 = torch.aten.select.int %1389, %int0_453, %int0_454 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1391 = torch.aten.item %1390 : !torch.vtensor<[1],si64> -> !torch.int
%1392 = torch.aten.eq.int %1391, %int0_453 : !torch.int, !torch.int -> !torch.bool
%1393 = torch.aten.Int.bool %1392 : !torch.bool -> !torch.int
%int12_455 = torch.constant.int 12
%1394 = torch.aten.mul.int %1393, %int12_455 : !torch.int, !torch.int -> !torch.int
%1395 = torch.aten.add.int %1391, %1394 : !torch.int, !torch.int -> !torch.int
%int1_456 = torch.constant.int 1
%1396 = torch.aten.select.int %1389, %int0_453, %int1_456 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1397 = torch.aten.item %1396 : !torch.vtensor<[1],si64> -> !torch.int
%1398 = torch.aten.eq.int %1397, %int0_453 : !torch.int, !torch.int -> !torch.bool
%1399 = torch.aten.Int.bool %1398 : !torch.bool -> !torch.int
%int6_457 = torch.constant.int 6
%1400 = torch.aten.mul.int %1399, %int6_457 : !torch.int, !torch.int -> !torch.int
%1401 = torch.aten.add.int %1397, %1400 : !torch.int, !torch.int -> !torch.int
%int2_458 = torch.constant.int 2
%1402 = torch.aten.select.int %1389, %int0_453, %int2_458 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1403 = torch.aten.item %1402 : !torch.vtensor<[1],si64> -> !torch.int
%1404 = torch.aten.eq.int %1403, %int0_453 : !torch.int, !torch.int -> !torch.bool
%1405 = torch.aten.Int.bool %1404 : !torch.bool -> !torch.int
%int6_459 = torch.constant.int 6
%1406 = torch.aten.mul.int %1405, %int6_459 : !torch.int, !torch.int -> !torch.int
%1407 = torch.aten.add.int %1403, %1406 : !torch.int, !torch.int -> !torch.int
%int3_460 = torch.constant.int 3
%1408 = torch.aten.select.int %1389, %int0_453, %int3_460 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1409 = torch.aten.item %1408 : !torch.vtensor<[1],si64> -> !torch.int
%1410 = torch.aten.eq.int %1409, %int0_453 : !torch.int, !torch.int -> !torch.bool
%1411 = torch.aten.Int.bool %1410 : !torch.bool -> !torch.int
%1412 = torch.aten.mul.int %1411, %int0_453 : !torch.int, !torch.int -> !torch.int
%1413 = torch.aten.add.int %1409, %1412 : !torch.int, !torch.int -> !torch.int
%1414 = torch.prim.ListConstruct %1395, %1401, %1407, %1413 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1415 = torch.aten.reshape %1388, %1414 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
%int1_461 = torch.constant.int 1
%1416 = torch.aten.add.Tensor %1415, %277, %int1_461 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%1417 = torch.vtensor.literal(dense_resource<__66> : tensor<f32>) : !torch.vtensor<[],f32>
%1418 = torch.aten.maximum %1416, %1417 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
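// The attention mask %277 (presumably causal) is added with broadcasting,
// which introduces the dynamic batch dim, and the result is clamped from
// below via maximum with the scalar resource __66 (likely a large-negative
// fp32 floor; payload not shown).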
%1419 = torch.vtensor.literal(dense_resource<__67> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_462 = torch.constant.int 0
%int0_463 = torch.constant.int 0
%1420 = torch.aten.select.int %1419, %int0_462, %int0_463 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1421 = torch.aten.item %1420 : !torch.vtensor<[1],si64> -> !torch.int
%1422 = torch.aten.eq.int %1421, %int0_462 : !torch.int, !torch.int -> !torch.bool
%1423 = torch.aten.Int.bool %1422 : !torch.bool -> !torch.int
%int-1_464 = torch.constant.int -1
%1424 = torch.aten.mul.int %1423, %int-1_464 : !torch.int, !torch.int -> !torch.int
%1425 = torch.aten.add.int %1421, %1424 : !torch.int, !torch.int -> !torch.int
%int1_465 = torch.constant.int 1
%1426 = torch.aten.select.int %1419, %int0_462, %int1_465 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1427 = torch.aten.item %1426 : !torch.vtensor<[1],si64> -> !torch.int
%1428 = torch.aten.eq.int %1427, %int0_462 : !torch.int, !torch.int -> !torch.bool
%1429 = torch.aten.Int.bool %1428 : !torch.bool -> !torch.int
%int12_466 = torch.constant.int 12
%1430 = torch.aten.mul.int %1429, %int12_466 : !torch.int, !torch.int -> !torch.int
%1431 = torch.aten.add.int %1427, %1430 : !torch.int, !torch.int -> !torch.int
%int2_467 = torch.constant.int 2
%1432 = torch.aten.select.int %1419, %int0_462, %int2_467 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1433 = torch.aten.item %1432 : !torch.vtensor<[1],si64> -> !torch.int
%1434 = torch.aten.eq.int %1433, %int0_462 : !torch.int, !torch.int -> !torch.bool
%1435 = torch.aten.Int.bool %1434 : !torch.bool -> !torch.int
%int6_468 = torch.constant.int 6
%1436 = torch.aten.mul.int %1435, %int6_468 : !torch.int, !torch.int -> !torch.int
%1437 = torch.aten.add.int %1433, %1436 : !torch.int, !torch.int -> !torch.int
%1438 = torch.prim.ListConstruct %1425, %1431, %1437 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1439 = torch.aten.reshape %1418, %1438 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
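// Same flatten back to [12,6,6] as in the previous layer. Note the -1
// multiplier above: when the corresponding input dim is dynamic, the
// lowering uses -1, so a 0 entry in the shape tensor becomes -1
// ("infer this dim") instead of a copied static size.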
%int2_469 = torch.constant.int 2
%none_470 = torch.constant.none
%1440 = torch.aten.softmax.int %1439, %int2_469, %none_470 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%1441 = torch.aten.matmul %1440, %1386 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
%1442 = torch.vtensor.literal(dense_resource<__68> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_471 = torch.constant.int 0
%int0_472 = torch.constant.int 0
%1443 = torch.aten.select.int %1442, %int0_471, %int0_472 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1444 = torch.aten.item %1443 : !torch.vtensor<[1],si64> -> !torch.int
%1445 = torch.aten.eq.int %1444, %int0_471 : !torch.int, !torch.int -> !torch.bool
%1446 = torch.aten.Int.bool %1445 : !torch.bool -> !torch.int
%int12_473 = torch.constant.int 12
%1447 = torch.aten.mul.int %1446, %int12_473 : !torch.int, !torch.int -> !torch.int
%1448 = torch.aten.add.int %1444, %1447 : !torch.int, !torch.int -> !torch.int
%int1_474 = torch.constant.int 1
%1449 = torch.aten.select.int %1442, %int0_471, %int1_474 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1450 = torch.aten.item %1449 : !torch.vtensor<[1],si64> -> !torch.int
%1451 = torch.aten.eq.int %1450, %int0_471 : !torch.int, !torch.int -> !torch.bool
%1452 = torch.aten.Int.bool %1451 : !torch.bool -> !torch.int
%int6_475 = torch.constant.int 6
%1453 = torch.aten.mul.int %1452, %int6_475 : !torch.int, !torch.int -> !torch.int
%1454 = torch.aten.add.int %1450, %1453 : !torch.int, !torch.int -> !torch.int
%int2_476 = torch.constant.int 2
%1455 = torch.aten.select.int %1442, %int0_471, %int2_476 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1456 = torch.aten.item %1455 : !torch.vtensor<[1],si64> -> !torch.int
%1457 = torch.aten.eq.int %1456, %int0_471 : !torch.int, !torch.int -> !torch.bool
%1458 = torch.aten.Int.bool %1457 : !torch.bool -> !torch.int
%int64_477 = torch.constant.int 64
%1459 = torch.aten.mul.int %1458, %int64_477 : !torch.int, !torch.int -> !torch.int
%1460 = torch.aten.add.int %1456, %1459 : !torch.int, !torch.int -> !torch.int
%int3_478 = torch.constant.int 3
%1461 = torch.aten.select.int %1442, %int0_471, %int3_478 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1462 = torch.aten.item %1461 : !torch.vtensor<[1],si64> -> !torch.int
%1463 = torch.aten.eq.int %1462, %int0_471 : !torch.int, !torch.int -> !torch.bool
%1464 = torch.aten.Int.bool %1463 : !torch.bool -> !torch.int
%1465 = torch.aten.mul.int %1464, %int0_471 : !torch.int, !torch.int -> !torch.int
%1466 = torch.aten.add.int %1462, %1465 : !torch.int, !torch.int -> !torch.int
%1467 = torch.prim.ListConstruct %1448, %1454, %1460, %1466 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1468 = torch.aten.reshape %1441, %1467 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_479 = torch.constant.int 1
%int2_480 = torch.constant.int 2
%1469 = torch.aten.transpose.int %1468, %int1_479, %int2_480 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%1470 = torch.vtensor.literal(dense_resource<__69> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_481 = torch.constant.int 0
%int0_482 = torch.constant.int 0
%1471 = torch.aten.select.int %1470, %int0_481, %int0_482 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1472 = torch.aten.item %1471 : !torch.vtensor<[1],si64> -> !torch.int
%1473 = torch.aten.eq.int %1472, %int0_481 : !torch.int, !torch.int -> !torch.bool
%1474 = torch.aten.Int.bool %1473 : !torch.bool -> !torch.int
%int1_483 = torch.constant.int 1
%1475 = torch.aten.mul.int %1474, %int1_483 : !torch.int, !torch.int -> !torch.int
%1476 = torch.aten.add.int %1472, %1475 : !torch.int, !torch.int -> !torch.int
%int1_484 = torch.constant.int 1
%1477 = torch.aten.select.int %1470, %int0_481, %int1_484 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1478 = torch.aten.item %1477 : !torch.vtensor<[1],si64> -> !torch.int
%1479 = torch.aten.eq.int %1478, %int0_481 : !torch.int, !torch.int -> !torch.bool
%1480 = torch.aten.Int.bool %1479 : !torch.bool -> !torch.int
%int6_485 = torch.constant.int 6
%1481 = torch.aten.mul.int %1480, %int6_485 : !torch.int, !torch.int -> !torch.int
%1482 = torch.aten.add.int %1478, %1481 : !torch.int, !torch.int -> !torch.int
%int2_486 = torch.constant.int 2
%1483 = torch.aten.select.int %1470, %int0_481, %int2_486 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1484 = torch.aten.item %1483 : !torch.vtensor<[1],si64> -> !torch.int
%1485 = torch.aten.eq.int %1484, %int0_481 : !torch.int, !torch.int -> !torch.bool
%1486 = torch.aten.Int.bool %1485 : !torch.bool -> !torch.int
%int12_487 = torch.constant.int 12
%1487 = torch.aten.mul.int %1486, %int12_487 : !torch.int, !torch.int -> !torch.int
%1488 = torch.aten.add.int %1484, %1487 : !torch.int, !torch.int -> !torch.int
%1489 = torch.prim.ListConstruct %1476, %1482, %1488 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1490 = torch.aten.reshape %1469, %1489 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%1491 = torch.aten.matmul %1490, %163 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_488 = torch.constant.int 1
%1492 = torch.aten.add.Tensor %43, %1491, %int1_488 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_489 = torch.constant.int 1
%1493 = torch.aten.add.Tensor %1230, %1492, %int1_489 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1494 = torch.vtensor.literal(dense_resource<__70> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_490 = torch.constant.int 0
%int0_491 = torch.constant.int 0
%1495 = torch.aten.select.int %1494, %int0_490, %int0_491 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1496 = torch.aten.item %1495 : !torch.vtensor<[1],si64> -> !torch.int
%1497 = torch.aten.eq.int %1496, %int0_490 : !torch.int, !torch.int -> !torch.bool
%1498 = torch.aten.Int.bool %1497 : !torch.bool -> !torch.int
%int1_492 = torch.constant.int 1
%1499 = torch.aten.mul.int %1498, %int1_492 : !torch.int, !torch.int -> !torch.int
%1500 = torch.aten.add.int %1496, %1499 : !torch.int, !torch.int -> !torch.int
%int1_493 = torch.constant.int 1
%1501 = torch.aten.select.int %1494, %int0_490, %int1_493 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1502 = torch.aten.item %1501 : !torch.vtensor<[1],si64> -> !torch.int
%1503 = torch.aten.eq.int %1502, %int0_490 : !torch.int, !torch.int -> !torch.bool
%1504 = torch.aten.Int.bool %1503 : !torch.bool -> !torch.int
%int6_494 = torch.constant.int 6
%1505 = torch.aten.mul.int %1504, %int6_494 : !torch.int, !torch.int -> !torch.int
%1506 = torch.aten.add.int %1502, %1505 : !torch.int, !torch.int -> !torch.int
%1507 = torch.prim.ListConstruct %1500, %1506 : (!torch.int, !torch.int) -> !torch.list<int>
%1508 = torch.aten.reshape %1493, %1507 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
%float9.999990e-06_495 = torch.constant.float 9.9999997473787516E-6
%int768_496 = torch.constant.int 768
%1509 = torch.prim.ListConstruct %int768_496 : (!torch.int) -> !torch.list<int>
%result0_497, %result1_498, %result2_499 = torch.aten.native_layer_norm %1508, %1509, %50, %51, %float9.999990e-06_495 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_500 = torch.constant.int 0
%int1_501 = torch.constant.int 1
%1510 = torch.aten.transpose.int %46, %int0_500, %int1_501 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%1511 = torch.aten.mm %result0_497, %1510 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%1512 = torch.aten.add.Tensor %1511, %47, %int1_501 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%1513 = torch.aten.relu %1512 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_502 = torch.constant.int 0
%int1_503 = torch.constant.int 1
%1514 = torch.aten.transpose.int %48, %int0_502, %int1_503 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%1515 = torch.aten.mm %1513, %1514 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%1516 = torch.aten.add.Tensor %1515, %49, %int1_503 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_504 = torch.constant.int 1
%1517 = torch.aten.add.Tensor %1508, %1516, %int1_504 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
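// Second residual add: end of this decoder layer. The remaining layers of
// OPT-125M's 12-layer stack repeat the same attention / LayerNorm / FFN
// pattern seen above.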
%1518 = torch.vtensor.literal(dense_resource<__71> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_505 = torch.constant.int 0
%int0_506 = torch.constant.int 0
%1519 = torch.aten.select.int %1518, %int0_505, %int0_506 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1520 = torch.aten.item %1519 : !torch.vtensor<[1],si64> -> !torch.int
%1521 = torch.aten.eq.int %1520, %int0_505 : !torch.int, !torch.int -> !torch.bool
%1522 = torch.aten.Int.bool %1521 : !torch.bool -> !torch.int
%int6_507 = torch.constant.int 6
%1523 = torch.aten.mul.int %1522, %int6_507 : !torch.int, !torch.int -> !torch.int
%1524 = torch.aten.add.int %1520, %1523 : !torch.int, !torch.int -> !torch.int
%int1_508 = torch.constant.int 1
%1525 = torch.aten.select.int %1518, %int0_505, %int1_508 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1526 = torch.aten.item %1525 : !torch.vtensor<[1],si64> -> !torch.int
%1527 = torch.aten.eq.int %1526, %int0_505 : !torch.int, !torch.int -> !torch.bool
%1528 = torch.aten.Int.bool %1527 : !torch.bool -> !torch.int
%int768_509 = torch.constant.int 768
%1529 = torch.aten.mul.int %1528, %int768_509 : !torch.int, !torch.int -> !torch.int
%1530 = torch.aten.add.int %1526, %1529 : !torch.int, !torch.int -> !torch.int
%int2_510 = torch.constant.int 2
%1531 = torch.aten.select.int %1518, %int0_505, %int2_510 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1532 = torch.aten.item %1531 : !torch.vtensor<[1],si64> -> !torch.int
%1533 = torch.aten.eq.int %1532, %int0_505 : !torch.int, !torch.int -> !torch.bool
%1534 = torch.aten.Int.bool %1533 : !torch.bool -> !torch.int
%1535 = torch.aten.mul.int %1534, %int0_505 : !torch.int, !torch.int -> !torch.int
%1536 = torch.aten.add.int %1532, %1535 : !torch.int, !torch.int -> !torch.int
%1537 = torch.prim.ListConstruct %1524, %1530, %1536 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1538 = torch.aten.reshape %1517, %1537 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%float9.999990e-06_511 = torch.constant.float 9.9999997473787516E-6
%int768_512 = torch.constant.int 768
%1539 = torch.prim.ListConstruct %int768_512 : (!torch.int) -> !torch.list<int>
%result0_513, %result1_514, %result2_515 = torch.aten.native_layer_norm %1538, %1539, %56, %57, %float9.999990e-06_511 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
%1540 = torch.aten.matmul %result0_513, %164 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_516 = torch.constant.int 1
%1541 = torch.aten.add.Tensor %54, %1540, %int1_516 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1542 = torch.vtensor.literal(dense_resource<__72> : tensor<f32>) : !torch.vtensor<[],f32>
%1543 = torch.aten.mul.Tensor %1541, %1542 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%1544 = torch.aten.matmul %result0_513, %165 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_517 = torch.constant.int 1
%1545 = torch.aten.add.Tensor %52, %1544, %int1_517 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1546 = torch.vtensor.literal(dense_resource<__73> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%1547 = torch.vtensor.literal(dense_resource<__74> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_518 = torch.constant.int 0
%int0_519 = torch.constant.int 0
%1548 = torch.aten.select.int %1546, %int0_518, %int0_519 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1549 = torch.aten.item %1548 : !torch.vtensor<[1],si64> -> !torch.int
%1550 = torch.aten.eq.int %1549, %int0_518 : !torch.int, !torch.int -> !torch.bool
%1551 = torch.aten.Int.bool %1550 : !torch.bool -> !torch.int
%int1_520 = torch.constant.int 1
%1552 = torch.aten.mul.int %1551, %int1_520 : !torch.int, !torch.int -> !torch.int
%1553 = torch.aten.add.int %1549, %1552 : !torch.int, !torch.int -> !torch.int
%int1_521 = torch.constant.int 1
%1554 = torch.aten.select.int %1546, %int0_518, %int1_521 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1555 = torch.aten.item %1554 : !torch.vtensor<[1],si64> -> !torch.int
%1556 = torch.aten.eq.int %1555, %int0_518 : !torch.int, !torch.int -> !torch.bool
%1557 = torch.aten.Int.bool %1556 : !torch.bool -> !torch.int
%int6_522 = torch.constant.int 6
%1558 = torch.aten.mul.int %1557, %int6_522 : !torch.int, !torch.int -> !torch.int
%1559 = torch.aten.add.int %1555, %1558 : !torch.int, !torch.int -> !torch.int
%int2_523 = torch.constant.int 2
%1560 = torch.aten.select.int %1546, %int0_518, %int2_523 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1561 = torch.aten.item %1560 : !torch.vtensor<[1],si64> -> !torch.int
%1562 = torch.aten.eq.int %1561, %int0_518 : !torch.int, !torch.int -> !torch.bool
%1563 = torch.aten.Int.bool %1562 : !torch.bool -> !torch.int
%int768_524 = torch.constant.int 768
%1564 = torch.aten.mul.int %1563, %int768_524 : !torch.int, !torch.int -> !torch.int
%1565 = torch.aten.add.int %1561, %1564 : !torch.int, !torch.int -> !torch.int
%int3_525 = torch.constant.int 3
%1566 = torch.aten.select.int %1546, %int0_518, %int3_525 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1567 = torch.aten.item %1566 : !torch.vtensor<[1],si64> -> !torch.int
%1568 = torch.aten.eq.int %1567, %int0_518 : !torch.int, !torch.int -> !torch.bool
%1569 = torch.aten.Int.bool %1568 : !torch.bool -> !torch.int
%1570 = torch.aten.mul.int %1569, %int0_518 : !torch.int, !torch.int -> !torch.int
%1571 = torch.aten.add.int %1567, %1570 : !torch.int, !torch.int -> !torch.int
%1572 = torch.prim.ListConstruct %1553, %1559, %1565, %1571 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1573 = torch.aten.reshape %1545, %1572 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_526 = torch.constant.int 1
%int2_527 = torch.constant.int 2
%1574 = torch.aten.transpose.int %1573, %int1_526, %int2_527 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1575 = torch.aten.matmul %result0_513, %166 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_528 = torch.constant.int 1
%1576 = torch.aten.add.Tensor %53, %1575, %int1_528 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_529 = torch.constant.int 0
%int0_530 = torch.constant.int 0
%1577 = torch.aten.select.int %1547, %int0_529, %int0_530 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1578 = torch.aten.item %1577 : !torch.vtensor<[1],si64> -> !torch.int
%1579 = torch.aten.eq.int %1578, %int0_529 : !torch.int, !torch.int -> !torch.bool
%1580 = torch.aten.Int.bool %1579 : !torch.bool -> !torch.int
%int1_531 = torch.constant.int 1
%1581 = torch.aten.mul.int %1580, %int1_531 : !torch.int, !torch.int -> !torch.int
%1582 = torch.aten.add.int %1578, %1581 : !torch.int, !torch.int -> !torch.int
%int1_532 = torch.constant.int 1
%1583 = torch.aten.select.int %1547, %int0_529, %int1_532 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1584 = torch.aten.item %1583 : !torch.vtensor<[1],si64> -> !torch.int
%1585 = torch.aten.eq.int %1584, %int0_529 : !torch.int, !torch.int -> !torch.bool
%1586 = torch.aten.Int.bool %1585 : !torch.bool -> !torch.int
%int6_533 = torch.constant.int 6
%1587 = torch.aten.mul.int %1586, %int6_533 : !torch.int, !torch.int -> !torch.int
%1588 = torch.aten.add.int %1584, %1587 : !torch.int, !torch.int -> !torch.int
%int2_534 = torch.constant.int 2
%1589 = torch.aten.select.int %1547, %int0_529, %int2_534 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1590 = torch.aten.item %1589 : !torch.vtensor<[1],si64> -> !torch.int
%1591 = torch.aten.eq.int %1590, %int0_529 : !torch.int, !torch.int -> !torch.bool
%1592 = torch.aten.Int.bool %1591 : !torch.bool -> !torch.int
%int768_535 = torch.constant.int 768
%1593 = torch.aten.mul.int %1592, %int768_535 : !torch.int, !torch.int -> !torch.int
%1594 = torch.aten.add.int %1590, %1593 : !torch.int, !torch.int -> !torch.int
%int3_536 = torch.constant.int 3
%1595 = torch.aten.select.int %1547, %int0_529, %int3_536 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1596 = torch.aten.item %1595 : !torch.vtensor<[1],si64> -> !torch.int
%1597 = torch.aten.eq.int %1596, %int0_529 : !torch.int, !torch.int -> !torch.bool
%1598 = torch.aten.Int.bool %1597 : !torch.bool -> !torch.int
%1599 = torch.aten.mul.int %1598, %int0_529 : !torch.int, !torch.int -> !torch.int
%1600 = torch.aten.add.int %1596, %1599 : !torch.int, !torch.int -> !torch.int
%1601 = torch.prim.ListConstruct %1582, %1588, %1594, %1600 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1602 = torch.aten.reshape %1576, %1601 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_537 = torch.constant.int 1
%int2_538 = torch.constant.int 2
%1603 = torch.aten.transpose.int %1602, %int1_537, %int2_538 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1604 = torch.vtensor.literal(dense_resource<__75> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_539 = torch.constant.int 0
%int0_540 = torch.constant.int 0
%1605 = torch.aten.select.int %1604, %int0_539, %int0_540 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1606 = torch.aten.item %1605 : !torch.vtensor<[1],si64> -> !torch.int
%1607 = torch.aten.eq.int %1606, %int0_539 : !torch.int, !torch.int -> !torch.bool
%1608 = torch.aten.Int.bool %1607 : !torch.bool -> !torch.int
%int1_541 = torch.constant.int 1
%1609 = torch.aten.mul.int %1608, %int1_541 : !torch.int, !torch.int -> !torch.int
%1610 = torch.aten.add.int %1606, %1609 : !torch.int, !torch.int -> !torch.int
%int1_542 = torch.constant.int 1
%1611 = torch.aten.select.int %1604, %int0_539, %int1_542 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1612 = torch.aten.item %1611 : !torch.vtensor<[1],si64> -> !torch.int
%1613 = torch.aten.eq.int %1612, %int0_539 : !torch.int, !torch.int -> !torch.bool
%1614 = torch.aten.Int.bool %1613 : !torch.bool -> !torch.int
%int6_543 = torch.constant.int 6
%1615 = torch.aten.mul.int %1614, %int6_543 : !torch.int, !torch.int -> !torch.int
%1616 = torch.aten.add.int %1612, %1615 : !torch.int, !torch.int -> !torch.int
%int2_544 = torch.constant.int 2
%1617 = torch.aten.select.int %1604, %int0_539, %int2_544 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1618 = torch.aten.item %1617 : !torch.vtensor<[1],si64> -> !torch.int
%1619 = torch.aten.eq.int %1618, %int0_539 : !torch.int, !torch.int -> !torch.bool
%1620 = torch.aten.Int.bool %1619 : !torch.bool -> !torch.int
%int768_545 = torch.constant.int 768
%1621 = torch.aten.mul.int %1620, %int768_545 : !torch.int, !torch.int -> !torch.int
%1622 = torch.aten.add.int %1618, %1621 : !torch.int, !torch.int -> !torch.int
%int3_546 = torch.constant.int 3
%1623 = torch.aten.select.int %1604, %int0_539, %int3_546 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1624 = torch.aten.item %1623 : !torch.vtensor<[1],si64> -> !torch.int
%1625 = torch.aten.eq.int %1624, %int0_539 : !torch.int, !torch.int -> !torch.bool
%1626 = torch.aten.Int.bool %1625 : !torch.bool -> !torch.int
%1627 = torch.aten.mul.int %1626, %int0_539 : !torch.int, !torch.int -> !torch.int
%1628 = torch.aten.add.int %1624, %1627 : !torch.int, !torch.int -> !torch.int
%1629 = torch.prim.ListConstruct %1610, %1616, %1622, %1628 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1630 = torch.aten.reshape %1543, %1629 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_547 = torch.constant.int 1
%int2_548 = torch.constant.int 2
%1631 = torch.aten.transpose.int %1630, %int1_547, %int2_548 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1632 = torch.vtensor.literal(dense_resource<__76> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1633 = torch.vtensor.literal(dense_resource<__77> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1634 = torch.vtensor.literal(dense_resource<__78> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_549 = torch.constant.int 0
%int0_550 = torch.constant.int 0
%1635 = torch.aten.select.int %1632, %int0_549, %int0_550 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1636 = torch.aten.item %1635 : !torch.vtensor<[1],si64> -> !torch.int
%1637 = torch.aten.eq.int %1636, %int0_549 : !torch.int, !torch.int -> !torch.bool
%1638 = torch.aten.Int.bool %1637 : !torch.bool -> !torch.int
%int1_551 = torch.constant.int 1
%1639 = torch.aten.mul.int %1638, %int1_551 : !torch.int, !torch.int -> !torch.int
%1640 = torch.aten.add.int %1636, %1639 : !torch.int, !torch.int -> !torch.int
%int1_552 = torch.constant.int 1
%1641 = torch.aten.select.int %1632, %int0_549, %int1_552 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1642 = torch.aten.item %1641 : !torch.vtensor<[1],si64> -> !torch.int
%1643 = torch.aten.eq.int %1642, %int0_549 : !torch.int, !torch.int -> !torch.bool
%1644 = torch.aten.Int.bool %1643 : !torch.bool -> !torch.int
%int12_553 = torch.constant.int 12
%1645 = torch.aten.mul.int %1644, %int12_553 : !torch.int, !torch.int -> !torch.int
%1646 = torch.aten.add.int %1642, %1645 : !torch.int, !torch.int -> !torch.int
%int2_554 = torch.constant.int 2
%1647 = torch.aten.select.int %1632, %int0_549, %int2_554 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1648 = torch.aten.item %1647 : !torch.vtensor<[1],si64> -> !torch.int
%1649 = torch.aten.eq.int %1648, %int0_549 : !torch.int, !torch.int -> !torch.bool
%1650 = torch.aten.Int.bool %1649 : !torch.bool -> !torch.int
%int6_555 = torch.constant.int 6
%1651 = torch.aten.mul.int %1650, %int6_555 : !torch.int, !torch.int -> !torch.int
%1652 = torch.aten.add.int %1648, %1651 : !torch.int, !torch.int -> !torch.int
%1653 = torch.prim.ListConstruct %1640, %1646, %1652 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1654 = torch.aten.reshape %1631, %1653 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_556 = torch.constant.int 0
%int0_557 = torch.constant.int 0
%1655 = torch.aten.select.int %1633, %int0_556, %int0_557 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1656 = torch.aten.item %1655 : !torch.vtensor<[1],si64> -> !torch.int
%1657 = torch.aten.eq.int %1656, %int0_556 : !torch.int, !torch.int -> !torch.bool
%1658 = torch.aten.Int.bool %1657 : !torch.bool -> !torch.int
%int1_558 = torch.constant.int 1
%1659 = torch.aten.mul.int %1658, %int1_558 : !torch.int, !torch.int -> !torch.int
%1660 = torch.aten.add.int %1656, %1659 : !torch.int, !torch.int -> !torch.int
%int1_559 = torch.constant.int 1
%1661 = torch.aten.select.int %1633, %int0_556, %int1_559 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1662 = torch.aten.item %1661 : !torch.vtensor<[1],si64> -> !torch.int
%1663 = torch.aten.eq.int %1662, %int0_556 : !torch.int, !torch.int -> !torch.bool
%1664 = torch.aten.Int.bool %1663 : !torch.bool -> !torch.int
%int12_560 = torch.constant.int 12
%1665 = torch.aten.mul.int %1664, %int12_560 : !torch.int, !torch.int -> !torch.int
%1666 = torch.aten.add.int %1662, %1665 : !torch.int, !torch.int -> !torch.int
%int2_561 = torch.constant.int 2
%1667 = torch.aten.select.int %1633, %int0_556, %int2_561 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1668 = torch.aten.item %1667 : !torch.vtensor<[1],si64> -> !torch.int
%1669 = torch.aten.eq.int %1668, %int0_556 : !torch.int, !torch.int -> !torch.bool
%1670 = torch.aten.Int.bool %1669 : !torch.bool -> !torch.int
%int6_562 = torch.constant.int 6
%1671 = torch.aten.mul.int %1670, %int6_562 : !torch.int, !torch.int -> !torch.int
%1672 = torch.aten.add.int %1668, %1671 : !torch.int, !torch.int -> !torch.int
%1673 = torch.prim.ListConstruct %1660, %1666, %1672 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1674 = torch.aten.reshape %1574, %1673 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_563 = torch.constant.int 0
%int0_564 = torch.constant.int 0
%1675 = torch.aten.select.int %1634, %int0_563, %int0_564 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1676 = torch.aten.item %1675 : !torch.vtensor<[1],si64> -> !torch.int
%1677 = torch.aten.eq.int %1676, %int0_563 : !torch.int, !torch.int -> !torch.bool
%1678 = torch.aten.Int.bool %1677 : !torch.bool -> !torch.int
%int1_565 = torch.constant.int 1
%1679 = torch.aten.mul.int %1678, %int1_565 : !torch.int, !torch.int -> !torch.int
%1680 = torch.aten.add.int %1676, %1679 : !torch.int, !torch.int -> !torch.int
%int1_566 = torch.constant.int 1
%1681 = torch.aten.select.int %1634, %int0_563, %int1_566 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1682 = torch.aten.item %1681 : !torch.vtensor<[1],si64> -> !torch.int
%1683 = torch.aten.eq.int %1682, %int0_563 : !torch.int, !torch.int -> !torch.bool
%1684 = torch.aten.Int.bool %1683 : !torch.bool -> !torch.int
%int12_567 = torch.constant.int 12
%1685 = torch.aten.mul.int %1684, %int12_567 : !torch.int, !torch.int -> !torch.int
%1686 = torch.aten.add.int %1682, %1685 : !torch.int, !torch.int -> !torch.int
%int2_568 = torch.constant.int 2
%1687 = torch.aten.select.int %1634, %int0_563, %int2_568 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1688 = torch.aten.item %1687 : !torch.vtensor<[1],si64> -> !torch.int
%1689 = torch.aten.eq.int %1688, %int0_563 : !torch.int, !torch.int -> !torch.bool
%1690 = torch.aten.Int.bool %1689 : !torch.bool -> !torch.int
%int6_569 = torch.constant.int 6
%1691 = torch.aten.mul.int %1690, %int6_569 : !torch.int, !torch.int -> !torch.int
%1692 = torch.aten.add.int %1688, %1691 : !torch.int, !torch.int -> !torch.int
%1693 = torch.prim.ListConstruct %1680, %1686, %1692 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1694 = torch.aten.reshape %1603, %1693 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
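// Q, K, and V are now all [12,6,64]. Next: K is transposed to [12,64,6] and the
// batched matmul Q @ K^T produces the [12,6,6] attention-score matrix per head.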
%int1_570 = torch.constant.int 1
%int2_571 = torch.constant.int 2
%1695 = torch.aten.transpose.int %1674, %int1_570, %int2_571 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%1696 = torch.aten.matmul %1654, %1695 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
%1697 = torch.vtensor.literal(dense_resource<__79> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_572 = torch.constant.int 0
%int0_573 = torch.constant.int 0
%1698 = torch.aten.select.int %1697, %int0_572, %int0_573 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1699 = torch.aten.item %1698 : !torch.vtensor<[1],si64> -> !torch.int
%1700 = torch.aten.eq.int %1699, %int0_572 : !torch.int, !torch.int -> !torch.bool
%1701 = torch.aten.Int.bool %1700 : !torch.bool -> !torch.int
%int12_574 = torch.constant.int 12
%1702 = torch.aten.mul.int %1701, %int12_574 : !torch.int, !torch.int -> !torch.int
%1703 = torch.aten.add.int %1699, %1702 : !torch.int, !torch.int -> !torch.int
%int1_575 = torch.constant.int 1
%1704 = torch.aten.select.int %1697, %int0_572, %int1_575 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1705 = torch.aten.item %1704 : !torch.vtensor<[1],si64> -> !torch.int
%1706 = torch.aten.eq.int %1705, %int0_572 : !torch.int, !torch.int -> !torch.bool
%1707 = torch.aten.Int.bool %1706 : !torch.bool -> !torch.int
%int6_576 = torch.constant.int 6
%1708 = torch.aten.mul.int %1707, %int6_576 : !torch.int, !torch.int -> !torch.int
%1709 = torch.aten.add.int %1705, %1708 : !torch.int, !torch.int -> !torch.int
%int2_577 = torch.constant.int 2
%1710 = torch.aten.select.int %1697, %int0_572, %int2_577 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1711 = torch.aten.item %1710 : !torch.vtensor<[1],si64> -> !torch.int
%1712 = torch.aten.eq.int %1711, %int0_572 : !torch.int, !torch.int -> !torch.bool
%1713 = torch.aten.Int.bool %1712 : !torch.bool -> !torch.int
%int6_578 = torch.constant.int 6
%1714 = torch.aten.mul.int %1713, %int6_578 : !torch.int, !torch.int -> !torch.int
%1715 = torch.aten.add.int %1711, %1714 : !torch.int, !torch.int -> !torch.int
%int3_579 = torch.constant.int 3
%1716 = torch.aten.select.int %1697, %int0_572, %int3_579 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1717 = torch.aten.item %1716 : !torch.vtensor<[1],si64> -> !torch.int
%1718 = torch.aten.eq.int %1717, %int0_572 : !torch.int, !torch.int -> !torch.bool
%1719 = torch.aten.Int.bool %1718 : !torch.bool -> !torch.int
%1720 = torch.aten.mul.int %1719, %int0_572 : !torch.int, !torch.int -> !torch.int
%1721 = torch.aten.add.int %1717, %1720 : !torch.int, !torch.int -> !torch.int
%1722 = torch.prim.ListConstruct %1703, %1709, %1715, %1721 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1723 = torch.aten.reshape %1696, %1722 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
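// The scores are unflattened to [1,12,6,6] and the additive attention mask %277
// (computed earlier in the module, broadcast over the batch dim) is added. The
// torch.aten.maximum against the scalar resource __80 then clamps the masked
// scores from below; __80 is presumably the most-negative f32 value, matching
// the torch.max(attn_weights, finfo.min) guard in the HF OPT attention code.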
%int1_580 = torch.constant.int 1
%1724 = torch.aten.add.Tensor %1723, %277, %int1_580 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%1725 = torch.vtensor.literal(dense_resource<__80> : tensor<f32>) : !torch.vtensor<[],f32>
%1726 = torch.aten.maximum %1724, %1725 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
%1727 = torch.vtensor.literal(dense_resource<__81> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_581 = torch.constant.int 0
%int0_582 = torch.constant.int 0
%1728 = torch.aten.select.int %1727, %int0_581, %int0_582 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1729 = torch.aten.item %1728 : !torch.vtensor<[1],si64> -> !torch.int
%1730 = torch.aten.eq.int %1729, %int0_581 : !torch.int, !torch.int -> !torch.bool
%1731 = torch.aten.Int.bool %1730 : !torch.bool -> !torch.int
%int-1_583 = torch.constant.int -1
%1732 = torch.aten.mul.int %1731, %int-1_583 : !torch.int, !torch.int -> !torch.int
%1733 = torch.aten.add.int %1729, %1732 : !torch.int, !torch.int -> !torch.int
%int1_584 = torch.constant.int 1
%1734 = torch.aten.select.int %1727, %int0_581, %int1_584 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1735 = torch.aten.item %1734 : !torch.vtensor<[1],si64> -> !torch.int
%1736 = torch.aten.eq.int %1735, %int0_581 : !torch.int, !torch.int -> !torch.bool
%1737 = torch.aten.Int.bool %1736 : !torch.bool -> !torch.int
%int12_585 = torch.constant.int 12
%1738 = torch.aten.mul.int %1737, %int12_585 : !torch.int, !torch.int -> !torch.int
%1739 = torch.aten.add.int %1735, %1738 : !torch.int, !torch.int -> !torch.int
%int2_586 = torch.constant.int 2
%1740 = torch.aten.select.int %1727, %int0_581, %int2_586 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1741 = torch.aten.item %1740 : !torch.vtensor<[1],si64> -> !torch.int
%1742 = torch.aten.eq.int %1741, %int0_581 : !torch.int, !torch.int -> !torch.bool
%1743 = torch.aten.Int.bool %1742 : !torch.bool -> !torch.int
%int6_587 = torch.constant.int 6
%1744 = torch.aten.mul.int %1743, %int6_587 : !torch.int, !torch.int -> !torch.int
%1745 = torch.aten.add.int %1741, %1744 : !torch.int, !torch.int -> !torch.int
%1746 = torch.prim.ListConstruct %1733, %1739, %1745 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1747 = torch.aten.reshape %1726, %1746 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
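// Softmax over dim 2 (the key positions) turns the scores into attention
// weights, which are matmul'd with V (%1694) to give the [12,6,64] context.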
%int2_588 = torch.constant.int 2
%none_589 = torch.constant.none
%1748 = torch.aten.softmax.int %1747, %int2_588, %none_589 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%1749 = torch.aten.matmul %1748, %1694 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
%1750 = torch.vtensor.literal(dense_resource<__82> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_590 = torch.constant.int 0
%int0_591 = torch.constant.int 0
%1751 = torch.aten.select.int %1750, %int0_590, %int0_591 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1752 = torch.aten.item %1751 : !torch.vtensor<[1],si64> -> !torch.int
%1753 = torch.aten.eq.int %1752, %int0_590 : !torch.int, !torch.int -> !torch.bool
%1754 = torch.aten.Int.bool %1753 : !torch.bool -> !torch.int
%int12_592 = torch.constant.int 12
%1755 = torch.aten.mul.int %1754, %int12_592 : !torch.int, !torch.int -> !torch.int
%1756 = torch.aten.add.int %1752, %1755 : !torch.int, !torch.int -> !torch.int
%int1_593 = torch.constant.int 1
%1757 = torch.aten.select.int %1750, %int0_590, %int1_593 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1758 = torch.aten.item %1757 : !torch.vtensor<[1],si64> -> !torch.int
%1759 = torch.aten.eq.int %1758, %int0_590 : !torch.int, !torch.int -> !torch.bool
%1760 = torch.aten.Int.bool %1759 : !torch.bool -> !torch.int
%int6_594 = torch.constant.int 6
%1761 = torch.aten.mul.int %1760, %int6_594 : !torch.int, !torch.int -> !torch.int
%1762 = torch.aten.add.int %1758, %1761 : !torch.int, !torch.int -> !torch.int
%int2_595 = torch.constant.int 2
%1763 = torch.aten.select.int %1750, %int0_590, %int2_595 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1764 = torch.aten.item %1763 : !torch.vtensor<[1],si64> -> !torch.int
%1765 = torch.aten.eq.int %1764, %int0_590 : !torch.int, !torch.int -> !torch.bool
%1766 = torch.aten.Int.bool %1765 : !torch.bool -> !torch.int
%int64_596 = torch.constant.int 64
%1767 = torch.aten.mul.int %1766, %int64_596 : !torch.int, !torch.int -> !torch.int
%1768 = torch.aten.add.int %1764, %1767 : !torch.int, !torch.int -> !torch.int
%int3_597 = torch.constant.int 3
%1769 = torch.aten.select.int %1750, %int0_590, %int3_597 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1770 = torch.aten.item %1769 : !torch.vtensor<[1],si64> -> !torch.int
%1771 = torch.aten.eq.int %1770, %int0_590 : !torch.int, !torch.int -> !torch.bool
%1772 = torch.aten.Int.bool %1771 : !torch.bool -> !torch.int
%1773 = torch.aten.mul.int %1772, %int0_590 : !torch.int, !torch.int -> !torch.int
%1774 = torch.aten.add.int %1770, %1773 : !torch.int, !torch.int -> !torch.int
%1775 = torch.prim.ListConstruct %1756, %1762, %1768, %1774 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1776 = torch.aten.reshape %1749, %1775 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
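// The context is unflattened to [1,12,6,64], transposed to [1,6,12,64], and
// reshaped to [1,6,768] — the 12 heads of width 64 merge back into the hidden size.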
%int1_598 = torch.constant.int 1
%int2_599 = torch.constant.int 2
%1777 = torch.aten.transpose.int %1776, %int1_598, %int2_599 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%1778 = torch.vtensor.literal(dense_resource<__83> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_600 = torch.constant.int 0
%int0_601 = torch.constant.int 0
%1779 = torch.aten.select.int %1778, %int0_600, %int0_601 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1780 = torch.aten.item %1779 : !torch.vtensor<[1],si64> -> !torch.int
%1781 = torch.aten.eq.int %1780, %int0_600 : !torch.int, !torch.int -> !torch.bool
%1782 = torch.aten.Int.bool %1781 : !torch.bool -> !torch.int
%int1_602 = torch.constant.int 1
%1783 = torch.aten.mul.int %1782, %int1_602 : !torch.int, !torch.int -> !torch.int
%1784 = torch.aten.add.int %1780, %1783 : !torch.int, !torch.int -> !torch.int
%int1_603 = torch.constant.int 1
%1785 = torch.aten.select.int %1778, %int0_600, %int1_603 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1786 = torch.aten.item %1785 : !torch.vtensor<[1],si64> -> !torch.int
%1787 = torch.aten.eq.int %1786, %int0_600 : !torch.int, !torch.int -> !torch.bool
%1788 = torch.aten.Int.bool %1787 : !torch.bool -> !torch.int
%int6_604 = torch.constant.int 6
%1789 = torch.aten.mul.int %1788, %int6_604 : !torch.int, !torch.int -> !torch.int
%1790 = torch.aten.add.int %1786, %1789 : !torch.int, !torch.int -> !torch.int
%int2_605 = torch.constant.int 2
%1791 = torch.aten.select.int %1778, %int0_600, %int2_605 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1792 = torch.aten.item %1791 : !torch.vtensor<[1],si64> -> !torch.int
%1793 = torch.aten.eq.int %1792, %int0_600 : !torch.int, !torch.int -> !torch.bool
%1794 = torch.aten.Int.bool %1793 : !torch.bool -> !torch.int
%int12_606 = torch.constant.int 12
%1795 = torch.aten.mul.int %1794, %int12_606 : !torch.int, !torch.int -> !torch.int
%1796 = torch.aten.add.int %1792, %1795 : !torch.int, !torch.int -> !torch.int
%1797 = torch.prim.ListConstruct %1784, %1790, %1796 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1798 = torch.aten.reshape %1777, %1797 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
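// Attention output projection: matmul with the [768,768] weight %167, bias %55,
// then the residual add with %1538 (apparently the input to this attention block).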
%1799 = torch.aten.matmul %1798, %167 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_607 = torch.constant.int 1
%1800 = torch.aten.add.Tensor %55, %1799, %int1_607 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_608 = torch.constant.int 1
%1801 = torch.aten.add.Tensor %1538, %1800, %int1_608 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1802 = torch.vtensor.literal(dense_resource<__84> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_609 = torch.constant.int 0
%int0_610 = torch.constant.int 0
%1803 = torch.aten.select.int %1802, %int0_609, %int0_610 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1804 = torch.aten.item %1803 : !torch.vtensor<[1],si64> -> !torch.int
%1805 = torch.aten.eq.int %1804, %int0_609 : !torch.int, !torch.int -> !torch.bool
%1806 = torch.aten.Int.bool %1805 : !torch.bool -> !torch.int
%int1_611 = torch.constant.int 1
%1807 = torch.aten.mul.int %1806, %int1_611 : !torch.int, !torch.int -> !torch.int
%1808 = torch.aten.add.int %1804, %1807 : !torch.int, !torch.int -> !torch.int
%int1_612 = torch.constant.int 1
%1809 = torch.aten.select.int %1802, %int0_609, %int1_612 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1810 = torch.aten.item %1809 : !torch.vtensor<[1],si64> -> !torch.int
%1811 = torch.aten.eq.int %1810, %int0_609 : !torch.int, !torch.int -> !torch.bool
%1812 = torch.aten.Int.bool %1811 : !torch.bool -> !torch.int
%int6_613 = torch.constant.int 6
%1813 = torch.aten.mul.int %1812, %int6_613 : !torch.int, !torch.int -> !torch.int
%1814 = torch.aten.add.int %1810, %1813 : !torch.int, !torch.int -> !torch.int
%1815 = torch.prim.ListConstruct %1808, %1814 : (!torch.int, !torch.int) -> !torch.list<int>
%1816 = torch.aten.reshape %1801, %1815 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
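// Feed-forward sub-block: LayerNorm (eps ~= 1e-5) over the flattened [6,768]
// activations, fc1 768->3072 (%58/%59) with ReLU — OPT uses ReLU rather than
// GELU — then fc2 3072->768 (%60/%61) and a second residual add.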
%float9.999990e-06_614 = torch.constant.float 9.9999997473787516E-6
%int768_615 = torch.constant.int 768
%1817 = torch.prim.ListConstruct %int768_615 : (!torch.int) -> !torch.list<int>
%result0_616, %result1_617, %result2_618 = torch.aten.native_layer_norm %1816, %1817, %62, %63, %float9.999990e-06_614 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_619 = torch.constant.int 0
%int1_620 = torch.constant.int 1
%1818 = torch.aten.transpose.int %58, %int0_619, %int1_620 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%1819 = torch.aten.mm %result0_616, %1818 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%1820 = torch.aten.add.Tensor %1819, %59, %int1_620 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%1821 = torch.aten.relu %1820 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_621 = torch.constant.int 0
%int1_622 = torch.constant.int 1
%1822 = torch.aten.transpose.int %60, %int0_621, %int1_622 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%1823 = torch.aten.mm %1821, %1822 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%1824 = torch.aten.add.Tensor %1823, %61, %int1_622 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_623 = torch.constant.int 1
%1825 = torch.aten.add.Tensor %1816, %1824, %int1_623 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%1826 = torch.vtensor.literal(dense_resource<__85> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_624 = torch.constant.int 0
%int0_625 = torch.constant.int 0
%1827 = torch.aten.select.int %1826, %int0_624, %int0_625 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1828 = torch.aten.item %1827 : !torch.vtensor<[1],si64> -> !torch.int
%1829 = torch.aten.eq.int %1828, %int0_624 : !torch.int, !torch.int -> !torch.bool
%1830 = torch.aten.Int.bool %1829 : !torch.bool -> !torch.int
%int6_626 = torch.constant.int 6
%1831 = torch.aten.mul.int %1830, %int6_626 : !torch.int, !torch.int -> !torch.int
%1832 = torch.aten.add.int %1828, %1831 : !torch.int, !torch.int -> !torch.int
%int1_627 = torch.constant.int 1
%1833 = torch.aten.select.int %1826, %int0_624, %int1_627 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1834 = torch.aten.item %1833 : !torch.vtensor<[1],si64> -> !torch.int
%1835 = torch.aten.eq.int %1834, %int0_624 : !torch.int, !torch.int -> !torch.bool
%1836 = torch.aten.Int.bool %1835 : !torch.bool -> !torch.int
%int768_628 = torch.constant.int 768
%1837 = torch.aten.mul.int %1836, %int768_628 : !torch.int, !torch.int -> !torch.int
%1838 = torch.aten.add.int %1834, %1837 : !torch.int, !torch.int -> !torch.int
%int2_629 = torch.constant.int 2
%1839 = torch.aten.select.int %1826, %int0_624, %int2_629 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1840 = torch.aten.item %1839 : !torch.vtensor<[1],si64> -> !torch.int
%1841 = torch.aten.eq.int %1840, %int0_624 : !torch.int, !torch.int -> !torch.bool
%1842 = torch.aten.Int.bool %1841 : !torch.bool -> !torch.int
%1843 = torch.aten.mul.int %1842, %int0_624 : !torch.int, !torch.int -> !torch.int
%1844 = torch.aten.add.int %1840, %1843 : !torch.int, !torch.int -> !torch.int
%1845 = torch.prim.ListConstruct %1832, %1838, %1844 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1846 = torch.aten.reshape %1825, %1845 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
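// End of this decoder layer. The next layer begins with its pre-attention
// LayerNorm (OPT-125M applies LayerNorm before attention), then the query
// projection (%168/%66) scaled by the scalar resource __86 — presumably
// 1/sqrt(64) = 0.125 — and the key projection (%169/%64).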
%float9.999990e-06_630 = torch.constant.float 9.9999997473787516E-6
%int768_631 = torch.constant.int 768
%1847 = torch.prim.ListConstruct %int768_631 : (!torch.int) -> !torch.list<int>
%result0_632, %result1_633, %result2_634 = torch.aten.native_layer_norm %1846, %1847, %68, %69, %float9.999990e-06_630 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
%1848 = torch.aten.matmul %result0_632, %168 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_635 = torch.constant.int 1
%1849 = torch.aten.add.Tensor %66, %1848, %int1_635 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1850 = torch.vtensor.literal(dense_resource<__86> : tensor<f32>) : !torch.vtensor<[],f32>
%1851 = torch.aten.mul.Tensor %1849, %1850 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%1852 = torch.aten.matmul %result0_632, %169 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_636 = torch.constant.int 1
%1853 = torch.aten.add.Tensor %64, %1852, %int1_636 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
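// Q and K (and, below, the value projection %170/%65) are each reshaped to
// [1,6,12,64] and transposed to [1,12,6,64] (batch, heads, seq, head_dim),
// repeating the ONNX-Reshape shape bookkeeping seen in the previous layer.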
%1854 = torch.vtensor.literal(dense_resource<__87> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%1855 = torch.vtensor.literal(dense_resource<__88> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_637 = torch.constant.int 0
%int0_638 = torch.constant.int 0
%1856 = torch.aten.select.int %1854, %int0_637, %int0_638 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1857 = torch.aten.item %1856 : !torch.vtensor<[1],si64> -> !torch.int
%1858 = torch.aten.eq.int %1857, %int0_637 : !torch.int, !torch.int -> !torch.bool
%1859 = torch.aten.Int.bool %1858 : !torch.bool -> !torch.int
%int1_639 = torch.constant.int 1
%1860 = torch.aten.mul.int %1859, %int1_639 : !torch.int, !torch.int -> !torch.int
%1861 = torch.aten.add.int %1857, %1860 : !torch.int, !torch.int -> !torch.int
%int1_640 = torch.constant.int 1
%1862 = torch.aten.select.int %1854, %int0_637, %int1_640 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1863 = torch.aten.item %1862 : !torch.vtensor<[1],si64> -> !torch.int
%1864 = torch.aten.eq.int %1863, %int0_637 : !torch.int, !torch.int -> !torch.bool
%1865 = torch.aten.Int.bool %1864 : !torch.bool -> !torch.int
%int6_641 = torch.constant.int 6
%1866 = torch.aten.mul.int %1865, %int6_641 : !torch.int, !torch.int -> !torch.int
%1867 = torch.aten.add.int %1863, %1866 : !torch.int, !torch.int -> !torch.int
%int2_642 = torch.constant.int 2
%1868 = torch.aten.select.int %1854, %int0_637, %int2_642 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1869 = torch.aten.item %1868 : !torch.vtensor<[1],si64> -> !torch.int
%1870 = torch.aten.eq.int %1869, %int0_637 : !torch.int, !torch.int -> !torch.bool
%1871 = torch.aten.Int.bool %1870 : !torch.bool -> !torch.int
%int768_643 = torch.constant.int 768
%1872 = torch.aten.mul.int %1871, %int768_643 : !torch.int, !torch.int -> !torch.int
%1873 = torch.aten.add.int %1869, %1872 : !torch.int, !torch.int -> !torch.int
%int3_644 = torch.constant.int 3
%1874 = torch.aten.select.int %1854, %int0_637, %int3_644 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1875 = torch.aten.item %1874 : !torch.vtensor<[1],si64> -> !torch.int
%1876 = torch.aten.eq.int %1875, %int0_637 : !torch.int, !torch.int -> !torch.bool
%1877 = torch.aten.Int.bool %1876 : !torch.bool -> !torch.int
%1878 = torch.aten.mul.int %1877, %int0_637 : !torch.int, !torch.int -> !torch.int
%1879 = torch.aten.add.int %1875, %1878 : !torch.int, !torch.int -> !torch.int
%1880 = torch.prim.ListConstruct %1861, %1867, %1873, %1879 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1881 = torch.aten.reshape %1853, %1880 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_645 = torch.constant.int 1
%int2_646 = torch.constant.int 2
%1882 = torch.aten.transpose.int %1881, %int1_645, %int2_646 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1883 = torch.aten.matmul %result0_632, %170 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_647 = torch.constant.int 1
%1884 = torch.aten.add.Tensor %65, %1883, %int1_647 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_648 = torch.constant.int 0
%int0_649 = torch.constant.int 0
%1885 = torch.aten.select.int %1855, %int0_648, %int0_649 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1886 = torch.aten.item %1885 : !torch.vtensor<[1],si64> -> !torch.int
%1887 = torch.aten.eq.int %1886, %int0_648 : !torch.int, !torch.int -> !torch.bool
%1888 = torch.aten.Int.bool %1887 : !torch.bool -> !torch.int
%int1_650 = torch.constant.int 1
%1889 = torch.aten.mul.int %1888, %int1_650 : !torch.int, !torch.int -> !torch.int
%1890 = torch.aten.add.int %1886, %1889 : !torch.int, !torch.int -> !torch.int
%int1_651 = torch.constant.int 1
%1891 = torch.aten.select.int %1855, %int0_648, %int1_651 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1892 = torch.aten.item %1891 : !torch.vtensor<[1],si64> -> !torch.int
%1893 = torch.aten.eq.int %1892, %int0_648 : !torch.int, !torch.int -> !torch.bool
%1894 = torch.aten.Int.bool %1893 : !torch.bool -> !torch.int
%int6_652 = torch.constant.int 6
%1895 = torch.aten.mul.int %1894, %int6_652 : !torch.int, !torch.int -> !torch.int
%1896 = torch.aten.add.int %1892, %1895 : !torch.int, !torch.int -> !torch.int
%int2_653 = torch.constant.int 2
%1897 = torch.aten.select.int %1855, %int0_648, %int2_653 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1898 = torch.aten.item %1897 : !torch.vtensor<[1],si64> -> !torch.int
%1899 = torch.aten.eq.int %1898, %int0_648 : !torch.int, !torch.int -> !torch.bool
%1900 = torch.aten.Int.bool %1899 : !torch.bool -> !torch.int
%int768_654 = torch.constant.int 768
%1901 = torch.aten.mul.int %1900, %int768_654 : !torch.int, !torch.int -> !torch.int
%1902 = torch.aten.add.int %1898, %1901 : !torch.int, !torch.int -> !torch.int
%int3_655 = torch.constant.int 3
%1903 = torch.aten.select.int %1855, %int0_648, %int3_655 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1904 = torch.aten.item %1903 : !torch.vtensor<[1],si64> -> !torch.int
%1905 = torch.aten.eq.int %1904, %int0_648 : !torch.int, !torch.int -> !torch.bool
%1906 = torch.aten.Int.bool %1905 : !torch.bool -> !torch.int
%1907 = torch.aten.mul.int %1906, %int0_648 : !torch.int, !torch.int -> !torch.int
%1908 = torch.aten.add.int %1904, %1907 : !torch.int, !torch.int -> !torch.int
%1909 = torch.prim.ListConstruct %1890, %1896, %1902, %1908 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1910 = torch.aten.reshape %1884, %1909 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_656 = torch.constant.int 1
%int2_657 = torch.constant.int 2
%1911 = torch.aten.transpose.int %1910, %int1_656, %int2_657 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1912 = torch.vtensor.literal(dense_resource<__89> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_658 = torch.constant.int 0
%int0_659 = torch.constant.int 0
%1913 = torch.aten.select.int %1912, %int0_658, %int0_659 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1914 = torch.aten.item %1913 : !torch.vtensor<[1],si64> -> !torch.int
%1915 = torch.aten.eq.int %1914, %int0_658 : !torch.int, !torch.int -> !torch.bool
%1916 = torch.aten.Int.bool %1915 : !torch.bool -> !torch.int
%int1_660 = torch.constant.int 1
%1917 = torch.aten.mul.int %1916, %int1_660 : !torch.int, !torch.int -> !torch.int
%1918 = torch.aten.add.int %1914, %1917 : !torch.int, !torch.int -> !torch.int
%int1_661 = torch.constant.int 1
%1919 = torch.aten.select.int %1912, %int0_658, %int1_661 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1920 = torch.aten.item %1919 : !torch.vtensor<[1],si64> -> !torch.int
%1921 = torch.aten.eq.int %1920, %int0_658 : !torch.int, !torch.int -> !torch.bool
%1922 = torch.aten.Int.bool %1921 : !torch.bool -> !torch.int
%int6_662 = torch.constant.int 6
%1923 = torch.aten.mul.int %1922, %int6_662 : !torch.int, !torch.int -> !torch.int
%1924 = torch.aten.add.int %1920, %1923 : !torch.int, !torch.int -> !torch.int
%int2_663 = torch.constant.int 2
%1925 = torch.aten.select.int %1912, %int0_658, %int2_663 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1926 = torch.aten.item %1925 : !torch.vtensor<[1],si64> -> !torch.int
%1927 = torch.aten.eq.int %1926, %int0_658 : !torch.int, !torch.int -> !torch.bool
%1928 = torch.aten.Int.bool %1927 : !torch.bool -> !torch.int
%int768_664 = torch.constant.int 768
%1929 = torch.aten.mul.int %1928, %int768_664 : !torch.int, !torch.int -> !torch.int
%1930 = torch.aten.add.int %1926, %1929 : !torch.int, !torch.int -> !torch.int
%int3_665 = torch.constant.int 3
%1931 = torch.aten.select.int %1912, %int0_658, %int3_665 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1932 = torch.aten.item %1931 : !torch.vtensor<[1],si64> -> !torch.int
%1933 = torch.aten.eq.int %1932, %int0_658 : !torch.int, !torch.int -> !torch.bool
%1934 = torch.aten.Int.bool %1933 : !torch.bool -> !torch.int
%1935 = torch.aten.mul.int %1934, %int0_658 : !torch.int, !torch.int -> !torch.int
%1936 = torch.aten.add.int %1932, %1935 : !torch.int, !torch.int -> !torch.int
%1937 = torch.prim.ListConstruct %1918, %1924, %1930, %1936 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1938 = torch.aten.reshape %1851, %1937 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_666 = torch.constant.int 1
%int2_667 = torch.constant.int 2
%1939 = torch.aten.transpose.int %1938, %int1_666, %int2_667 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1940 = torch.vtensor.literal(dense_resource<__90> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1941 = torch.vtensor.literal(dense_resource<__91> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1942 = torch.vtensor.literal(dense_resource<__92> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_668 = torch.constant.int 0
%int0_669 = torch.constant.int 0
%1943 = torch.aten.select.int %1940, %int0_668, %int0_669 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1944 = torch.aten.item %1943 : !torch.vtensor<[1],si64> -> !torch.int
%1945 = torch.aten.eq.int %1944, %int0_668 : !torch.int, !torch.int -> !torch.bool
%1946 = torch.aten.Int.bool %1945 : !torch.bool -> !torch.int
%int1_670 = torch.constant.int 1
%1947 = torch.aten.mul.int %1946, %int1_670 : !torch.int, !torch.int -> !torch.int
%1948 = torch.aten.add.int %1944, %1947 : !torch.int, !torch.int -> !torch.int
%int1_671 = torch.constant.int 1
%1949 = torch.aten.select.int %1940, %int0_668, %int1_671 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1950 = torch.aten.item %1949 : !torch.vtensor<[1],si64> -> !torch.int
%1951 = torch.aten.eq.int %1950, %int0_668 : !torch.int, !torch.int -> !torch.bool
%1952 = torch.aten.Int.bool %1951 : !torch.bool -> !torch.int
%int12_672 = torch.constant.int 12
%1953 = torch.aten.mul.int %1952, %int12_672 : !torch.int, !torch.int -> !torch.int
%1954 = torch.aten.add.int %1950, %1953 : !torch.int, !torch.int -> !torch.int
%int2_673 = torch.constant.int 2
%1955 = torch.aten.select.int %1940, %int0_668, %int2_673 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1956 = torch.aten.item %1955 : !torch.vtensor<[1],si64> -> !torch.int
%1957 = torch.aten.eq.int %1956, %int0_668 : !torch.int, !torch.int -> !torch.bool
%1958 = torch.aten.Int.bool %1957 : !torch.bool -> !torch.int
%int6_674 = torch.constant.int 6
%1959 = torch.aten.mul.int %1958, %int6_674 : !torch.int, !torch.int -> !torch.int
%1960 = torch.aten.add.int %1956, %1959 : !torch.int, !torch.int -> !torch.int
%1961 = torch.prim.ListConstruct %1948, %1954, %1960 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1962 = torch.aten.reshape %1939, %1961 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_675 = torch.constant.int 0
%int0_676 = torch.constant.int 0
%1963 = torch.aten.select.int %1941, %int0_675, %int0_676 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1964 = torch.aten.item %1963 : !torch.vtensor<[1],si64> -> !torch.int
%1965 = torch.aten.eq.int %1964, %int0_675 : !torch.int, !torch.int -> !torch.bool
%1966 = torch.aten.Int.bool %1965 : !torch.bool -> !torch.int
%int1_677 = torch.constant.int 1
%1967 = torch.aten.mul.int %1966, %int1_677 : !torch.int, !torch.int -> !torch.int
%1968 = torch.aten.add.int %1964, %1967 : !torch.int, !torch.int -> !torch.int
%int1_678 = torch.constant.int 1
%1969 = torch.aten.select.int %1941, %int0_675, %int1_678 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1970 = torch.aten.item %1969 : !torch.vtensor<[1],si64> -> !torch.int
%1971 = torch.aten.eq.int %1970, %int0_675 : !torch.int, !torch.int -> !torch.bool
%1972 = torch.aten.Int.bool %1971 : !torch.bool -> !torch.int
%int12_679 = torch.constant.int 12
%1973 = torch.aten.mul.int %1972, %int12_679 : !torch.int, !torch.int -> !torch.int
%1974 = torch.aten.add.int %1970, %1973 : !torch.int, !torch.int -> !torch.int
%int2_680 = torch.constant.int 2
%1975 = torch.aten.select.int %1941, %int0_675, %int2_680 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1976 = torch.aten.item %1975 : !torch.vtensor<[1],si64> -> !torch.int
%1977 = torch.aten.eq.int %1976, %int0_675 : !torch.int, !torch.int -> !torch.bool
%1978 = torch.aten.Int.bool %1977 : !torch.bool -> !torch.int
%int6_681 = torch.constant.int 6
%1979 = torch.aten.mul.int %1978, %int6_681 : !torch.int, !torch.int -> !torch.int
%1980 = torch.aten.add.int %1976, %1979 : !torch.int, !torch.int -> !torch.int
%1981 = torch.prim.ListConstruct %1968, %1974, %1980 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1982 = torch.aten.reshape %1882, %1981 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_682 = torch.constant.int 0
%int0_683 = torch.constant.int 0
%1983 = torch.aten.select.int %1942, %int0_682, %int0_683 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1984 = torch.aten.item %1983 : !torch.vtensor<[1],si64> -> !torch.int
%1985 = torch.aten.eq.int %1984, %int0_682 : !torch.int, !torch.int -> !torch.bool
%1986 = torch.aten.Int.bool %1985 : !torch.bool -> !torch.int
%int1_684 = torch.constant.int 1
%1987 = torch.aten.mul.int %1986, %int1_684 : !torch.int, !torch.int -> !torch.int
%1988 = torch.aten.add.int %1984, %1987 : !torch.int, !torch.int -> !torch.int
%int1_685 = torch.constant.int 1
%1989 = torch.aten.select.int %1942, %int0_682, %int1_685 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1990 = torch.aten.item %1989 : !torch.vtensor<[1],si64> -> !torch.int
%1991 = torch.aten.eq.int %1990, %int0_682 : !torch.int, !torch.int -> !torch.bool
%1992 = torch.aten.Int.bool %1991 : !torch.bool -> !torch.int
%int12_686 = torch.constant.int 12
%1993 = torch.aten.mul.int %1992, %int12_686 : !torch.int, !torch.int -> !torch.int
%1994 = torch.aten.add.int %1990, %1993 : !torch.int, !torch.int -> !torch.int
%int2_687 = torch.constant.int 2
%1995 = torch.aten.select.int %1942, %int0_682, %int2_687 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1996 = torch.aten.item %1995 : !torch.vtensor<[1],si64> -> !torch.int
%1997 = torch.aten.eq.int %1996, %int0_682 : !torch.int, !torch.int -> !torch.bool
%1998 = torch.aten.Int.bool %1997 : !torch.bool -> !torch.int
%int6_688 = torch.constant.int 6
%1999 = torch.aten.mul.int %1998, %int6_688 : !torch.int, !torch.int -> !torch.int
%2000 = torch.aten.add.int %1996, %1999 : !torch.int, !torch.int -> !torch.int
%2001 = torch.prim.ListConstruct %1988, %1994, %2000 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2002 = torch.aten.reshape %1911, %2001 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
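// Same attention core as before: K transposed to [12,64,6], then Q @ K^T -> [12,6,6].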
%int1_689 = torch.constant.int 1
%int2_690 = torch.constant.int 2
%2003 = torch.aten.transpose.int %1982, %int1_689, %int2_690 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%2004 = torch.aten.matmul %1962, %2003 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
%2005 = torch.vtensor.literal(dense_resource<__93> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_691 = torch.constant.int 0
%int0_692 = torch.constant.int 0
%2006 = torch.aten.select.int %2005, %int0_691, %int0_692 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2007 = torch.aten.item %2006 : !torch.vtensor<[1],si64> -> !torch.int
%2008 = torch.aten.eq.int %2007, %int0_691 : !torch.int, !torch.int -> !torch.bool
%2009 = torch.aten.Int.bool %2008 : !torch.bool -> !torch.int
%int12_693 = torch.constant.int 12
%2010 = torch.aten.mul.int %2009, %int12_693 : !torch.int, !torch.int -> !torch.int
%2011 = torch.aten.add.int %2007, %2010 : !torch.int, !torch.int -> !torch.int
%int1_694 = torch.constant.int 1
%2012 = torch.aten.select.int %2005, %int0_691, %int1_694 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2013 = torch.aten.item %2012 : !torch.vtensor<[1],si64> -> !torch.int
%2014 = torch.aten.eq.int %2013, %int0_691 : !torch.int, !torch.int -> !torch.bool
%2015 = torch.aten.Int.bool %2014 : !torch.bool -> !torch.int
%int6_695 = torch.constant.int 6
%2016 = torch.aten.mul.int %2015, %int6_695 : !torch.int, !torch.int -> !torch.int
%2017 = torch.aten.add.int %2013, %2016 : !torch.int, !torch.int -> !torch.int
%int2_696 = torch.constant.int 2
%2018 = torch.aten.select.int %2005, %int0_691, %int2_696 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2019 = torch.aten.item %2018 : !torch.vtensor<[1],si64> -> !torch.int
%2020 = torch.aten.eq.int %2019, %int0_691 : !torch.int, !torch.int -> !torch.bool
%2021 = torch.aten.Int.bool %2020 : !torch.bool -> !torch.int
%int6_697 = torch.constant.int 6
%2022 = torch.aten.mul.int %2021, %int6_697 : !torch.int, !torch.int -> !torch.int
%2023 = torch.aten.add.int %2019, %2022 : !torch.int, !torch.int -> !torch.int
%int3_698 = torch.constant.int 3
%2024 = torch.aten.select.int %2005, %int0_691, %int3_698 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2025 = torch.aten.item %2024 : !torch.vtensor<[1],si64> -> !torch.int
%2026 = torch.aten.eq.int %2025, %int0_691 : !torch.int, !torch.int -> !torch.bool
%2027 = torch.aten.Int.bool %2026 : !torch.bool -> !torch.int
%2028 = torch.aten.mul.int %2027, %int0_691 : !torch.int, !torch.int -> !torch.int
%2029 = torch.aten.add.int %2025, %2028 : !torch.int, !torch.int -> !torch.int
%2030 = torch.prim.ListConstruct %2011, %2017, %2023, %2029 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2031 = torch.aten.reshape %2004, %2030 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
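// Mask add (%277), lower clamp against scalar __94, reshape, softmax over the
// key dimension, and the weighted sum with V — structurally identical to the
// previous layer's attention tail.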
%int1_699 = torch.constant.int 1
%2032 = torch.aten.add.Tensor %2031, %277, %int1_699 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%2033 = torch.vtensor.literal(dense_resource<__94> : tensor<f32>) : !torch.vtensor<[],f32>
%2034 = torch.aten.maximum %2032, %2033 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
%2035 = torch.vtensor.literal(dense_resource<__95> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_700 = torch.constant.int 0
%int0_701 = torch.constant.int 0
%2036 = torch.aten.select.int %2035, %int0_700, %int0_701 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2037 = torch.aten.item %2036 : !torch.vtensor<[1],si64> -> !torch.int
%2038 = torch.aten.eq.int %2037, %int0_700 : !torch.int, !torch.int -> !torch.bool
%2039 = torch.aten.Int.bool %2038 : !torch.bool -> !torch.int
%int-1_702 = torch.constant.int -1
%2040 = torch.aten.mul.int %2039, %int-1_702 : !torch.int, !torch.int -> !torch.int
%2041 = torch.aten.add.int %2037, %2040 : !torch.int, !torch.int -> !torch.int
%int1_703 = torch.constant.int 1
%2042 = torch.aten.select.int %2035, %int0_700, %int1_703 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2043 = torch.aten.item %2042 : !torch.vtensor<[1],si64> -> !torch.int
%2044 = torch.aten.eq.int %2043, %int0_700 : !torch.int, !torch.int -> !torch.bool
%2045 = torch.aten.Int.bool %2044 : !torch.bool -> !torch.int
%int12_704 = torch.constant.int 12
%2046 = torch.aten.mul.int %2045, %int12_704 : !torch.int, !torch.int -> !torch.int
%2047 = torch.aten.add.int %2043, %2046 : !torch.int, !torch.int -> !torch.int
%int2_705 = torch.constant.int 2
%2048 = torch.aten.select.int %2035, %int0_700, %int2_705 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2049 = torch.aten.item %2048 : !torch.vtensor<[1],si64> -> !torch.int
%2050 = torch.aten.eq.int %2049, %int0_700 : !torch.int, !torch.int -> !torch.bool
%2051 = torch.aten.Int.bool %2050 : !torch.bool -> !torch.int
%int6_706 = torch.constant.int 6
%2052 = torch.aten.mul.int %2051, %int6_706 : !torch.int, !torch.int -> !torch.int
%2053 = torch.aten.add.int %2049, %2052 : !torch.int, !torch.int -> !torch.int
%2054 = torch.prim.ListConstruct %2041, %2047, %2053 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2055 = torch.aten.reshape %2034, %2054 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
%int2_707 = torch.constant.int 2
%none_708 = torch.constant.none
%2056 = torch.aten.softmax.int %2055, %int2_707, %none_708 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%2057 = torch.aten.matmul %2056, %2002 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
%2058 = torch.vtensor.literal(dense_resource<__96> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_709 = torch.constant.int 0
%int0_710 = torch.constant.int 0
%2059 = torch.aten.select.int %2058, %int0_709, %int0_710 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2060 = torch.aten.item %2059 : !torch.vtensor<[1],si64> -> !torch.int
%2061 = torch.aten.eq.int %2060, %int0_709 : !torch.int, !torch.int -> !torch.bool
%2062 = torch.aten.Int.bool %2061 : !torch.bool -> !torch.int
%int12_711 = torch.constant.int 12
%2063 = torch.aten.mul.int %2062, %int12_711 : !torch.int, !torch.int -> !torch.int
%2064 = torch.aten.add.int %2060, %2063 : !torch.int, !torch.int -> !torch.int
%int1_712 = torch.constant.int 1
%2065 = torch.aten.select.int %2058, %int0_709, %int1_712 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2066 = torch.aten.item %2065 : !torch.vtensor<[1],si64> -> !torch.int
%2067 = torch.aten.eq.int %2066, %int0_709 : !torch.int, !torch.int -> !torch.bool
%2068 = torch.aten.Int.bool %2067 : !torch.bool -> !torch.int
%int6_713 = torch.constant.int 6
%2069 = torch.aten.mul.int %2068, %int6_713 : !torch.int, !torch.int -> !torch.int
%2070 = torch.aten.add.int %2066, %2069 : !torch.int, !torch.int -> !torch.int
%int2_714 = torch.constant.int 2
%2071 = torch.aten.select.int %2058, %int0_709, %int2_714 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2072 = torch.aten.item %2071 : !torch.vtensor<[1],si64> -> !torch.int
%2073 = torch.aten.eq.int %2072, %int0_709 : !torch.int, !torch.int -> !torch.bool
%2074 = torch.aten.Int.bool %2073 : !torch.bool -> !torch.int
%int64_715 = torch.constant.int 64
%2075 = torch.aten.mul.int %2074, %int64_715 : !torch.int, !torch.int -> !torch.int
%2076 = torch.aten.add.int %2072, %2075 : !torch.int, !torch.int -> !torch.int
%int3_716 = torch.constant.int 3
%2077 = torch.aten.select.int %2058, %int0_709, %int3_716 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2078 = torch.aten.item %2077 : !torch.vtensor<[1],si64> -> !torch.int
%2079 = torch.aten.eq.int %2078, %int0_709 : !torch.int, !torch.int -> !torch.bool
%2080 = torch.aten.Int.bool %2079 : !torch.bool -> !torch.int
%2081 = torch.aten.mul.int %2080, %int0_709 : !torch.int, !torch.int -> !torch.int
%2082 = torch.aten.add.int %2078, %2081 : !torch.int, !torch.int -> !torch.int
%2083 = torch.prim.ListConstruct %2064, %2070, %2076, %2082 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2084 = torch.aten.reshape %2057, %2083 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_717 = torch.constant.int 1
%int2_718 = torch.constant.int 2
%2085 = torch.aten.transpose.int %2084, %int1_717, %int2_718 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%2086 = torch.vtensor.literal(dense_resource<__97> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_719 = torch.constant.int 0
%int0_720 = torch.constant.int 0
%2087 = torch.aten.select.int %2086, %int0_719, %int0_720 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2088 = torch.aten.item %2087 : !torch.vtensor<[1],si64> -> !torch.int
%2089 = torch.aten.eq.int %2088, %int0_719 : !torch.int, !torch.int -> !torch.bool
%2090 = torch.aten.Int.bool %2089 : !torch.bool -> !torch.int
%int1_721 = torch.constant.int 1
%2091 = torch.aten.mul.int %2090, %int1_721 : !torch.int, !torch.int -> !torch.int
%2092 = torch.aten.add.int %2088, %2091 : !torch.int, !torch.int -> !torch.int
%int1_722 = torch.constant.int 1
%2093 = torch.aten.select.int %2086, %int0_719, %int1_722 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2094 = torch.aten.item %2093 : !torch.vtensor<[1],si64> -> !torch.int
%2095 = torch.aten.eq.int %2094, %int0_719 : !torch.int, !torch.int -> !torch.bool
%2096 = torch.aten.Int.bool %2095 : !torch.bool -> !torch.int
%int6_723 = torch.constant.int 6
%2097 = torch.aten.mul.int %2096, %int6_723 : !torch.int, !torch.int -> !torch.int
%2098 = torch.aten.add.int %2094, %2097 : !torch.int, !torch.int -> !torch.int
%int2_724 = torch.constant.int 2
%2099 = torch.aten.select.int %2086, %int0_719, %int2_724 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2100 = torch.aten.item %2099 : !torch.vtensor<[1],si64> -> !torch.int
%2101 = torch.aten.eq.int %2100, %int0_719 : !torch.int, !torch.int -> !torch.bool
%2102 = torch.aten.Int.bool %2101 : !torch.bool -> !torch.int
%int12_725 = torch.constant.int 12
%2103 = torch.aten.mul.int %2102, %int12_725 : !torch.int, !torch.int -> !torch.int
%2104 = torch.aten.add.int %2100, %2103 : !torch.int, !torch.int -> !torch.int
%2105 = torch.prim.ListConstruct %2092, %2098, %2104 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2106 = torch.aten.reshape %2085, %2105 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
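// Heads merged back to [1,6,768]; the output projection %171 with bias %67 and
// the residual add with %1846 follow.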
%2107 = torch.aten.matmul %2106, %171 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_726 = torch.constant.int 1
%2108 = torch.aten.add.Tensor %67, %2107, %int1_726 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_727 = torch.constant.int 1
%2109 = torch.aten.add.Tensor %1846, %2108, %int1_727 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%2110 = torch.vtensor.literal(dense_resource<__98> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_728 = torch.constant.int 0
%int0_729 = torch.constant.int 0
%2111 = torch.aten.select.int %2110, %int0_728, %int0_729 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2112 = torch.aten.item %2111 : !torch.vtensor<[1],si64> -> !torch.int
%2113 = torch.aten.eq.int %2112, %int0_728 : !torch.int, !torch.int -> !torch.bool
%2114 = torch.aten.Int.bool %2113 : !torch.bool -> !torch.int
%int1_730 = torch.constant.int 1
%2115 = torch.aten.mul.int %2114, %int1_730 : !torch.int, !torch.int -> !torch.int
%2116 = torch.aten.add.int %2112, %2115 : !torch.int, !torch.int -> !torch.int
%int1_731 = torch.constant.int 1
%2117 = torch.aten.select.int %2110, %int0_728, %int1_731 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2118 = torch.aten.item %2117 : !torch.vtensor<[1],si64> -> !torch.int
%2119 = torch.aten.eq.int %2118, %int0_728 : !torch.int, !torch.int -> !torch.bool
%2120 = torch.aten.Int.bool %2119 : !torch.bool -> !torch.int
%int6_732 = torch.constant.int 6
%2121 = torch.aten.mul.int %2120, %int6_732 : !torch.int, !torch.int -> !torch.int
%2122 = torch.aten.add.int %2118, %2121 : !torch.int, !torch.int -> !torch.int
%2123 = torch.prim.ListConstruct %2116, %2122 : (!torch.int, !torch.int) -> !torch.list<int>
%2124 = torch.aten.reshape %2109, %2123 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
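// This layer's feed-forward sub-block: LayerNorm, 768->3072 ReLU MLP (%70..%73),
// and residual, mirroring the previous layer.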
%float9.999990e-06_733 = torch.constant.float 9.9999997473787516E-6
%int768_734 = torch.constant.int 768
%2125 = torch.prim.ListConstruct %int768_734 : (!torch.int) -> !torch.list<int>
%result0_735, %result1_736, %result2_737 = torch.aten.native_layer_norm %2124, %2125, %74, %75, %float9.999990e-06_733 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_738 = torch.constant.int 0
%int1_739 = torch.constant.int 1
%2126 = torch.aten.transpose.int %70, %int0_738, %int1_739 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%2127 = torch.aten.mm %result0_735, %2126 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%2128 = torch.aten.add.Tensor %2127, %71, %int1_739 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%2129 = torch.aten.relu %2128 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_740 = torch.constant.int 0
%int1_741 = torch.constant.int 1
%2130 = torch.aten.transpose.int %72, %int0_740, %int1_741 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%2131 = torch.aten.mm %2129, %2130 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%2132 = torch.aten.add.Tensor %2131, %73, %int1_741 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_742 = torch.constant.int 1
%2133 = torch.aten.add.Tensor %2124, %2132, %int1_742 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%2134 = torch.vtensor.literal(dense_resource<__99> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_743 = torch.constant.int 0
%int0_744 = torch.constant.int 0
%2135 = torch.aten.select.int %2134, %int0_743, %int0_744 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2136 = torch.aten.item %2135 : !torch.vtensor<[1],si64> -> !torch.int
%2137 = torch.aten.eq.int %2136, %int0_743 : !torch.int, !torch.int -> !torch.bool
%2138 = torch.aten.Int.bool %2137 : !torch.bool -> !torch.int
%int6_745 = torch.constant.int 6
%2139 = torch.aten.mul.int %2138, %int6_745 : !torch.int, !torch.int -> !torch.int
%2140 = torch.aten.add.int %2136, %2139 : !torch.int, !torch.int -> !torch.int
%int1_746 = torch.constant.int 1
%2141 = torch.aten.select.int %2134, %int0_743, %int1_746 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2142 = torch.aten.item %2141 : !torch.vtensor<[1],si64> -> !torch.int
%2143 = torch.aten.eq.int %2142, %int0_743 : !torch.int, !torch.int -> !torch.bool
%2144 = torch.aten.Int.bool %2143 : !torch.bool -> !torch.int
%int768_747 = torch.constant.int 768
%2145 = torch.aten.mul.int %2144, %int768_747 : !torch.int, !torch.int -> !torch.int
%2146 = torch.aten.add.int %2142, %2145 : !torch.int, !torch.int -> !torch.int
%int2_748 = torch.constant.int 2
%2147 = torch.aten.select.int %2134, %int0_743, %int2_748 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2148 = torch.aten.item %2147 : !torch.vtensor<[1],si64> -> !torch.int
%2149 = torch.aten.eq.int %2148, %int0_743 : !torch.int, !torch.int -> !torch.bool
%2150 = torch.aten.Int.bool %2149 : !torch.bool -> !torch.int
%2151 = torch.aten.mul.int %2150, %int0_743 : !torch.int, !torch.int -> !torch.int
%2152 = torch.aten.add.int %2148, %2151 : !torch.int, !torch.int -> !torch.int
%2153 = torch.prim.ListConstruct %2140, %2146, %2152 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2154 = torch.aten.reshape %2133, %2153 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
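// The pattern repeats for the following decoder layer: pre-attention LayerNorm,
// scaled query projection (%172/%78, scalar __100), key projection (%173/%76),
// and the same per-head reshape/transpose bookkeeping.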
%float9.999990e-06_749 = torch.constant.float 9.9999997473787516E-6
%int768_750 = torch.constant.int 768
%2155 = torch.prim.ListConstruct %int768_750 : (!torch.int) -> !torch.list<int>
%result0_751, %result1_752, %result2_753 = torch.aten.native_layer_norm %2154, %2155, %80, %81, %float9.999990e-06_749 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
%2156 = torch.aten.matmul %result0_751, %172 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_754 = torch.constant.int 1
%2157 = torch.aten.add.Tensor %78, %2156, %int1_754 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%2158 = torch.vtensor.literal(dense_resource<__100> : tensor<f32>) : !torch.vtensor<[],f32>
%2159 = torch.aten.mul.Tensor %2157, %2158 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%2160 = torch.aten.matmul %result0_751, %173 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_755 = torch.constant.int 1
%2161 = torch.aten.add.Tensor %76, %2160, %int1_755 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%2162 = torch.vtensor.literal(dense_resource<__101> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%2163 = torch.vtensor.literal(dense_resource<__102> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_756 = torch.constant.int 0
%int0_757 = torch.constant.int 0
%2164 = torch.aten.select.int %2162, %int0_756, %int0_757 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2165 = torch.aten.item %2164 : !torch.vtensor<[1],si64> -> !torch.int
%2166 = torch.aten.eq.int %2165, %int0_756 : !torch.int, !torch.int -> !torch.bool
%2167 = torch.aten.Int.bool %2166 : !torch.bool -> !torch.int
%int1_758 = torch.constant.int 1
%2168 = torch.aten.mul.int %2167, %int1_758 : !torch.int, !torch.int -> !torch.int
%2169 = torch.aten.add.int %2165, %2168 : !torch.int, !torch.int -> !torch.int
%int1_759 = torch.constant.int 1
%2170 = torch.aten.select.int %2162, %int0_756, %int1_759 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2171 = torch.aten.item %2170 : !torch.vtensor<[1],si64> -> !torch.int
%2172 = torch.aten.eq.int %2171, %int0_756 : !torch.int, !torch.int -> !torch.bool
%2173 = torch.aten.Int.bool %2172 : !torch.bool -> !torch.int
%int6_760 = torch.constant.int 6
%2174 = torch.aten.mul.int %2173, %int6_760 : !torch.int, !torch.int -> !torch.int
%2175 = torch.aten.add.int %2171, %2174 : !torch.int, !torch.int -> !torch.int
%int2_761 = torch.constant.int 2
%2176 = torch.aten.select.int %2162, %int0_756, %int2_761 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2177 = torch.aten.item %2176 : !torch.vtensor<[1],si64> -> !torch.int
%2178 = torch.aten.eq.int %2177, %int0_756 : !torch.int, !torch.int -> !torch.bool
%2179 = torch.aten.Int.bool %2178 : !torch.bool -> !torch.int
%int768_762 = torch.constant.int 768
%2180 = torch.aten.mul.int %2179, %int768_762 : !torch.int, !torch.int -> !torch.int
%2181 = torch.aten.add.int %2177, %2180 : !torch.int, !torch.int -> !torch.int
%int3_763 = torch.constant.int 3
%2182 = torch.aten.select.int %2162, %int0_756, %int3_763 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2183 = torch.aten.item %2182 : !torch.vtensor<[1],si64> -> !torch.int
%2184 = torch.aten.eq.int %2183, %int0_756 : !torch.int, !torch.int -> !torch.bool
%2185 = torch.aten.Int.bool %2184 : !torch.bool -> !torch.int
%2186 = torch.aten.mul.int %2185, %int0_756 : !torch.int, !torch.int -> !torch.int
%2187 = torch.aten.add.int %2183, %2186 : !torch.int, !torch.int -> !torch.int
%2188 = torch.prim.ListConstruct %2169, %2175, %2181, %2187 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2189 = torch.aten.reshape %2161, %2188 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_764 = torch.constant.int 1
%int2_765 = torch.constant.int 2
%2190 = torch.aten.transpose.int %2189, %int1_764, %int2_765 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%2191 = torch.aten.matmul %result0_751, %174 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_766 = torch.constant.int 1
%2192 = torch.aten.add.Tensor %77, %2191, %int1_766 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_767 = torch.constant.int 0
%int0_768 = torch.constant.int 0
%2193 = torch.aten.select.int %2163, %int0_767, %int0_768 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2194 = torch.aten.item %2193 : !torch.vtensor<[1],si64> -> !torch.int
%2195 = torch.aten.eq.int %2194, %int0_767 : !torch.int, !torch.int -> !torch.bool
%2196 = torch.aten.Int.bool %2195 : !torch.bool -> !torch.int
%int1_769 = torch.constant.int 1
%2197 = torch.aten.mul.int %2196, %int1_769 : !torch.int, !torch.int -> !torch.int
%2198 = torch.aten.add.int %2194, %2197 : !torch.int, !torch.int -> !torch.int
%int1_770 = torch.constant.int 1
%2199 = torch.aten.select.int %2163, %int0_767, %int1_770 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2200 = torch.aten.item %2199 : !torch.vtensor<[1],si64> -> !torch.int
%2201 = torch.aten.eq.int %2200, %int0_767 : !torch.int, !torch.int -> !torch.bool
%2202 = torch.aten.Int.bool %2201 : !torch.bool -> !torch.int
%int6_771 = torch.constant.int 6
%2203 = torch.aten.mul.int %2202, %int6_771 : !torch.int, !torch.int -> !torch.int
%2204 = torch.aten.add.int %2200, %2203 : !torch.int, !torch.int -> !torch.int
%int2_772 = torch.constant.int 2
%2205 = torch.aten.select.int %2163, %int0_767, %int2_772 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2206 = torch.aten.item %2205 : !torch.vtensor<[1],si64> -> !torch.int
%2207 = torch.aten.eq.int %2206, %int0_767 : !torch.int, !torch.int -> !torch.bool
%2208 = torch.aten.Int.bool %2207 : !torch.bool -> !torch.int
%int768_773 = torch.constant.int 768
%2209 = torch.aten.mul.int %2208, %int768_773 : !torch.int, !torch.int -> !torch.int
%2210 = torch.aten.add.int %2206, %2209 : !torch.int, !torch.int -> !torch.int
%int3_774 = torch.constant.int 3
%2211 = torch.aten.select.int %2163, %int0_767, %int3_774 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2212 = torch.aten.item %2211 : !torch.vtensor<[1],si64> -> !torch.int
%2213 = torch.aten.eq.int %2212, %int0_767 : !torch.int, !torch.int -> !torch.bool
%2214 = torch.aten.Int.bool %2213 : !torch.bool -> !torch.int
%2215 = torch.aten.mul.int %2214, %int0_767 : !torch.int, !torch.int -> !torch.int
%2216 = torch.aten.add.int %2212, %2215 : !torch.int, !torch.int -> !torch.int
%2217 = torch.prim.ListConstruct %2198, %2204, %2210, %2216 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2218 = torch.aten.reshape %2192, %2217 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_775 = torch.constant.int 1
%int2_776 = torch.constant.int 2
%2219 = torch.aten.transpose.int %2218, %int1_775, %int2_776 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
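// Value is now [1,12,6,64]; the scaled query goes through the same
// reshape/transpose below (shape resource __103).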
%2220 = torch.vtensor.literal(dense_resource<__103> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_777 = torch.constant.int 0
%int0_778 = torch.constant.int 0
%2221 = torch.aten.select.int %2220, %int0_777, %int0_778 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2222 = torch.aten.item %2221 : !torch.vtensor<[1],si64> -> !torch.int
%2223 = torch.aten.eq.int %2222, %int0_777 : !torch.int, !torch.int -> !torch.bool
%2224 = torch.aten.Int.bool %2223 : !torch.bool -> !torch.int
%int1_779 = torch.constant.int 1
%2225 = torch.aten.mul.int %2224, %int1_779 : !torch.int, !torch.int -> !torch.int
%2226 = torch.aten.add.int %2222, %2225 : !torch.int, !torch.int -> !torch.int
%int1_780 = torch.constant.int 1
%2227 = torch.aten.select.int %2220, %int0_777, %int1_780 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2228 = torch.aten.item %2227 : !torch.vtensor<[1],si64> -> !torch.int
%2229 = torch.aten.eq.int %2228, %int0_777 : !torch.int, !torch.int -> !torch.bool
%2230 = torch.aten.Int.bool %2229 : !torch.bool -> !torch.int
%int6_781 = torch.constant.int 6
%2231 = torch.aten.mul.int %2230, %int6_781 : !torch.int, !torch.int -> !torch.int
%2232 = torch.aten.add.int %2228, %2231 : !torch.int, !torch.int -> !torch.int
%int2_782 = torch.constant.int 2
%2233 = torch.aten.select.int %2220, %int0_777, %int2_782 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2234 = torch.aten.item %2233 : !torch.vtensor<[1],si64> -> !torch.int
%2235 = torch.aten.eq.int %2234, %int0_777 : !torch.int, !torch.int -> !torch.bool
%2236 = torch.aten.Int.bool %2235 : !torch.bool -> !torch.int
%int768_783 = torch.constant.int 768
%2237 = torch.aten.mul.int %2236, %int768_783 : !torch.int, !torch.int -> !torch.int
%2238 = torch.aten.add.int %2234, %2237 : !torch.int, !torch.int -> !torch.int
%int3_784 = torch.constant.int 3
%2239 = torch.aten.select.int %2220, %int0_777, %int3_784 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2240 = torch.aten.item %2239 : !torch.vtensor<[1],si64> -> !torch.int
%2241 = torch.aten.eq.int %2240, %int0_777 : !torch.int, !torch.int -> !torch.bool
%2242 = torch.aten.Int.bool %2241 : !torch.bool -> !torch.int
%2243 = torch.aten.mul.int %2242, %int0_777 : !torch.int, !torch.int -> !torch.int
%2244 = torch.aten.add.int %2240, %2243 : !torch.int, !torch.int -> !torch.int
%2245 = torch.prim.ListConstruct %2226, %2232, %2238, %2244 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2246 = torch.aten.reshape %2159, %2245 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_785 = torch.constant.int 1
%int2_786 = torch.constant.int 2
%2247 = torch.aten.transpose.int %2246, %int1_785, %int2_786 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
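// Q, K, V all sit in [1,12,6,64]. The three reshapes below (__104, __105,
// __106) fold batch and heads together to [12,6,64] for the batched matmul.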
%2248 = torch.vtensor.literal(dense_resource<__104> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%2249 = torch.vtensor.literal(dense_resource<__105> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%2250 = torch.vtensor.literal(dense_resource<__106> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_787 = torch.constant.int 0
%int0_788 = torch.constant.int 0
%2251 = torch.aten.select.int %2248, %int0_787, %int0_788 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2252 = torch.aten.item %2251 : !torch.vtensor<[1],si64> -> !torch.int
%2253 = torch.aten.eq.int %2252, %int0_787 : !torch.int, !torch.int -> !torch.bool
%2254 = torch.aten.Int.bool %2253 : !torch.bool -> !torch.int
%int1_789 = torch.constant.int 1
%2255 = torch.aten.mul.int %2254, %int1_789 : !torch.int, !torch.int -> !torch.int
%2256 = torch.aten.add.int %2252, %2255 : !torch.int, !torch.int -> !torch.int
%int1_790 = torch.constant.int 1
%2257 = torch.aten.select.int %2248, %int0_787, %int1_790 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2258 = torch.aten.item %2257 : !torch.vtensor<[1],si64> -> !torch.int
%2259 = torch.aten.eq.int %2258, %int0_787 : !torch.int, !torch.int -> !torch.bool
%2260 = torch.aten.Int.bool %2259 : !torch.bool -> !torch.int
%int12_791 = torch.constant.int 12
%2261 = torch.aten.mul.int %2260, %int12_791 : !torch.int, !torch.int -> !torch.int
%2262 = torch.aten.add.int %2258, %2261 : !torch.int, !torch.int -> !torch.int
%int2_792 = torch.constant.int 2
%2263 = torch.aten.select.int %2248, %int0_787, %int2_792 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2264 = torch.aten.item %2263 : !torch.vtensor<[1],si64> -> !torch.int
%2265 = torch.aten.eq.int %2264, %int0_787 : !torch.int, !torch.int -> !torch.bool
%2266 = torch.aten.Int.bool %2265 : !torch.bool -> !torch.int
%int6_793 = torch.constant.int 6
%2267 = torch.aten.mul.int %2266, %int6_793 : !torch.int, !torch.int -> !torch.int
%2268 = torch.aten.add.int %2264, %2267 : !torch.int, !torch.int -> !torch.int
%2269 = torch.prim.ListConstruct %2256, %2262, %2268 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2270 = torch.aten.reshape %2247, %2269 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_794 = torch.constant.int 0
%int0_795 = torch.constant.int 0
%2271 = torch.aten.select.int %2249, %int0_794, %int0_795 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2272 = torch.aten.item %2271 : !torch.vtensor<[1],si64> -> !torch.int
%2273 = torch.aten.eq.int %2272, %int0_794 : !torch.int, !torch.int -> !torch.bool
%2274 = torch.aten.Int.bool %2273 : !torch.bool -> !torch.int
%int1_796 = torch.constant.int 1
%2275 = torch.aten.mul.int %2274, %int1_796 : !torch.int, !torch.int -> !torch.int
%2276 = torch.aten.add.int %2272, %2275 : !torch.int, !torch.int -> !torch.int
%int1_797 = torch.constant.int 1
%2277 = torch.aten.select.int %2249, %int0_794, %int1_797 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2278 = torch.aten.item %2277 : !torch.vtensor<[1],si64> -> !torch.int
%2279 = torch.aten.eq.int %2278, %int0_794 : !torch.int, !torch.int -> !torch.bool
%2280 = torch.aten.Int.bool %2279 : !torch.bool -> !torch.int
%int12_798 = torch.constant.int 12
%2281 = torch.aten.mul.int %2280, %int12_798 : !torch.int, !torch.int -> !torch.int
%2282 = torch.aten.add.int %2278, %2281 : !torch.int, !torch.int -> !torch.int
%int2_799 = torch.constant.int 2
%2283 = torch.aten.select.int %2249, %int0_794, %int2_799 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2284 = torch.aten.item %2283 : !torch.vtensor<[1],si64> -> !torch.int
%2285 = torch.aten.eq.int %2284, %int0_794 : !torch.int, !torch.int -> !torch.bool
%2286 = torch.aten.Int.bool %2285 : !torch.bool -> !torch.int
%int6_800 = torch.constant.int 6
%2287 = torch.aten.mul.int %2286, %int6_800 : !torch.int, !torch.int -> !torch.int
%2288 = torch.aten.add.int %2284, %2287 : !torch.int, !torch.int -> !torch.int
%2289 = torch.prim.ListConstruct %2276, %2282, %2288 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2290 = torch.aten.reshape %2190, %2289 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_801 = torch.constant.int 0
%int0_802 = torch.constant.int 0
%2291 = torch.aten.select.int %2250, %int0_801, %int0_802 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2292 = torch.aten.item %2291 : !torch.vtensor<[1],si64> -> !torch.int
%2293 = torch.aten.eq.int %2292, %int0_801 : !torch.int, !torch.int -> !torch.bool
%2294 = torch.aten.Int.bool %2293 : !torch.bool -> !torch.int
%int1_803 = torch.constant.int 1
%2295 = torch.aten.mul.int %2294, %int1_803 : !torch.int, !torch.int -> !torch.int
%2296 = torch.aten.add.int %2292, %2295 : !torch.int, !torch.int -> !torch.int
%int1_804 = torch.constant.int 1
%2297 = torch.aten.select.int %2250, %int0_801, %int1_804 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2298 = torch.aten.item %2297 : !torch.vtensor<[1],si64> -> !torch.int
%2299 = torch.aten.eq.int %2298, %int0_801 : !torch.int, !torch.int -> !torch.bool
%2300 = torch.aten.Int.bool %2299 : !torch.bool -> !torch.int
%int12_805 = torch.constant.int 12
%2301 = torch.aten.mul.int %2300, %int12_805 : !torch.int, !torch.int -> !torch.int
%2302 = torch.aten.add.int %2298, %2301 : !torch.int, !torch.int -> !torch.int
%int2_806 = torch.constant.int 2
%2303 = torch.aten.select.int %2250, %int0_801, %int2_806 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2304 = torch.aten.item %2303 : !torch.vtensor<[1],si64> -> !torch.int
%2305 = torch.aten.eq.int %2304, %int0_801 : !torch.int, !torch.int -> !torch.bool
%2306 = torch.aten.Int.bool %2305 : !torch.bool -> !torch.int
%int6_807 = torch.constant.int 6
%2307 = torch.aten.mul.int %2306, %int6_807 : !torch.int, !torch.int -> !torch.int
%2308 = torch.aten.add.int %2304, %2307 : !torch.int, !torch.int -> !torch.int
%2309 = torch.prim.ListConstruct %2296, %2302, %2308 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2310 = torch.aten.reshape %2219, %2309 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int1_808 = torch.constant.int 1
%int2_809 = torch.constant.int 2
%2311 = torch.aten.transpose.int %2290, %int1_808, %int2_809 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%2312 = torch.aten.matmul %2270, %2311 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
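// Attention scores: Q (%2270) times K^T (%2311) over 12 heads -> [12,6,6].
// Reshaped back to [1,12,6,6] below (resource __107) so the additive mask
// can broadcast in.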
%2313 = torch.vtensor.literal(dense_resource<__107> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_810 = torch.constant.int 0
%int0_811 = torch.constant.int 0
%2314 = torch.aten.select.int %2313, %int0_810, %int0_811 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2315 = torch.aten.item %2314 : !torch.vtensor<[1],si64> -> !torch.int
%2316 = torch.aten.eq.int %2315, %int0_810 : !torch.int, !torch.int -> !torch.bool
%2317 = torch.aten.Int.bool %2316 : !torch.bool -> !torch.int
%int12_812 = torch.constant.int 12
%2318 = torch.aten.mul.int %2317, %int12_812 : !torch.int, !torch.int -> !torch.int
%2319 = torch.aten.add.int %2315, %2318 : !torch.int, !torch.int -> !torch.int
%int1_813 = torch.constant.int 1
%2320 = torch.aten.select.int %2313, %int0_810, %int1_813 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2321 = torch.aten.item %2320 : !torch.vtensor<[1],si64> -> !torch.int
%2322 = torch.aten.eq.int %2321, %int0_810 : !torch.int, !torch.int -> !torch.bool
%2323 = torch.aten.Int.bool %2322 : !torch.bool -> !torch.int
%int6_814 = torch.constant.int 6
%2324 = torch.aten.mul.int %2323, %int6_814 : !torch.int, !torch.int -> !torch.int
%2325 = torch.aten.add.int %2321, %2324 : !torch.int, !torch.int -> !torch.int
%int2_815 = torch.constant.int 2
%2326 = torch.aten.select.int %2313, %int0_810, %int2_815 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2327 = torch.aten.item %2326 : !torch.vtensor<[1],si64> -> !torch.int
%2328 = torch.aten.eq.int %2327, %int0_810 : !torch.int, !torch.int -> !torch.bool
%2329 = torch.aten.Int.bool %2328 : !torch.bool -> !torch.int
%int6_816 = torch.constant.int 6
%2330 = torch.aten.mul.int %2329, %int6_816 : !torch.int, !torch.int -> !torch.int
%2331 = torch.aten.add.int %2327, %2330 : !torch.int, !torch.int -> !torch.int
%int3_817 = torch.constant.int 3
%2332 = torch.aten.select.int %2313, %int0_810, %int3_817 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2333 = torch.aten.item %2332 : !torch.vtensor<[1],si64> -> !torch.int
%2334 = torch.aten.eq.int %2333, %int0_810 : !torch.int, !torch.int -> !torch.bool
%2335 = torch.aten.Int.bool %2334 : !torch.bool -> !torch.int
%2336 = torch.aten.mul.int %2335, %int0_810 : !torch.int, !torch.int -> !torch.int
%2337 = torch.aten.add.int %2333, %2336 : !torch.int, !torch.int -> !torch.int
%2338 = torch.prim.ListConstruct %2319, %2325, %2331, %2337 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2339 = torch.aten.reshape %2312, %2338 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
%int1_818 = torch.constant.int 1
%2340 = torch.aten.add.Tensor %2339, %277, %int1_818 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%2341 = torch.vtensor.literal(dense_resource<__108> : tensor<f32>) : !torch.vtensor<[],f32>
%2342 = torch.aten.maximum %2340, %2341 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
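// Additive attention mask %277 applied, then floored via maximum with the
// elided scalar __108: presumably the float32 lowest value, keeping masked
// logits finite, which is how the ONNX export of OPT's causal mask lowers.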
%2343 = torch.vtensor.literal(dense_resource<__109> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_819 = torch.constant.int 0
%int0_820 = torch.constant.int 0
%2344 = torch.aten.select.int %2343, %int0_819, %int0_820 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2345 = torch.aten.item %2344 : !torch.vtensor<[1],si64> -> !torch.int
%2346 = torch.aten.eq.int %2345, %int0_819 : !torch.int, !torch.int -> !torch.bool
%2347 = torch.aten.Int.bool %2346 : !torch.bool -> !torch.int
%int-1_821 = torch.constant.int -1
%2348 = torch.aten.mul.int %2347, %int-1_821 : !torch.int, !torch.int -> !torch.int
%2349 = torch.aten.add.int %2345, %2348 : !torch.int, !torch.int -> !torch.int
%int1_822 = torch.constant.int 1
%2350 = torch.aten.select.int %2343, %int0_819, %int1_822 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2351 = torch.aten.item %2350 : !torch.vtensor<[1],si64> -> !torch.int
%2352 = torch.aten.eq.int %2351, %int0_819 : !torch.int, !torch.int -> !torch.bool
%2353 = torch.aten.Int.bool %2352 : !torch.bool -> !torch.int
%int12_823 = torch.constant.int 12
%2354 = torch.aten.mul.int %2353, %int12_823 : !torch.int, !torch.int -> !torch.int
%2355 = torch.aten.add.int %2351, %2354 : !torch.int, !torch.int -> !torch.int
%int2_824 = torch.constant.int 2
%2356 = torch.aten.select.int %2343, %int0_819, %int2_824 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2357 = torch.aten.item %2356 : !torch.vtensor<[1],si64> -> !torch.int
%2358 = torch.aten.eq.int %2357, %int0_819 : !torch.int, !torch.int -> !torch.bool
%2359 = torch.aten.Int.bool %2358 : !torch.bool -> !torch.int
%int6_825 = torch.constant.int 6
%2360 = torch.aten.mul.int %2359, %int6_825 : !torch.int, !torch.int -> !torch.int
%2361 = torch.aten.add.int %2357, %2360 : !torch.int, !torch.int -> !torch.int
%2362 = torch.prim.ListConstruct %2349, %2355, %2361 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2363 = torch.aten.reshape %2342, %2362 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
%int2_826 = torch.constant.int 2
%none_827 = torch.constant.none
%2364 = torch.aten.softmax.int %2363, %int2_826, %none_827 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
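// Softmax over the key axis (dim 2 of [12,6,6]), then the weighted sum
// with V to produce the per-head context.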
%2365 = torch.aten.matmul %2364, %2310 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
%2366 = torch.vtensor.literal(dense_resource<__110> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_828 = torch.constant.int 0
%int0_829 = torch.constant.int 0
%2367 = torch.aten.select.int %2366, %int0_828, %int0_829 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2368 = torch.aten.item %2367 : !torch.vtensor<[1],si64> -> !torch.int
%2369 = torch.aten.eq.int %2368, %int0_828 : !torch.int, !torch.int -> !torch.bool
%2370 = torch.aten.Int.bool %2369 : !torch.bool -> !torch.int
%int12_830 = torch.constant.int 12
%2371 = torch.aten.mul.int %2370, %int12_830 : !torch.int, !torch.int -> !torch.int
%2372 = torch.aten.add.int %2368, %2371 : !torch.int, !torch.int -> !torch.int
%int1_831 = torch.constant.int 1
%2373 = torch.aten.select.int %2366, %int0_828, %int1_831 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2374 = torch.aten.item %2373 : !torch.vtensor<[1],si64> -> !torch.int
%2375 = torch.aten.eq.int %2374, %int0_828 : !torch.int, !torch.int -> !torch.bool
%2376 = torch.aten.Int.bool %2375 : !torch.bool -> !torch.int
%int6_832 = torch.constant.int 6
%2377 = torch.aten.mul.int %2376, %int6_832 : !torch.int, !torch.int -> !torch.int
%2378 = torch.aten.add.int %2374, %2377 : !torch.int, !torch.int -> !torch.int
%int2_833 = torch.constant.int 2
%2379 = torch.aten.select.int %2366, %int0_828, %int2_833 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2380 = torch.aten.item %2379 : !torch.vtensor<[1],si64> -> !torch.int
%2381 = torch.aten.eq.int %2380, %int0_828 : !torch.int, !torch.int -> !torch.bool
%2382 = torch.aten.Int.bool %2381 : !torch.bool -> !torch.int
%int64_834 = torch.constant.int 64
%2383 = torch.aten.mul.int %2382, %int64_834 : !torch.int, !torch.int -> !torch.int
%2384 = torch.aten.add.int %2380, %2383 : !torch.int, !torch.int -> !torch.int
%int3_835 = torch.constant.int 3
%2385 = torch.aten.select.int %2366, %int0_828, %int3_835 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2386 = torch.aten.item %2385 : !torch.vtensor<[1],si64> -> !torch.int
%2387 = torch.aten.eq.int %2386, %int0_828 : !torch.int, !torch.int -> !torch.bool
%2388 = torch.aten.Int.bool %2387 : !torch.bool -> !torch.int
%2389 = torch.aten.mul.int %2388, %int0_828 : !torch.int, !torch.int -> !torch.int
%2390 = torch.aten.add.int %2386, %2389 : !torch.int, !torch.int -> !torch.int
%2391 = torch.prim.ListConstruct %2372, %2378, %2384, %2390 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2392 = torch.aten.reshape %2365, %2391 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_836 = torch.constant.int 1
%int2_837 = torch.constant.int 2
%2393 = torch.aten.transpose.int %2392, %int1_836, %int2_837 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
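// Context [12,6,64] -> [1,12,6,64] -> [1,6,12,64]; merged to [1,6,768]
// below (shape resource __111) for the output projection.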
%2394 = torch.vtensor.literal(dense_resource<__111> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_838 = torch.constant.int 0
%int0_839 = torch.constant.int 0
%2395 = torch.aten.select.int %2394, %int0_838, %int0_839 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2396 = torch.aten.item %2395 : !torch.vtensor<[1],si64> -> !torch.int
%2397 = torch.aten.eq.int %2396, %int0_838 : !torch.int, !torch.int -> !torch.bool
%2398 = torch.aten.Int.bool %2397 : !torch.bool -> !torch.int
%int1_840 = torch.constant.int 1
%2399 = torch.aten.mul.int %2398, %int1_840 : !torch.int, !torch.int -> !torch.int
%2400 = torch.aten.add.int %2396, %2399 : !torch.int, !torch.int -> !torch.int
%int1_841 = torch.constant.int 1
%2401 = torch.aten.select.int %2394, %int0_838, %int1_841 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2402 = torch.aten.item %2401 : !torch.vtensor<[1],si64> -> !torch.int
%2403 = torch.aten.eq.int %2402, %int0_838 : !torch.int, !torch.int -> !torch.bool
%2404 = torch.aten.Int.bool %2403 : !torch.bool -> !torch.int
%int6_842 = torch.constant.int 6
%2405 = torch.aten.mul.int %2404, %int6_842 : !torch.int, !torch.int -> !torch.int
%2406 = torch.aten.add.int %2402, %2405 : !torch.int, !torch.int -> !torch.int
%int2_843 = torch.constant.int 2
%2407 = torch.aten.select.int %2394, %int0_838, %int2_843 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2408 = torch.aten.item %2407 : !torch.vtensor<[1],si64> -> !torch.int
%2409 = torch.aten.eq.int %2408, %int0_838 : !torch.int, !torch.int -> !torch.bool
%2410 = torch.aten.Int.bool %2409 : !torch.bool -> !torch.int
%int12_844 = torch.constant.int 12
%2411 = torch.aten.mul.int %2410, %int12_844 : !torch.int, !torch.int -> !torch.int
%2412 = torch.aten.add.int %2408, %2411 : !torch.int, !torch.int -> !torch.int
%2413 = torch.prim.ListConstruct %2400, %2406, %2412 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2414 = torch.aten.reshape %2393, %2413 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%2415 = torch.aten.matmul %2414, %175 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_845 = torch.constant.int 1
%2416 = torch.aten.add.Tensor %79, %2415, %int1_845 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_846 = torch.constant.int 1
%2417 = torch.aten.add.Tensor %2154, %2416, %int1_846 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
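// Output projection (Wo %175, bias %79) plus the residual %2154, presumably
// the pre-norm hidden state, consistent with OPT's pre-LayerNorm layout.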
%2418 = torch.vtensor.literal(dense_resource<__112> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_847 = torch.constant.int 0
%int0_848 = torch.constant.int 0
%2419 = torch.aten.select.int %2418, %int0_847, %int0_848 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2420 = torch.aten.item %2419 : !torch.vtensor<[1],si64> -> !torch.int
%2421 = torch.aten.eq.int %2420, %int0_847 : !torch.int, !torch.int -> !torch.bool
%2422 = torch.aten.Int.bool %2421 : !torch.bool -> !torch.int
%int1_849 = torch.constant.int 1
%2423 = torch.aten.mul.int %2422, %int1_849 : !torch.int, !torch.int -> !torch.int
%2424 = torch.aten.add.int %2420, %2423 : !torch.int, !torch.int -> !torch.int
%int1_850 = torch.constant.int 1
%2425 = torch.aten.select.int %2418, %int0_847, %int1_850 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2426 = torch.aten.item %2425 : !torch.vtensor<[1],si64> -> !torch.int
%2427 = torch.aten.eq.int %2426, %int0_847 : !torch.int, !torch.int -> !torch.bool
%2428 = torch.aten.Int.bool %2427 : !torch.bool -> !torch.int
%int6_851 = torch.constant.int 6
%2429 = torch.aten.mul.int %2428, %int6_851 : !torch.int, !torch.int -> !torch.int
%2430 = torch.aten.add.int %2426, %2429 : !torch.int, !torch.int -> !torch.int
%2431 = torch.prim.ListConstruct %2424, %2430 : (!torch.int, !torch.int) -> !torch.list<int>
%2432 = torch.aten.reshape %2417, %2431 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
%float9.999990e-06_852 = torch.constant.float 9.9999997473787516E-6
%int768_853 = torch.constant.int 768
%2433 = torch.prim.ListConstruct %int768_853 : (!torch.int) -> !torch.list<int>
%result0_854, %result1_855, %result2_856 = torch.aten.native_layer_norm %2432, %2433, %86, %87, %float9.999990e-06_852 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
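// LayerNorm (eps ~= 1e-5) over the flattened [6,768] activations; the
// feed-forward block follows.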
%int0_857 = torch.constant.int 0
%int1_858 = torch.constant.int 1
%2434 = torch.aten.transpose.int %82, %int0_857, %int1_858 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%2435 = torch.aten.mm %result0_854, %2434 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%2436 = torch.aten.add.Tensor %2435, %83, %int1_858 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%2437 = torch.aten.relu %2436 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_859 = torch.constant.int 0
%int1_860 = torch.constant.int 1
%2438 = torch.aten.transpose.int %84, %int0_859, %int1_860 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%2439 = torch.aten.mm %2437, %2438 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%2440 = torch.aten.add.Tensor %2439, %85, %int1_860 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
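// Feed-forward: 768 -> 3072, ReLU, 3072 -> 768. The weights (%82, %84) are
// stored in torch's [out, in] Linear layout and flipped with transpose.int
// before each mm.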
%int1_861 = torch.constant.int 1
%2441 = torch.aten.add.Tensor %2432, %2440, %int1_861 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%2442 = torch.vtensor.literal(dense_resource<__113> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_862 = torch.constant.int 0
%int0_863 = torch.constant.int 0
%2443 = torch.aten.select.int %2442, %int0_862, %int0_863 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2444 = torch.aten.item %2443 : !torch.vtensor<[1],si64> -> !torch.int
%2445 = torch.aten.eq.int %2444, %int0_862 : !torch.int, !torch.int -> !torch.bool
%2446 = torch.aten.Int.bool %2445 : !torch.bool -> !torch.int
%int6_864 = torch.constant.int 6
%2447 = torch.aten.mul.int %2446, %int6_864 : !torch.int, !torch.int -> !torch.int
%2448 = torch.aten.add.int %2444, %2447 : !torch.int, !torch.int -> !torch.int
%int1_865 = torch.constant.int 1
%2449 = torch.aten.select.int %2442, %int0_862, %int1_865 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2450 = torch.aten.item %2449 : !torch.vtensor<[1],si64> -> !torch.int
%2451 = torch.aten.eq.int %2450, %int0_862 : !torch.int, !torch.int -> !torch.bool
%2452 = torch.aten.Int.bool %2451 : !torch.bool -> !torch.int
%int768_866 = torch.constant.int 768
%2453 = torch.aten.mul.int %2452, %int768_866 : !torch.int, !torch.int -> !torch.int
%2454 = torch.aten.add.int %2450, %2453 : !torch.int, !torch.int -> !torch.int
%int2_867 = torch.constant.int 2
%2455 = torch.aten.select.int %2442, %int0_862, %int2_867 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2456 = torch.aten.item %2455 : !torch.vtensor<[1],si64> -> !torch.int
%2457 = torch.aten.eq.int %2456, %int0_862 : !torch.int, !torch.int -> !torch.bool
%2458 = torch.aten.Int.bool %2457 : !torch.bool -> !torch.int
%2459 = torch.aten.mul.int %2458, %int0_862 : !torch.int, !torch.int -> !torch.int
%2460 = torch.aten.add.int %2456, %2459 : !torch.int, !torch.int -> !torch.int
%2461 = torch.prim.ListConstruct %2448, %2454, %2460 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2462 = torch.aten.reshape %2441, %2461 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%float9.999990e-06_868 = torch.constant.float 9.9999997473787516E-6
%int768_869 = torch.constant.int 768
%2463 = torch.prim.ListConstruct %int768_869 : (!torch.int) -> !torch.list<int>
%result0_870, %result1_871, %result2_872 = torch.aten.native_layer_norm %2462, %2463, %92, %93, %float9.999990e-06_868 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
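// End of this decoder layer. The next layer repeats the same self-attention
// pattern with weights %176-%179, biases %88-%91, and shape resources
// __114 onward.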
%2464 = torch.aten.matmul %result0_870, %176 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_873 = torch.constant.int 1
%2465 = torch.aten.add.Tensor %90, %2464, %int1_873 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%2466 = torch.vtensor.literal(dense_resource<__114> : tensor<f32>) : !torch.vtensor<[],f32>
%2467 = torch.aten.mul.Tensor %2465, %2466 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%2468 = torch.aten.matmul %result0_870, %177 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_874 = torch.constant.int 1
%2469 = torch.aten.add.Tensor %88, %2468, %int1_874 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%2470 = torch.vtensor.literal(dense_resource<__115> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%2471 = torch.vtensor.literal(dense_resource<__116> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_875 = torch.constant.int 0
%int0_876 = torch.constant.int 0
%2472 = torch.aten.select.int %2470, %int0_875, %int0_876 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2473 = torch.aten.item %2472 : !torch.vtensor<[1],si64> -> !torch.int
%2474 = torch.aten.eq.int %2473, %int0_875 : !torch.int, !torch.int -> !torch.bool
%2475 = torch.aten.Int.bool %2474 : !torch.bool -> !torch.int
%int1_877 = torch.constant.int 1
%2476 = torch.aten.mul.int %2475, %int1_877 : !torch.int, !torch.int -> !torch.int
%2477 = torch.aten.add.int %2473, %2476 : !torch.int, !torch.int -> !torch.int
%int1_878 = torch.constant.int 1
%2478 = torch.aten.select.int %2470, %int0_875, %int1_878 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2479 = torch.aten.item %2478 : !torch.vtensor<[1],si64> -> !torch.int
%2480 = torch.aten.eq.int %2479, %int0_875 : !torch.int, !torch.int -> !torch.bool
%2481 = torch.aten.Int.bool %2480 : !torch.bool -> !torch.int
%int6_879 = torch.constant.int 6
%2482 = torch.aten.mul.int %2481, %int6_879 : !torch.int, !torch.int -> !torch.int
%2483 = torch.aten.add.int %2479, %2482 : !torch.int, !torch.int -> !torch.int
%int2_880 = torch.constant.int 2
%2484 = torch.aten.select.int %2470, %int0_875, %int2_880 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2485 = torch.aten.item %2484 : !torch.vtensor<[1],si64> -> !torch.int
%2486 = torch.aten.eq.int %2485, %int0_875 : !torch.int, !torch.int -> !torch.bool
%2487 = torch.aten.Int.bool %2486 : !torch.bool -> !torch.int
%int768_881 = torch.constant.int 768
%2488 = torch.aten.mul.int %2487, %int768_881 : !torch.int, !torch.int -> !torch.int
%2489 = torch.aten.add.int %2485, %2488 : !torch.int, !torch.int -> !torch.int
%int3_882 = torch.constant.int 3
%2490 = torch.aten.select.int %2470, %int0_875, %int3_882 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2491 = torch.aten.item %2490 : !torch.vtensor<[1],si64> -> !torch.int
%2492 = torch.aten.eq.int %2491, %int0_875 : !torch.int, !torch.int -> !torch.bool
%2493 = torch.aten.Int.bool %2492 : !torch.bool -> !torch.int
%2494 = torch.aten.mul.int %2493, %int0_875 : !torch.int, !torch.int -> !torch.int
%2495 = torch.aten.add.int %2491, %2494 : !torch.int, !torch.int -> !torch.int
%2496 = torch.prim.ListConstruct %2477, %2483, %2489, %2495 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2497 = torch.aten.reshape %2469, %2496 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_883 = torch.constant.int 1
%int2_884 = torch.constant.int 2
%2498 = torch.aten.transpose.int %2497, %int1_883, %int2_884 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%2499 = torch.aten.matmul %result0_870, %178 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_885 = torch.constant.int 1
%2500 = torch.aten.add.Tensor %89, %2499, %int1_885 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_886 = torch.constant.int 0
%int0_887 = torch.constant.int 0
%2501 = torch.aten.select.int %2471, %int0_886, %int0_887 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2502 = torch.aten.item %2501 : !torch.vtensor<[1],si64> -> !torch.int
%2503 = torch.aten.eq.int %2502, %int0_886 : !torch.int, !torch.int -> !torch.bool
%2504 = torch.aten.Int.bool %2503 : !torch.bool -> !torch.int
%int1_888 = torch.constant.int 1
%2505 = torch.aten.mul.int %2504, %int1_888 : !torch.int, !torch.int -> !torch.int
%2506 = torch.aten.add.int %2502, %2505 : !torch.int, !torch.int -> !torch.int
%int1_889 = torch.constant.int 1
%2507 = torch.aten.select.int %2471, %int0_886, %int1_889 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2508 = torch.aten.item %2507 : !torch.vtensor<[1],si64> -> !torch.int
%2509 = torch.aten.eq.int %2508, %int0_886 : !torch.int, !torch.int -> !torch.bool
%2510 = torch.aten.Int.bool %2509 : !torch.bool -> !torch.int
%int6_890 = torch.constant.int 6
%2511 = torch.aten.mul.int %2510, %int6_890 : !torch.int, !torch.int -> !torch.int
%2512 = torch.aten.add.int %2508, %2511 : !torch.int, !torch.int -> !torch.int
%int2_891 = torch.constant.int 2
%2513 = torch.aten.select.int %2471, %int0_886, %int2_891 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2514 = torch.aten.item %2513 : !torch.vtensor<[1],si64> -> !torch.int
%2515 = torch.aten.eq.int %2514, %int0_886 : !torch.int, !torch.int -> !torch.bool
%2516 = torch.aten.Int.bool %2515 : !torch.bool -> !torch.int
%int768_892 = torch.constant.int 768
%2517 = torch.aten.mul.int %2516, %int768_892 : !torch.int, !torch.int -> !torch.int
%2518 = torch.aten.add.int %2514, %2517 : !torch.int, !torch.int -> !torch.int
%int3_893 = torch.constant.int 3
%2519 = torch.aten.select.int %2471, %int0_886, %int3_893 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2520 = torch.aten.item %2519 : !torch.vtensor<[1],si64> -> !torch.int
%2521 = torch.aten.eq.int %2520, %int0_886 : !torch.int, !torch.int -> !torch.bool
%2522 = torch.aten.Int.bool %2521 : !torch.bool -> !torch.int
%2523 = torch.aten.mul.int %2522, %int0_886 : !torch.int, !torch.int -> !torch.int
%2524 = torch.aten.add.int %2520, %2523 : !torch.int, !torch.int -> !torch.int
%2525 = torch.prim.ListConstruct %2506, %2512, %2518, %2524 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2526 = torch.aten.reshape %2500, %2525 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_894 = torch.constant.int 1
%int2_895 = torch.constant.int 2
%2527 = torch.aten.transpose.int %2526, %int1_894, %int2_895 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%2528 = torch.vtensor.literal(dense_resource<__117> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_896 = torch.constant.int 0
%int0_897 = torch.constant.int 0
%2529 = torch.aten.select.int %2528, %int0_896, %int0_897 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2530 = torch.aten.item %2529 : !torch.vtensor<[1],si64> -> !torch.int
%2531 = torch.aten.eq.int %2530, %int0_896 : !torch.int, !torch.int -> !torch.bool
%2532 = torch.aten.Int.bool %2531 : !torch.bool -> !torch.int
%int1_898 = torch.constant.int 1
%2533 = torch.aten.mul.int %2532, %int1_898 : !torch.int, !torch.int -> !torch.int
%2534 = torch.aten.add.int %2530, %2533 : !torch.int, !torch.int -> !torch.int
%int1_899 = torch.constant.int 1
%2535 = torch.aten.select.int %2528, %int0_896, %int1_899 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2536 = torch.aten.item %2535 : !torch.vtensor<[1],si64> -> !torch.int
%2537 = torch.aten.eq.int %2536, %int0_896 : !torch.int, !torch.int -> !torch.bool
%2538 = torch.aten.Int.bool %2537 : !torch.bool -> !torch.int
%int6_900 = torch.constant.int 6
%2539 = torch.aten.mul.int %2538, %int6_900 : !torch.int, !torch.int -> !torch.int
%2540 = torch.aten.add.int %2536, %2539 : !torch.int, !torch.int -> !torch.int
%int2_901 = torch.constant.int 2
%2541 = torch.aten.select.int %2528, %int0_896, %int2_901 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2542 = torch.aten.item %2541 : !torch.vtensor<[1],si64> -> !torch.int
%2543 = torch.aten.eq.int %2542, %int0_896 : !torch.int, !torch.int -> !torch.bool
%2544 = torch.aten.Int.bool %2543 : !torch.bool -> !torch.int
%int768_902 = torch.constant.int 768
%2545 = torch.aten.mul.int %2544, %int768_902 : !torch.int, !torch.int -> !torch.int
%2546 = torch.aten.add.int %2542, %2545 : !torch.int, !torch.int -> !torch.int
%int3_903 = torch.constant.int 3
%2547 = torch.aten.select.int %2528, %int0_896, %int3_903 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2548 = torch.aten.item %2547 : !torch.vtensor<[1],si64> -> !torch.int
%2549 = torch.aten.eq.int %2548, %int0_896 : !torch.int, !torch.int -> !torch.bool
%2550 = torch.aten.Int.bool %2549 : !torch.bool -> !torch.int
%2551 = torch.aten.mul.int %2550, %int0_896 : !torch.int, !torch.int -> !torch.int
%2552 = torch.aten.add.int %2548, %2551 : !torch.int, !torch.int -> !torch.int
%2553 = torch.prim.ListConstruct %2534, %2540, %2546, %2552 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2554 = torch.aten.reshape %2467, %2553 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_904 = torch.constant.int 1
%int2_905 = torch.constant.int 2
%2555 = torch.aten.transpose.int %2554, %int1_904, %int2_905 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%2556 = torch.vtensor.literal(dense_resource<__118> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%2557 = torch.vtensor.literal(dense_resource<__119> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%2558 = torch.vtensor.literal(dense_resource<__120> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_906 = torch.constant.int 0
%int0_907 = torch.constant.int 0
%2559 = torch.aten.select.int %2556, %int0_906, %int0_907 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2560 = torch.aten.item %2559 : !torch.vtensor<[1],si64> -> !torch.int
%2561 = torch.aten.eq.int %2560, %int0_906 : !torch.int, !torch.int -> !torch.bool
%2562 = torch.aten.Int.bool %2561 : !torch.bool -> !torch.int
%int1_908 = torch.constant.int 1
%2563 = torch.aten.mul.int %2562, %int1_908 : !torch.int, !torch.int -> !torch.int
%2564 = torch.aten.add.int %2560, %2563 : !torch.int, !torch.int -> !torch.int
%int1_909 = torch.constant.int 1
%2565 = torch.aten.select.int %2556, %int0_906, %int1_909 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2566 = torch.aten.item %2565 : !torch.vtensor<[1],si64> -> !torch.int
%2567 = torch.aten.eq.int %2566, %int0_906 : !torch.int, !torch.int -> !torch.bool
%2568 = torch.aten.Int.bool %2567 : !torch.bool -> !torch.int
%int12_910 = torch.constant.int 12
%2569 = torch.aten.mul.int %2568, %int12_910 : !torch.int, !torch.int -> !torch.int
%2570 = torch.aten.add.int %2566, %2569 : !torch.int, !torch.int -> !torch.int
%int2_911 = torch.constant.int 2
%2571 = torch.aten.select.int %2556, %int0_906, %int2_911 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2572 = torch.aten.item %2571 : !torch.vtensor<[1],si64> -> !torch.int
%2573 = torch.aten.eq.int %2572, %int0_906 : !torch.int, !torch.int -> !torch.bool
%2574 = torch.aten.Int.bool %2573 : !torch.bool -> !torch.int
%int6_912 = torch.constant.int 6
%2575 = torch.aten.mul.int %2574, %int6_912 : !torch.int, !torch.int -> !torch.int
%2576 = torch.aten.add.int %2572, %2575 : !torch.int, !torch.int -> !torch.int
%2577 = torch.prim.ListConstruct %2564, %2570, %2576 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2578 = torch.aten.reshape %2555, %2577 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_913 = torch.constant.int 0
%int0_914 = torch.constant.int 0
%2579 = torch.aten.select.int %2557, %int0_913, %int0_914 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2580 = torch.aten.item %2579 : !torch.vtensor<[1],si64> -> !torch.int
%2581 = torch.aten.eq.int %2580, %int0_913 : !torch.int, !torch.int -> !torch.bool
%2582 = torch.aten.Int.bool %2581 : !torch.bool -> !torch.int
%int1_915 = torch.constant.int 1
%2583 = torch.aten.mul.int %2582, %int1_915 : !torch.int, !torch.int -> !torch.int
%2584 = torch.aten.add.int %2580, %2583 : !torch.int, !torch.int -> !torch.int
%int1_916 = torch.constant.int 1
%2585 = torch.aten.select.int %2557, %int0_913, %int1_916 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2586 = torch.aten.item %2585 : !torch.vtensor<[1],si64> -> !torch.int
%2587 = torch.aten.eq.int %2586, %int0_913 : !torch.int, !torch.int -> !torch.bool
%2588 = torch.aten.Int.bool %2587 : !torch.bool -> !torch.int
%int12_917 = torch.constant.int 12
%2589 = torch.aten.mul.int %2588, %int12_917 : !torch.int, !torch.int -> !torch.int
%2590 = torch.aten.add.int %2586, %2589 : !torch.int, !torch.int -> !torch.int
%int2_918 = torch.constant.int 2
%2591 = torch.aten.select.int %2557, %int0_913, %int2_918 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2592 = torch.aten.item %2591 : !torch.vtensor<[1],si64> -> !torch.int
%2593 = torch.aten.eq.int %2592, %int0_913 : !torch.int, !torch.int -> !torch.bool
%2594 = torch.aten.Int.bool %2593 : !torch.bool -> !torch.int
%int6_919 = torch.constant.int 6
%2595 = torch.aten.mul.int %2594, %int6_919 : !torch.int, !torch.int -> !torch.int
%2596 = torch.aten.add.int %2592, %2595 : !torch.int, !torch.int -> !torch.int
%2597 = torch.prim.ListConstruct %2584, %2590, %2596 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2598 = torch.aten.reshape %2498, %2597 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_920 = torch.constant.int 0
%int0_921 = torch.constant.int 0
%2599 = torch.aten.select.int %2558, %int0_920, %int0_921 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2600 = torch.aten.item %2599 : !torch.vtensor<[1],si64> -> !torch.int
%2601 = torch.aten.eq.int %2600, %int0_920 : !torch.int, !torch.int -> !torch.bool
%2602 = torch.aten.Int.bool %2601 : !torch.bool -> !torch.int
%int1_922 = torch.constant.int 1
%2603 = torch.aten.mul.int %2602, %int1_922 : !torch.int, !torch.int -> !torch.int
%2604 = torch.aten.add.int %2600, %2603 : !torch.int, !torch.int -> !torch.int
%int1_923 = torch.constant.int 1
%2605 = torch.aten.select.int %2558, %int0_920, %int1_923 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2606 = torch.aten.item %2605 : !torch.vtensor<[1],si64> -> !torch.int
%2607 = torch.aten.eq.int %2606, %int0_920 : !torch.int, !torch.int -> !torch.bool
%2608 = torch.aten.Int.bool %2607 : !torch.bool -> !torch.int
%int12_924 = torch.constant.int 12
%2609 = torch.aten.mul.int %2608, %int12_924 : !torch.int, !torch.int -> !torch.int
%2610 = torch.aten.add.int %2606, %2609 : !torch.int, !torch.int -> !torch.int
%int2_925 = torch.constant.int 2
%2611 = torch.aten.select.int %2558, %int0_920, %int2_925 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2612 = torch.aten.item %2611 : !torch.vtensor<[1],si64> -> !torch.int
%2613 = torch.aten.eq.int %2612, %int0_920 : !torch.int, !torch.int -> !torch.bool
%2614 = torch.aten.Int.bool %2613 : !torch.bool -> !torch.int
%int6_926 = torch.constant.int 6
%2615 = torch.aten.mul.int %2614, %int6_926 : !torch.int, !torch.int -> !torch.int
%2616 = torch.aten.add.int %2612, %2615 : !torch.int, !torch.int -> !torch.int
%2617 = torch.prim.ListConstruct %2604, %2610, %2616 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2618 = torch.aten.reshape %2527, %2617 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int1_927 = torch.constant.int 1
%int2_928 = torch.constant.int 2
%2619 = torch.aten.transpose.int %2598, %int1_927, %int2_928 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%2620 = torch.aten.matmul %2578, %2619 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
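// Scores Q * K^T for this layer -> [12,6,6]; the same reshape / mask /
// floor / softmax sequence as above follows.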
%2621 = torch.vtensor.literal(dense_resource<__121> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_929 = torch.constant.int 0
%int0_930 = torch.constant.int 0
%2622 = torch.aten.select.int %2621, %int0_929, %int0_930 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2623 = torch.aten.item %2622 : !torch.vtensor<[1],si64> -> !torch.int
%2624 = torch.aten.eq.int %2623, %int0_929 : !torch.int, !torch.int -> !torch.bool
%2625 = torch.aten.Int.bool %2624 : !torch.bool -> !torch.int
%int12_931 = torch.constant.int 12
%2626 = torch.aten.mul.int %2625, %int12_931 : !torch.int, !torch.int -> !torch.int
%2627 = torch.aten.add.int %2623, %2626 : !torch.int, !torch.int -> !torch.int
%int1_932 = torch.constant.int 1
%2628 = torch.aten.select.int %2621, %int0_929, %int1_932 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2629 = torch.aten.item %2628 : !torch.vtensor<[1],si64> -> !torch.int
%2630 = torch.aten.eq.int %2629, %int0_929 : !torch.int, !torch.int -> !torch.bool
%2631 = torch.aten.Int.bool %2630 : !torch.bool -> !torch.int
%int6_933 = torch.constant.int 6
%2632 = torch.aten.mul.int %2631, %int6_933 : !torch.int, !torch.int -> !torch.int
%2633 = torch.aten.add.int %2629, %2632 : !torch.int, !torch.int -> !torch.int
%int2_934 = torch.constant.int 2
%2634 = torch.aten.select.int %2621, %int0_929, %int2_934 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2635 = torch.aten.item %2634 : !torch.vtensor<[1],si64> -> !torch.int
%2636 = torch.aten.eq.int %2635, %int0_929 : !torch.int, !torch.int -> !torch.bool
%2637 = torch.aten.Int.bool %2636 : !torch.bool -> !torch.int
%int6_935 = torch.constant.int 6
%2638 = torch.aten.mul.int %2637, %int6_935 : !torch.int, !torch.int -> !torch.int
%2639 = torch.aten.add.int %2635, %2638 : !torch.int, !torch.int -> !torch.int
%int3_936 = torch.constant.int 3
%2640 = torch.aten.select.int %2621, %int0_929, %int3_936 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2641 = torch.aten.item %2640 : !torch.vtensor<[1],si64> -> !torch.int
%2642 = torch.aten.eq.int %2641, %int0_929 : !torch.int, !torch.int -> !torch.bool
%2643 = torch.aten.Int.bool %2642 : !torch.bool -> !torch.int
%2644 = torch.aten.mul.int %2643, %int0_929 : !torch.int, !torch.int -> !torch.int
%2645 = torch.aten.add.int %2641, %2644 : !torch.int, !torch.int -> !torch.int
%2646 = torch.prim.ListConstruct %2627, %2633, %2639, %2645 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2647 = torch.aten.reshape %2620, %2646 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
%int1_937 = torch.constant.int 1
%2648 = torch.aten.add.Tensor %2647, %277, %int1_937 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%2649 = torch.vtensor.literal(dense_resource<__122> : tensor<f32>) : !torch.vtensor<[],f32>
%2650 = torch.aten.maximum %2648, %2649 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
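// Same mask-and-floor step as the previous layer, with scalar resource __122.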
%2651 = torch.vtensor.literal(dense_resource<__123> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_938 = torch.constant.int 0
%int0_939 = torch.constant.int 0
%2652 = torch.aten.select.int %2651, %int0_938, %int0_939 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2653 = torch.aten.item %2652 : !torch.vtensor<[1],si64> -> !torch.int
%2654 = torch.aten.eq.int %2653, %int0_938 : !torch.int, !torch.int -> !torch.bool
%2655 = torch.aten.Int.bool %2654 : !torch.bool -> !torch.int
%int-1_940 = torch.constant.int -1
%2656 = torch.aten.mul.int %2655, %int-1_940 : !torch.int, !torch.int -> !torch.int
%2657 = torch.aten.add.int %2653, %2656 : !torch.int, !torch.int -> !torch.int
%int1_941 = torch.constant.int 1
%2658 = torch.aten.select.int %2651, %int0_938, %int1_941 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2659 = torch.aten.item %2658 : !torch.vtensor<[1],si64> -> !torch.int
%2660 = torch.aten.eq.int %2659, %int0_938 : !torch.int, !torch.int -> !torch.bool
%2661 = torch.aten.Int.bool %2660 : !torch.bool -> !torch.int
%int12_942 = torch.constant.int 12
%2662 = torch.aten.mul.int %2661, %int12_942 : !torch.int, !torch.int -> !torch.int
%2663 = torch.aten.add.int %2659, %2662 : !torch.int, !torch.int -> !torch.int
%int2_943 = torch.constant.int 2
%2664 = torch.aten.select.int %2651, %int0_938, %int2_943 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2665 = torch.aten.item %2664 : !torch.vtensor<[1],si64> -> !torch.int
%2666 = torch.aten.eq.int %2665, %int0_938 : !torch.int, !torch.int -> !torch.bool
%2667 = torch.aten.Int.bool %2666 : !torch.bool -> !torch.int
%int6_944 = torch.constant.int 6
%2668 = torch.aten.mul.int %2667, %int6_944 : !torch.int, !torch.int -> !torch.int
%2669 = torch.aten.add.int %2665, %2668 : !torch.int, !torch.int -> !torch.int
%2670 = torch.prim.ListConstruct %2657, %2663, %2669 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2671 = torch.aten.reshape %2650, %2670 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
%int2_945 = torch.constant.int 2
%none_946 = torch.constant.none
%2672 = torch.aten.softmax.int %2671, %int2_945, %none_946 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%2673 = torch.aten.matmul %2672, %2618 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
%2674 = torch.vtensor.literal(dense_resource<__124> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_947 = torch.constant.int 0
%int0_948 = torch.constant.int 0
%2675 = torch.aten.select.int %2674, %int0_947, %int0_948 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2676 = torch.aten.item %2675 : !torch.vtensor<[1],si64> -> !torch.int
%2677 = torch.aten.eq.int %2676, %int0_947 : !torch.int, !torch.int -> !torch.bool
%2678 = torch.aten.Int.bool %2677 : !torch.bool -> !torch.int
%int12_949 = torch.constant.int 12
%2679 = torch.aten.mul.int %2678, %int12_949 : !torch.int, !torch.int -> !torch.int
%2680 = torch.aten.add.int %2676, %2679 : !torch.int, !torch.int -> !torch.int
%int1_950 = torch.constant.int 1
%2681 = torch.aten.select.int %2674, %int0_947, %int1_950 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2682 = torch.aten.item %2681 : !torch.vtensor<[1],si64> -> !torch.int
%2683 = torch.aten.eq.int %2682, %int0_947 : !torch.int, !torch.int -> !torch.bool
%2684 = torch.aten.Int.bool %2683 : !torch.bool -> !torch.int
%int6_951 = torch.constant.int 6
%2685 = torch.aten.mul.int %2684, %int6_951 : !torch.int, !torch.int -> !torch.int
%2686 = torch.aten.add.int %2682, %2685 : !torch.int, !torch.int -> !torch.int
%int2_952 = torch.constant.int 2
%2687 = torch.aten.select.int %2674, %int0_947, %int2_952 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2688 = torch.aten.item %2687 : !torch.vtensor<[1],si64> -> !torch.int
%2689 = torch.aten.eq.int %2688, %int0_947 : !torch.int, !torch.int -> !torch.bool
%2690 = torch.aten.Int.bool %2689 : !torch.bool -> !torch.int
%int64_953 = torch.constant.int 64
%2691 = torch.aten.mul.int %2690, %int64_953 : !torch.int, !torch.int -> !torch.int
%2692 = torch.aten.add.int %2688, %2691 : !torch.int, !torch.int -> !torch.int
%int3_954 = torch.constant.int 3
%2693 = torch.aten.select.int %2674, %int0_947, %int3_954 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2694 = torch.aten.item %2693 : !torch.vtensor<[1],si64> -> !torch.int
%2695 = torch.aten.eq.int %2694, %int0_947 : !torch.int, !torch.int -> !torch.bool
%2696 = torch.aten.Int.bool %2695 : !torch.bool -> !torch.int
%2697 = torch.aten.mul.int %2696, %int0_947 : !torch.int, !torch.int -> !torch.int
%2698 = torch.aten.add.int %2694, %2697 : !torch.int, !torch.int -> !torch.int
%2699 = torch.prim.ListConstruct %2680, %2686, %2692, %2698 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2700 = torch.aten.reshape %2673, %2699 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_955 = torch.constant.int 1
%int2_956 = torch.constant.int 2
%2701 = torch.aten.transpose.int %2700, %int1_955, %int2_956 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
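// Per-head context back to [1,6,12,64]; heads merged (resource __125) and
// projected out (Wo %179, bias %91) below.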
%2702 = torch.vtensor.literal(dense_resource<__125> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_957 = torch.constant.int 0
%int0_958 = torch.constant.int 0
%2703 = torch.aten.select.int %2702, %int0_957, %int0_958 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2704 = torch.aten.item %2703 : !torch.vtensor<[1],si64> -> !torch.int
%2705 = torch.aten.eq.int %2704, %int0_957 : !torch.int, !torch.int -> !torch.bool
%2706 = torch.aten.Int.bool %2705 : !torch.bool -> !torch.int
%int1_959 = torch.constant.int 1
%2707 = torch.aten.mul.int %2706, %int1_959 : !torch.int, !torch.int -> !torch.int
%2708 = torch.aten.add.int %2704, %2707 : !torch.int, !torch.int -> !torch.int
%int1_960 = torch.constant.int 1
%2709 = torch.aten.select.int %2702, %int0_957, %int1_960 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2710 = torch.aten.item %2709 : !torch.vtensor<[1],si64> -> !torch.int
%2711 = torch.aten.eq.int %2710, %int0_957 : !torch.int, !torch.int -> !torch.bool
%2712 = torch.aten.Int.bool %2711 : !torch.bool -> !torch.int
%int6_961 = torch.constant.int 6
%2713 = torch.aten.mul.int %2712, %int6_961 : !torch.int, !torch.int -> !torch.int
%2714 = torch.aten.add.int %2710, %2713 : !torch.int, !torch.int -> !torch.int
%int2_962 = torch.constant.int 2
%2715 = torch.aten.select.int %2702, %int0_957, %int2_962 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2716 = torch.aten.item %2715 : !torch.vtensor<[1],si64> -> !torch.int
%2717 = torch.aten.eq.int %2716, %int0_957 : !torch.int, !torch.int -> !torch.bool
%2718 = torch.aten.Int.bool %2717 : !torch.bool -> !torch.int
%int12_963 = torch.constant.int 12
%2719 = torch.aten.mul.int %2718, %int12_963 : !torch.int, !torch.int -> !torch.int
%2720 = torch.aten.add.int %2716, %2719 : !torch.int, !torch.int -> !torch.int
%2721 = torch.prim.ListConstruct %2708, %2714, %2720 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2722 = torch.aten.reshape %2701, %2721 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%2723 = torch.aten.matmul %2722, %179 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_964 = torch.constant.int 1
%2724 = torch.aten.add.Tensor %91, %2723, %int1_964 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_965 = torch.constant.int 1
%2725 = torch.aten.add.Tensor %2462, %2724, %int1_965 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%2726 = torch.vtensor.literal(dense_resource<__126> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_966 = torch.constant.int 0
%int0_967 = torch.constant.int 0
%2727 = torch.aten.select.int %2726, %int0_966, %int0_967 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2728 = torch.aten.item %2727 : !torch.vtensor<[1],si64> -> !torch.int
%2729 = torch.aten.eq.int %2728, %int0_966 : !torch.int, !torch.int -> !torch.bool
%2730 = torch.aten.Int.bool %2729 : !torch.bool -> !torch.int
%int1_968 = torch.constant.int 1
%2731 = torch.aten.mul.int %2730, %int1_968 : !torch.int, !torch.int -> !torch.int
%2732 = torch.aten.add.int %2728, %2731 : !torch.int, !torch.int -> !torch.int
%int1_969 = torch.constant.int 1
%2733 = torch.aten.select.int %2726, %int0_966, %int1_969 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2734 = torch.aten.item %2733 : !torch.vtensor<[1],si64> -> !torch.int
%2735 = torch.aten.eq.int %2734, %int0_966 : !torch.int, !torch.int -> !torch.bool
%2736 = torch.aten.Int.bool %2735 : !torch.bool -> !torch.int
%int6_970 = torch.constant.int 6
%2737 = torch.aten.mul.int %2736, %int6_970 : !torch.int, !torch.int -> !torch.int
%2738 = torch.aten.add.int %2734, %2737 : !torch.int, !torch.int -> !torch.int
%2739 = torch.prim.ListConstruct %2732, %2738 : (!torch.int, !torch.int) -> !torch.list<int>
%2740 = torch.aten.reshape %2725, %2739 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
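// LayerNorm (%98/%99) over the flattened [6,768] activations, then the feed-forward
// sublayer: 768 -> 3072 mm (%94/%95), ReLU, 3072 -> 768 mm (%96/%97), residual add.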
%float9.999990e-06_971 = torch.constant.float 9.9999997473787516E-6
%int768_972 = torch.constant.int 768
%2741 = torch.prim.ListConstruct %int768_972 : (!torch.int) -> !torch.list<int>
%result0_973, %result1_974, %result2_975 = torch.aten.native_layer_norm %2740, %2741, %98, %99, %float9.999990e-06_971 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_976 = torch.constant.int 0
%int1_977 = torch.constant.int 1
%2742 = torch.aten.transpose.int %94, %int0_976, %int1_977 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%2743 = torch.aten.mm %result0_973, %2742 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%2744 = torch.aten.add.Tensor %2743, %95, %int1_977 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%2745 = torch.aten.relu %2744 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_978 = torch.constant.int 0
%int1_979 = torch.constant.int 1
%2746 = torch.aten.transpose.int %96, %int0_978, %int1_979 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%2747 = torch.aten.mm %2745, %2746 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%2748 = torch.aten.add.Tensor %2747, %97, %int1_979 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_980 = torch.constant.int 1
%2749 = torch.aten.add.Tensor %2740, %2748, %int1_980 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%2750 = torch.vtensor.literal(dense_resource<__127> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_981 = torch.constant.int 0
%int0_982 = torch.constant.int 0
%2751 = torch.aten.select.int %2750, %int0_981, %int0_982 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2752 = torch.aten.item %2751 : !torch.vtensor<[1],si64> -> !torch.int
%2753 = torch.aten.eq.int %2752, %int0_981 : !torch.int, !torch.int -> !torch.bool
%2754 = torch.aten.Int.bool %2753 : !torch.bool -> !torch.int
%int6_983 = torch.constant.int 6
%2755 = torch.aten.mul.int %2754, %int6_983 : !torch.int, !torch.int -> !torch.int
%2756 = torch.aten.add.int %2752, %2755 : !torch.int, !torch.int -> !torch.int
%int1_984 = torch.constant.int 1
%2757 = torch.aten.select.int %2750, %int0_981, %int1_984 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2758 = torch.aten.item %2757 : !torch.vtensor<[1],si64> -> !torch.int
%2759 = torch.aten.eq.int %2758, %int0_981 : !torch.int, !torch.int -> !torch.bool
%2760 = torch.aten.Int.bool %2759 : !torch.bool -> !torch.int
%int768_985 = torch.constant.int 768
%2761 = torch.aten.mul.int %2760, %int768_985 : !torch.int, !torch.int -> !torch.int
%2762 = torch.aten.add.int %2758, %2761 : !torch.int, !torch.int -> !torch.int
%int2_986 = torch.constant.int 2
%2763 = torch.aten.select.int %2750, %int0_981, %int2_986 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2764 = torch.aten.item %2763 : !torch.vtensor<[1],si64> -> !torch.int
%2765 = torch.aten.eq.int %2764, %int0_981 : !torch.int, !torch.int -> !torch.bool
%2766 = torch.aten.Int.bool %2765 : !torch.bool -> !torch.int
%2767 = torch.aten.mul.int %2766, %int0_981 : !torch.int, !torch.int -> !torch.int
%2768 = torch.aten.add.int %2764, %2767 : !torch.int, !torch.int -> !torch.int
%2769 = torch.prim.ListConstruct %2756, %2762, %2768 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2770 = torch.aten.reshape %2749, %2769 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
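// Restore the batch dim ([6,768] -> [1,6,768]) and apply the input LayerNorm
// (%104/%105) feeding the next decoder layer's self-attention block.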
%float9.999990e-06_987 = torch.constant.float 9.9999997473787516E-6
%int768_988 = torch.constant.int 768
%2771 = torch.prim.ListConstruct %int768_988 : (!torch.int) -> !torch.list<int>
%result0_989, %result1_990, %result2_991 = torch.aten.native_layer_norm %2770, %2771, %104, %105, %float9.999990e-06_987 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
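// Q/K/V projections for this layer's self-attention: Q = %180 (+ bias %102), scaled by
// the scalar resource __128 (presumably the 1/sqrt(64) = 0.125 per-head scaling; the
// resource blob's value is not printed here); K = %181 (+ %100); V = %182 (+ %101).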
%2772 = torch.aten.matmul %result0_989, %180 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_992 = torch.constant.int 1
%2773 = torch.aten.add.Tensor %102, %2772, %int1_992 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%2774 = torch.vtensor.literal(dense_resource<__128> : tensor<f32>) : !torch.vtensor<[],f32>
%2775 = torch.aten.mul.Tensor %2773, %2774 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%2776 = torch.aten.matmul %result0_989, %181 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_993 = torch.constant.int 1
%2777 = torch.aten.add.Tensor %100, %2776, %int1_993 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%2778 = torch.vtensor.literal(dense_resource<__129> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%2779 = torch.vtensor.literal(dense_resource<__130> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_994 = torch.constant.int 0
%int0_995 = torch.constant.int 0
%2780 = torch.aten.select.int %2778, %int0_994, %int0_995 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2781 = torch.aten.item %2780 : !torch.vtensor<[1],si64> -> !torch.int
%2782 = torch.aten.eq.int %2781, %int0_994 : !torch.int, !torch.int -> !torch.bool
%2783 = torch.aten.Int.bool %2782 : !torch.bool -> !torch.int
%int1_996 = torch.constant.int 1
%2784 = torch.aten.mul.int %2783, %int1_996 : !torch.int, !torch.int -> !torch.int
%2785 = torch.aten.add.int %2781, %2784 : !torch.int, !torch.int -> !torch.int
%int1_997 = torch.constant.int 1
%2786 = torch.aten.select.int %2778, %int0_994, %int1_997 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2787 = torch.aten.item %2786 : !torch.vtensor<[1],si64> -> !torch.int
%2788 = torch.aten.eq.int %2787, %int0_994 : !torch.int, !torch.int -> !torch.bool
%2789 = torch.aten.Int.bool %2788 : !torch.bool -> !torch.int
%int6_998 = torch.constant.int 6
%2790 = torch.aten.mul.int %2789, %int6_998 : !torch.int, !torch.int -> !torch.int
%2791 = torch.aten.add.int %2787, %2790 : !torch.int, !torch.int -> !torch.int
%int2_999 = torch.constant.int 2
%2792 = torch.aten.select.int %2778, %int0_994, %int2_999 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2793 = torch.aten.item %2792 : !torch.vtensor<[1],si64> -> !torch.int
%2794 = torch.aten.eq.int %2793, %int0_994 : !torch.int, !torch.int -> !torch.bool
%2795 = torch.aten.Int.bool %2794 : !torch.bool -> !torch.int
%int768_1000 = torch.constant.int 768
%2796 = torch.aten.mul.int %2795, %int768_1000 : !torch.int, !torch.int -> !torch.int
%2797 = torch.aten.add.int %2793, %2796 : !torch.int, !torch.int -> !torch.int
%int3_1001 = torch.constant.int 3
%2798 = torch.aten.select.int %2778, %int0_994, %int3_1001 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2799 = torch.aten.item %2798 : !torch.vtensor<[1],si64> -> !torch.int
%2800 = torch.aten.eq.int %2799, %int0_994 : !torch.int, !torch.int -> !torch.bool
%2801 = torch.aten.Int.bool %2800 : !torch.bool -> !torch.int
%2802 = torch.aten.mul.int %2801, %int0_994 : !torch.int, !torch.int -> !torch.int
%2803 = torch.aten.add.int %2799, %2802 : !torch.int, !torch.int -> !torch.int
%2804 = torch.prim.ListConstruct %2785, %2791, %2797, %2803 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2805 = torch.aten.reshape %2777, %2804 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1002 = torch.constant.int 1
%int2_1003 = torch.constant.int 2
%2806 = torch.aten.transpose.int %2805, %int1_1002, %int2_1003 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%2807 = torch.aten.matmul %result0_989, %182 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1004 = torch.constant.int 1
%2808 = torch.aten.add.Tensor %101, %2807, %int1_1004 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_1005 = torch.constant.int 0
%int0_1006 = torch.constant.int 0
%2809 = torch.aten.select.int %2779, %int0_1005, %int0_1006 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2810 = torch.aten.item %2809 : !torch.vtensor<[1],si64> -> !torch.int
%2811 = torch.aten.eq.int %2810, %int0_1005 : !torch.int, !torch.int -> !torch.bool
%2812 = torch.aten.Int.bool %2811 : !torch.bool -> !torch.int
%int1_1007 = torch.constant.int 1
%2813 = torch.aten.mul.int %2812, %int1_1007 : !torch.int, !torch.int -> !torch.int
%2814 = torch.aten.add.int %2810, %2813 : !torch.int, !torch.int -> !torch.int
%int1_1008 = torch.constant.int 1
%2815 = torch.aten.select.int %2779, %int0_1005, %int1_1008 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2816 = torch.aten.item %2815 : !torch.vtensor<[1],si64> -> !torch.int
%2817 = torch.aten.eq.int %2816, %int0_1005 : !torch.int, !torch.int -> !torch.bool
%2818 = torch.aten.Int.bool %2817 : !torch.bool -> !torch.int
%int6_1009 = torch.constant.int 6
%2819 = torch.aten.mul.int %2818, %int6_1009 : !torch.int, !torch.int -> !torch.int
%2820 = torch.aten.add.int %2816, %2819 : !torch.int, !torch.int -> !torch.int
%int2_1010 = torch.constant.int 2
%2821 = torch.aten.select.int %2779, %int0_1005, %int2_1010 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2822 = torch.aten.item %2821 : !torch.vtensor<[1],si64> -> !torch.int
%2823 = torch.aten.eq.int %2822, %int0_1005 : !torch.int, !torch.int -> !torch.bool
%2824 = torch.aten.Int.bool %2823 : !torch.bool -> !torch.int
%int768_1011 = torch.constant.int 768
%2825 = torch.aten.mul.int %2824, %int768_1011 : !torch.int, !torch.int -> !torch.int
%2826 = torch.aten.add.int %2822, %2825 : !torch.int, !torch.int -> !torch.int
%int3_1012 = torch.constant.int 3
%2827 = torch.aten.select.int %2779, %int0_1005, %int3_1012 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2828 = torch.aten.item %2827 : !torch.vtensor<[1],si64> -> !torch.int
%2829 = torch.aten.eq.int %2828, %int0_1005 : !torch.int, !torch.int -> !torch.bool
%2830 = torch.aten.Int.bool %2829 : !torch.bool -> !torch.int
%2831 = torch.aten.mul.int %2830, %int0_1005 : !torch.int, !torch.int -> !torch.int
%2832 = torch.aten.add.int %2828, %2831 : !torch.int, !torch.int -> !torch.int
%2833 = torch.prim.ListConstruct %2814, %2820, %2826, %2832 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2834 = torch.aten.reshape %2808, %2833 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1013 = torch.constant.int 1
%int2_1014 = torch.constant.int 2
%2835 = torch.aten.transpose.int %2834, %int1_1013, %int2_1014 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%2836 = torch.vtensor.literal(dense_resource<__131> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1015 = torch.constant.int 0
%int0_1016 = torch.constant.int 0
%2837 = torch.aten.select.int %2836, %int0_1015, %int0_1016 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2838 = torch.aten.item %2837 : !torch.vtensor<[1],si64> -> !torch.int
%2839 = torch.aten.eq.int %2838, %int0_1015 : !torch.int, !torch.int -> !torch.bool
%2840 = torch.aten.Int.bool %2839 : !torch.bool -> !torch.int
%int1_1017 = torch.constant.int 1
%2841 = torch.aten.mul.int %2840, %int1_1017 : !torch.int, !torch.int -> !torch.int
%2842 = torch.aten.add.int %2838, %2841 : !torch.int, !torch.int -> !torch.int
%int1_1018 = torch.constant.int 1
%2843 = torch.aten.select.int %2836, %int0_1015, %int1_1018 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2844 = torch.aten.item %2843 : !torch.vtensor<[1],si64> -> !torch.int
%2845 = torch.aten.eq.int %2844, %int0_1015 : !torch.int, !torch.int -> !torch.bool
%2846 = torch.aten.Int.bool %2845 : !torch.bool -> !torch.int
%int6_1019 = torch.constant.int 6
%2847 = torch.aten.mul.int %2846, %int6_1019 : !torch.int, !torch.int -> !torch.int
%2848 = torch.aten.add.int %2844, %2847 : !torch.int, !torch.int -> !torch.int
%int2_1020 = torch.constant.int 2
%2849 = torch.aten.select.int %2836, %int0_1015, %int2_1020 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2850 = torch.aten.item %2849 : !torch.vtensor<[1],si64> -> !torch.int
%2851 = torch.aten.eq.int %2850, %int0_1015 : !torch.int, !torch.int -> !torch.bool
%2852 = torch.aten.Int.bool %2851 : !torch.bool -> !torch.int
%int768_1021 = torch.constant.int 768
%2853 = torch.aten.mul.int %2852, %int768_1021 : !torch.int, !torch.int -> !torch.int
%2854 = torch.aten.add.int %2850, %2853 : !torch.int, !torch.int -> !torch.int
%int3_1022 = torch.constant.int 3
%2855 = torch.aten.select.int %2836, %int0_1015, %int3_1022 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2856 = torch.aten.item %2855 : !torch.vtensor<[1],si64> -> !torch.int
%2857 = torch.aten.eq.int %2856, %int0_1015 : !torch.int, !torch.int -> !torch.bool
%2858 = torch.aten.Int.bool %2857 : !torch.bool -> !torch.int
%2859 = torch.aten.mul.int %2858, %int0_1015 : !torch.int, !torch.int -> !torch.int
%2860 = torch.aten.add.int %2856, %2859 : !torch.int, !torch.int -> !torch.int
%2861 = torch.prim.ListConstruct %2842, %2848, %2854, %2860 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2862 = torch.aten.reshape %2775, %2861 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1023 = torch.constant.int 1
%int2_1024 = torch.constant.int 2
%2863 = torch.aten.transpose.int %2862, %int1_1023, %int2_1024 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%2864 = torch.vtensor.literal(dense_resource<__132> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%2865 = torch.vtensor.literal(dense_resource<__133> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%2866 = torch.vtensor.literal(dense_resource<__134> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1025 = torch.constant.int 0
%int0_1026 = torch.constant.int 0
%2867 = torch.aten.select.int %2864, %int0_1025, %int0_1026 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2868 = torch.aten.item %2867 : !torch.vtensor<[1],si64> -> !torch.int
%2869 = torch.aten.eq.int %2868, %int0_1025 : !torch.int, !torch.int -> !torch.bool
%2870 = torch.aten.Int.bool %2869 : !torch.bool -> !torch.int
%int1_1027 = torch.constant.int 1
%2871 = torch.aten.mul.int %2870, %int1_1027 : !torch.int, !torch.int -> !torch.int
%2872 = torch.aten.add.int %2868, %2871 : !torch.int, !torch.int -> !torch.int
%int1_1028 = torch.constant.int 1
%2873 = torch.aten.select.int %2864, %int0_1025, %int1_1028 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2874 = torch.aten.item %2873 : !torch.vtensor<[1],si64> -> !torch.int
%2875 = torch.aten.eq.int %2874, %int0_1025 : !torch.int, !torch.int -> !torch.bool
%2876 = torch.aten.Int.bool %2875 : !torch.bool -> !torch.int
%int12_1029 = torch.constant.int 12
%2877 = torch.aten.mul.int %2876, %int12_1029 : !torch.int, !torch.int -> !torch.int
%2878 = torch.aten.add.int %2874, %2877 : !torch.int, !torch.int -> !torch.int
%int2_1030 = torch.constant.int 2
%2879 = torch.aten.select.int %2864, %int0_1025, %int2_1030 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2880 = torch.aten.item %2879 : !torch.vtensor<[1],si64> -> !torch.int
%2881 = torch.aten.eq.int %2880, %int0_1025 : !torch.int, !torch.int -> !torch.bool
%2882 = torch.aten.Int.bool %2881 : !torch.bool -> !torch.int
%int6_1031 = torch.constant.int 6
%2883 = torch.aten.mul.int %2882, %int6_1031 : !torch.int, !torch.int -> !torch.int
%2884 = torch.aten.add.int %2880, %2883 : !torch.int, !torch.int -> !torch.int
%2885 = torch.prim.ListConstruct %2872, %2878, %2884 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2886 = torch.aten.reshape %2863, %2885 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_1032 = torch.constant.int 0
%int0_1033 = torch.constant.int 0
%2887 = torch.aten.select.int %2865, %int0_1032, %int0_1033 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2888 = torch.aten.item %2887 : !torch.vtensor<[1],si64> -> !torch.int
%2889 = torch.aten.eq.int %2888, %int0_1032 : !torch.int, !torch.int -> !torch.bool
%2890 = torch.aten.Int.bool %2889 : !torch.bool -> !torch.int
%int1_1034 = torch.constant.int 1
%2891 = torch.aten.mul.int %2890, %int1_1034 : !torch.int, !torch.int -> !torch.int
%2892 = torch.aten.add.int %2888, %2891 : !torch.int, !torch.int -> !torch.int
%int1_1035 = torch.constant.int 1
%2893 = torch.aten.select.int %2865, %int0_1032, %int1_1035 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2894 = torch.aten.item %2893 : !torch.vtensor<[1],si64> -> !torch.int
%2895 = torch.aten.eq.int %2894, %int0_1032 : !torch.int, !torch.int -> !torch.bool
%2896 = torch.aten.Int.bool %2895 : !torch.bool -> !torch.int
%int12_1036 = torch.constant.int 12
%2897 = torch.aten.mul.int %2896, %int12_1036 : !torch.int, !torch.int -> !torch.int
%2898 = torch.aten.add.int %2894, %2897 : !torch.int, !torch.int -> !torch.int
%int2_1037 = torch.constant.int 2
%2899 = torch.aten.select.int %2865, %int0_1032, %int2_1037 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2900 = torch.aten.item %2899 : !torch.vtensor<[1],si64> -> !torch.int
%2901 = torch.aten.eq.int %2900, %int0_1032 : !torch.int, !torch.int -> !torch.bool
%2902 = torch.aten.Int.bool %2901 : !torch.bool -> !torch.int
%int6_1038 = torch.constant.int 6
%2903 = torch.aten.mul.int %2902, %int6_1038 : !torch.int, !torch.int -> !torch.int
%2904 = torch.aten.add.int %2900, %2903 : !torch.int, !torch.int -> !torch.int
%2905 = torch.prim.ListConstruct %2892, %2898, %2904 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2906 = torch.aten.reshape %2806, %2905 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_1039 = torch.constant.int 0
%int0_1040 = torch.constant.int 0
%2907 = torch.aten.select.int %2866, %int0_1039, %int0_1040 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2908 = torch.aten.item %2907 : !torch.vtensor<[1],si64> -> !torch.int
%2909 = torch.aten.eq.int %2908, %int0_1039 : !torch.int, !torch.int -> !torch.bool
%2910 = torch.aten.Int.bool %2909 : !torch.bool -> !torch.int
%int1_1041 = torch.constant.int 1
%2911 = torch.aten.mul.int %2910, %int1_1041 : !torch.int, !torch.int -> !torch.int
%2912 = torch.aten.add.int %2908, %2911 : !torch.int, !torch.int -> !torch.int
%int1_1042 = torch.constant.int 1
%2913 = torch.aten.select.int %2866, %int0_1039, %int1_1042 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2914 = torch.aten.item %2913 : !torch.vtensor<[1],si64> -> !torch.int
%2915 = torch.aten.eq.int %2914, %int0_1039 : !torch.int, !torch.int -> !torch.bool
%2916 = torch.aten.Int.bool %2915 : !torch.bool -> !torch.int
%int12_1043 = torch.constant.int 12
%2917 = torch.aten.mul.int %2916, %int12_1043 : !torch.int, !torch.int -> !torch.int
%2918 = torch.aten.add.int %2914, %2917 : !torch.int, !torch.int -> !torch.int
%int2_1044 = torch.constant.int 2
%2919 = torch.aten.select.int %2866, %int0_1039, %int2_1044 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2920 = torch.aten.item %2919 : !torch.vtensor<[1],si64> -> !torch.int
%2921 = torch.aten.eq.int %2920, %int0_1039 : !torch.int, !torch.int -> !torch.bool
%2922 = torch.aten.Int.bool %2921 : !torch.bool -> !torch.int
%int6_1045 = torch.constant.int 6
%2923 = torch.aten.mul.int %2922, %int6_1045 : !torch.int, !torch.int -> !torch.int
%2924 = torch.aten.add.int %2920, %2923 : !torch.int, !torch.int -> !torch.int
%2925 = torch.prim.ListConstruct %2912, %2918, %2924 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2926 = torch.aten.reshape %2835, %2925 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
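// Per-head attention scores: transpose K to [12,64,6] and batch-matmul with Q,
// giving Q @ K^T of shape [12,6,6].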
%int1_1046 = torch.constant.int 1
%int2_1047 = torch.constant.int 2
%2927 = torch.aten.transpose.int %2906, %int1_1046, %int2_1047 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%2928 = torch.aten.matmul %2886, %2927 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
%2929 = torch.vtensor.literal(dense_resource<__135> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1048 = torch.constant.int 0
%int0_1049 = torch.constant.int 0
%2930 = torch.aten.select.int %2929, %int0_1048, %int0_1049 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2931 = torch.aten.item %2930 : !torch.vtensor<[1],si64> -> !torch.int
%2932 = torch.aten.eq.int %2931, %int0_1048 : !torch.int, !torch.int -> !torch.bool
%2933 = torch.aten.Int.bool %2932 : !torch.bool -> !torch.int
%int12_1050 = torch.constant.int 12
%2934 = torch.aten.mul.int %2933, %int12_1050 : !torch.int, !torch.int -> !torch.int
%2935 = torch.aten.add.int %2931, %2934 : !torch.int, !torch.int -> !torch.int
%int1_1051 = torch.constant.int 1
%2936 = torch.aten.select.int %2929, %int0_1048, %int1_1051 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2937 = torch.aten.item %2936 : !torch.vtensor<[1],si64> -> !torch.int
%2938 = torch.aten.eq.int %2937, %int0_1048 : !torch.int, !torch.int -> !torch.bool
%2939 = torch.aten.Int.bool %2938 : !torch.bool -> !torch.int
%int6_1052 = torch.constant.int 6
%2940 = torch.aten.mul.int %2939, %int6_1052 : !torch.int, !torch.int -> !torch.int
%2941 = torch.aten.add.int %2937, %2940 : !torch.int, !torch.int -> !torch.int
%int2_1053 = torch.constant.int 2
%2942 = torch.aten.select.int %2929, %int0_1048, %int2_1053 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2943 = torch.aten.item %2942 : !torch.vtensor<[1],si64> -> !torch.int
%2944 = torch.aten.eq.int %2943, %int0_1048 : !torch.int, !torch.int -> !torch.bool
%2945 = torch.aten.Int.bool %2944 : !torch.bool -> !torch.int
%int6_1054 = torch.constant.int 6
%2946 = torch.aten.mul.int %2945, %int6_1054 : !torch.int, !torch.int -> !torch.int
%2947 = torch.aten.add.int %2943, %2946 : !torch.int, !torch.int -> !torch.int
%int3_1055 = torch.constant.int 3
%2948 = torch.aten.select.int %2929, %int0_1048, %int3_1055 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2949 = torch.aten.item %2948 : !torch.vtensor<[1],si64> -> !torch.int
%2950 = torch.aten.eq.int %2949, %int0_1048 : !torch.int, !torch.int -> !torch.bool
%2951 = torch.aten.Int.bool %2950 : !torch.bool -> !torch.int
%2952 = torch.aten.mul.int %2951, %int0_1048 : !torch.int, !torch.int -> !torch.int
%2953 = torch.aten.add.int %2949, %2952 : !torch.int, !torch.int -> !torch.int
%2954 = torch.prim.ListConstruct %2935, %2941, %2947, %2953 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2955 = torch.aten.reshape %2928, %2954 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
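// Add the (broadcast) attention mask %277, then take an elementwise maximum with the
// scalar resource __136 -- likely the clamp-to-dtype-minimum step of OPT's masked
// attention, though the resource value is not shown.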
%int1_1056 = torch.constant.int 1
%2956 = torch.aten.add.Tensor %2955, %277, %int1_1056 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%2957 = torch.vtensor.literal(dense_resource<__136> : tensor<f32>) : !torch.vtensor<[],f32>
%2958 = torch.aten.maximum %2956, %2957 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
%2959 = torch.vtensor.literal(dense_resource<__137> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1057 = torch.constant.int 0
%int0_1058 = torch.constant.int 0
%2960 = torch.aten.select.int %2959, %int0_1057, %int0_1058 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2961 = torch.aten.item %2960 : !torch.vtensor<[1],si64> -> !torch.int
%2962 = torch.aten.eq.int %2961, %int0_1057 : !torch.int, !torch.int -> !torch.bool
%2963 = torch.aten.Int.bool %2962 : !torch.bool -> !torch.int
%int-1_1059 = torch.constant.int -1
%2964 = torch.aten.mul.int %2963, %int-1_1059 : !torch.int, !torch.int -> !torch.int
%2965 = torch.aten.add.int %2961, %2964 : !torch.int, !torch.int -> !torch.int
%int1_1060 = torch.constant.int 1
%2966 = torch.aten.select.int %2959, %int0_1057, %int1_1060 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2967 = torch.aten.item %2966 : !torch.vtensor<[1],si64> -> !torch.int
%2968 = torch.aten.eq.int %2967, %int0_1057 : !torch.int, !torch.int -> !torch.bool
%2969 = torch.aten.Int.bool %2968 : !torch.bool -> !torch.int
%int12_1061 = torch.constant.int 12
%2970 = torch.aten.mul.int %2969, %int12_1061 : !torch.int, !torch.int -> !torch.int
%2971 = torch.aten.add.int %2967, %2970 : !torch.int, !torch.int -> !torch.int
%int2_1062 = torch.constant.int 2
%2972 = torch.aten.select.int %2959, %int0_1057, %int2_1062 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2973 = torch.aten.item %2972 : !torch.vtensor<[1],si64> -> !torch.int
%2974 = torch.aten.eq.int %2973, %int0_1057 : !torch.int, !torch.int -> !torch.bool
%2975 = torch.aten.Int.bool %2974 : !torch.bool -> !torch.int
%int6_1063 = torch.constant.int 6
%2976 = torch.aten.mul.int %2975, %int6_1063 : !torch.int, !torch.int -> !torch.int
%2977 = torch.aten.add.int %2973, %2976 : !torch.int, !torch.int -> !torch.int
%2978 = torch.prim.ListConstruct %2965, %2971, %2977 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2979 = torch.aten.reshape %2958, %2978 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
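// Softmax over the key dimension, then the weighted sum with V: [12,6,6] @ [12,6,64].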
%int2_1064 = torch.constant.int 2
%none_1065 = torch.constant.none
%2980 = torch.aten.softmax.int %2979, %int2_1064, %none_1065 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%2981 = torch.aten.matmul %2980, %2926 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
%2982 = torch.vtensor.literal(dense_resource<__138> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1066 = torch.constant.int 0
%int0_1067 = torch.constant.int 0
%2983 = torch.aten.select.int %2982, %int0_1066, %int0_1067 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2984 = torch.aten.item %2983 : !torch.vtensor<[1],si64> -> !torch.int
%2985 = torch.aten.eq.int %2984, %int0_1066 : !torch.int, !torch.int -> !torch.bool
%2986 = torch.aten.Int.bool %2985 : !torch.bool -> !torch.int
%int12_1068 = torch.constant.int 12
%2987 = torch.aten.mul.int %2986, %int12_1068 : !torch.int, !torch.int -> !torch.int
%2988 = torch.aten.add.int %2984, %2987 : !torch.int, !torch.int -> !torch.int
%int1_1069 = torch.constant.int 1
%2989 = torch.aten.select.int %2982, %int0_1066, %int1_1069 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2990 = torch.aten.item %2989 : !torch.vtensor<[1],si64> -> !torch.int
%2991 = torch.aten.eq.int %2990, %int0_1066 : !torch.int, !torch.int -> !torch.bool
%2992 = torch.aten.Int.bool %2991 : !torch.bool -> !torch.int
%int6_1070 = torch.constant.int 6
%2993 = torch.aten.mul.int %2992, %int6_1070 : !torch.int, !torch.int -> !torch.int
%2994 = torch.aten.add.int %2990, %2993 : !torch.int, !torch.int -> !torch.int
%int2_1071 = torch.constant.int 2
%2995 = torch.aten.select.int %2982, %int0_1066, %int2_1071 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2996 = torch.aten.item %2995 : !torch.vtensor<[1],si64> -> !torch.int
%2997 = torch.aten.eq.int %2996, %int0_1066 : !torch.int, !torch.int -> !torch.bool
%2998 = torch.aten.Int.bool %2997 : !torch.bool -> !torch.int
%int64_1072 = torch.constant.int 64
%2999 = torch.aten.mul.int %2998, %int64_1072 : !torch.int, !torch.int -> !torch.int
%3000 = torch.aten.add.int %2996, %2999 : !torch.int, !torch.int -> !torch.int
%int3_1073 = torch.constant.int 3
%3001 = torch.aten.select.int %2982, %int0_1066, %int3_1073 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3002 = torch.aten.item %3001 : !torch.vtensor<[1],si64> -> !torch.int
%3003 = torch.aten.eq.int %3002, %int0_1066 : !torch.int, !torch.int -> !torch.bool
%3004 = torch.aten.Int.bool %3003 : !torch.bool -> !torch.int
%3005 = torch.aten.mul.int %3004, %int0_1066 : !torch.int, !torch.int -> !torch.int
%3006 = torch.aten.add.int %3002, %3005 : !torch.int, !torch.int -> !torch.int
%3007 = torch.prim.ListConstruct %2988, %2994, %3000, %3006 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3008 = torch.aten.reshape %2981, %3007 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_1074 = torch.constant.int 1
%int2_1075 = torch.constant.int 2
%3009 = torch.aten.transpose.int %3008, %int1_1074, %int2_1075 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%3010 = torch.vtensor.literal(dense_resource<__139> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1076 = torch.constant.int 0
%int0_1077 = torch.constant.int 0
%3011 = torch.aten.select.int %3010, %int0_1076, %int0_1077 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3012 = torch.aten.item %3011 : !torch.vtensor<[1],si64> -> !torch.int
%3013 = torch.aten.eq.int %3012, %int0_1076 : !torch.int, !torch.int -> !torch.bool
%3014 = torch.aten.Int.bool %3013 : !torch.bool -> !torch.int
%int1_1078 = torch.constant.int 1
%3015 = torch.aten.mul.int %3014, %int1_1078 : !torch.int, !torch.int -> !torch.int
%3016 = torch.aten.add.int %3012, %3015 : !torch.int, !torch.int -> !torch.int
%int1_1079 = torch.constant.int 1
%3017 = torch.aten.select.int %3010, %int0_1076, %int1_1079 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3018 = torch.aten.item %3017 : !torch.vtensor<[1],si64> -> !torch.int
%3019 = torch.aten.eq.int %3018, %int0_1076 : !torch.int, !torch.int -> !torch.bool
%3020 = torch.aten.Int.bool %3019 : !torch.bool -> !torch.int
%int6_1080 = torch.constant.int 6
%3021 = torch.aten.mul.int %3020, %int6_1080 : !torch.int, !torch.int -> !torch.int
%3022 = torch.aten.add.int %3018, %3021 : !torch.int, !torch.int -> !torch.int
%int2_1081 = torch.constant.int 2
%3023 = torch.aten.select.int %3010, %int0_1076, %int2_1081 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3024 = torch.aten.item %3023 : !torch.vtensor<[1],si64> -> !torch.int
%3025 = torch.aten.eq.int %3024, %int0_1076 : !torch.int, !torch.int -> !torch.bool
%3026 = torch.aten.Int.bool %3025 : !torch.bool -> !torch.int
%int12_1082 = torch.constant.int 12
%3027 = torch.aten.mul.int %3026, %int12_1082 : !torch.int, !torch.int -> !torch.int
%3028 = torch.aten.add.int %3024, %3027 : !torch.int, !torch.int -> !torch.int
%3029 = torch.prim.ListConstruct %3016, %3022, %3028 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3030 = torch.aten.reshape %3009, %3029 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%3031 = torch.aten.matmul %3030, %183 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1083 = torch.constant.int 1
%3032 = torch.aten.add.Tensor %103, %3031, %int1_1083 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_1084 = torch.constant.int 1
%3033 = torch.aten.add.Tensor %2770, %3032, %int1_1084 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%3034 = torch.vtensor.literal(dense_resource<__140> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_1085 = torch.constant.int 0
%int0_1086 = torch.constant.int 0
%3035 = torch.aten.select.int %3034, %int0_1085, %int0_1086 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3036 = torch.aten.item %3035 : !torch.vtensor<[1],si64> -> !torch.int
%3037 = torch.aten.eq.int %3036, %int0_1085 : !torch.int, !torch.int -> !torch.bool
%3038 = torch.aten.Int.bool %3037 : !torch.bool -> !torch.int
%int1_1087 = torch.constant.int 1
%3039 = torch.aten.mul.int %3038, %int1_1087 : !torch.int, !torch.int -> !torch.int
%3040 = torch.aten.add.int %3036, %3039 : !torch.int, !torch.int -> !torch.int
%int1_1088 = torch.constant.int 1
%3041 = torch.aten.select.int %3034, %int0_1085, %int1_1088 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3042 = torch.aten.item %3041 : !torch.vtensor<[1],si64> -> !torch.int
%3043 = torch.aten.eq.int %3042, %int0_1085 : !torch.int, !torch.int -> !torch.bool
%3044 = torch.aten.Int.bool %3043 : !torch.bool -> !torch.int
%int6_1089 = torch.constant.int 6
%3045 = torch.aten.mul.int %3044, %int6_1089 : !torch.int, !torch.int -> !torch.int
%3046 = torch.aten.add.int %3042, %3045 : !torch.int, !torch.int -> !torch.int
%3047 = torch.prim.ListConstruct %3040, %3046 : (!torch.int, !torch.int) -> !torch.list<int>
%3048 = torch.aten.reshape %3033, %3047 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
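// Same LayerNorm + feed-forward sublayer as above, this time with %110/%111 (norm)
// and %106-%109 (MLP weights/biases).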
%float9.999990e-06_1090 = torch.constant.float 9.9999997473787516E-6
%int768_1091 = torch.constant.int 768
%3049 = torch.prim.ListConstruct %int768_1091 : (!torch.int) -> !torch.list<int>
%result0_1092, %result1_1093, %result2_1094 = torch.aten.native_layer_norm %3048, %3049, %110, %111, %float9.999990e-06_1090 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_1095 = torch.constant.int 0
%int1_1096 = torch.constant.int 1
%3050 = torch.aten.transpose.int %106, %int0_1095, %int1_1096 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%3051 = torch.aten.mm %result0_1092, %3050 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%3052 = torch.aten.add.Tensor %3051, %107, %int1_1096 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%3053 = torch.aten.relu %3052 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_1097 = torch.constant.int 0
%int1_1098 = torch.constant.int 1
%3054 = torch.aten.transpose.int %108, %int0_1097, %int1_1098 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%3055 = torch.aten.mm %3053, %3054 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%3056 = torch.aten.add.Tensor %3055, %109, %int1_1098 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_1099 = torch.constant.int 1
%3057 = torch.aten.add.Tensor %3048, %3056, %int1_1099 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%3058 = torch.vtensor.literal(dense_resource<__141> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1100 = torch.constant.int 0
%int0_1101 = torch.constant.int 0
%3059 = torch.aten.select.int %3058, %int0_1100, %int0_1101 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3060 = torch.aten.item %3059 : !torch.vtensor<[1],si64> -> !torch.int
%3061 = torch.aten.eq.int %3060, %int0_1100 : !torch.int, !torch.int -> !torch.bool
%3062 = torch.aten.Int.bool %3061 : !torch.bool -> !torch.int
%int6_1102 = torch.constant.int 6
%3063 = torch.aten.mul.int %3062, %int6_1102 : !torch.int, !torch.int -> !torch.int
%3064 = torch.aten.add.int %3060, %3063 : !torch.int, !torch.int -> !torch.int
%int1_1103 = torch.constant.int 1
%3065 = torch.aten.select.int %3058, %int0_1100, %int1_1103 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3066 = torch.aten.item %3065 : !torch.vtensor<[1],si64> -> !torch.int
%3067 = torch.aten.eq.int %3066, %int0_1100 : !torch.int, !torch.int -> !torch.bool
%3068 = torch.aten.Int.bool %3067 : !torch.bool -> !torch.int
%int768_1104 = torch.constant.int 768
%3069 = torch.aten.mul.int %3068, %int768_1104 : !torch.int, !torch.int -> !torch.int
%3070 = torch.aten.add.int %3066, %3069 : !torch.int, !torch.int -> !torch.int
%int2_1105 = torch.constant.int 2
%3071 = torch.aten.select.int %3058, %int0_1100, %int2_1105 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3072 = torch.aten.item %3071 : !torch.vtensor<[1],si64> -> !torch.int
%3073 = torch.aten.eq.int %3072, %int0_1100 : !torch.int, !torch.int -> !torch.bool
%3074 = torch.aten.Int.bool %3073 : !torch.bool -> !torch.int
%3075 = torch.aten.mul.int %3074, %int0_1100 : !torch.int, !torch.int -> !torch.int
%3076 = torch.aten.add.int %3072, %3075 : !torch.int, !torch.int -> !torch.int
%3077 = torch.prim.ListConstruct %3064, %3070, %3076 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3078 = torch.aten.reshape %3057, %3077 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%float9.999990e-06_1106 = torch.constant.float 9.9999997473787516E-6
%int768_1107 = torch.constant.int 768
%3079 = torch.prim.ListConstruct %int768_1107 : (!torch.int) -> !torch.list<int>
%result0_1108, %result1_1109, %result2_1110 = torch.aten.native_layer_norm %3078, %3079, %116, %117, %float9.999990e-06_1106 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
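// The per-layer pattern repeats from here: Q/K/V projections (%184-%186), scores,
// mask + softmax, output projection, and MLP for the next decoder layer.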
%3080 = torch.aten.matmul %result0_1108, %184 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1111 = torch.constant.int 1
%3081 = torch.aten.add.Tensor %114, %3080, %int1_1111 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%3082 = torch.vtensor.literal(dense_resource<__142> : tensor<f32>) : !torch.vtensor<[],f32>
%3083 = torch.aten.mul.Tensor %3081, %3082 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%3084 = torch.aten.matmul %result0_1108, %185 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1112 = torch.constant.int 1
%3085 = torch.aten.add.Tensor %112, %3084, %int1_1112 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%3086 = torch.vtensor.literal(dense_resource<__143> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%3087 = torch.vtensor.literal(dense_resource<__144> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1113 = torch.constant.int 0
%int0_1114 = torch.constant.int 0
%3088 = torch.aten.select.int %3086, %int0_1113, %int0_1114 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3089 = torch.aten.item %3088 : !torch.vtensor<[1],si64> -> !torch.int
%3090 = torch.aten.eq.int %3089, %int0_1113 : !torch.int, !torch.int -> !torch.bool
%3091 = torch.aten.Int.bool %3090 : !torch.bool -> !torch.int
%int1_1115 = torch.constant.int 1
%3092 = torch.aten.mul.int %3091, %int1_1115 : !torch.int, !torch.int -> !torch.int
%3093 = torch.aten.add.int %3089, %3092 : !torch.int, !torch.int -> !torch.int
%int1_1116 = torch.constant.int 1
%3094 = torch.aten.select.int %3086, %int0_1113, %int1_1116 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3095 = torch.aten.item %3094 : !torch.vtensor<[1],si64> -> !torch.int
%3096 = torch.aten.eq.int %3095, %int0_1113 : !torch.int, !torch.int -> !torch.bool
%3097 = torch.aten.Int.bool %3096 : !torch.bool -> !torch.int
%int6_1117 = torch.constant.int 6
%3098 = torch.aten.mul.int %3097, %int6_1117 : !torch.int, !torch.int -> !torch.int
%3099 = torch.aten.add.int %3095, %3098 : !torch.int, !torch.int -> !torch.int
%int2_1118 = torch.constant.int 2
%3100 = torch.aten.select.int %3086, %int0_1113, %int2_1118 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3101 = torch.aten.item %3100 : !torch.vtensor<[1],si64> -> !torch.int
%3102 = torch.aten.eq.int %3101, %int0_1113 : !torch.int, !torch.int -> !torch.bool
%3103 = torch.aten.Int.bool %3102 : !torch.bool -> !torch.int
%int768_1119 = torch.constant.int 768
%3104 = torch.aten.mul.int %3103, %int768_1119 : !torch.int, !torch.int -> !torch.int
%3105 = torch.aten.add.int %3101, %3104 : !torch.int, !torch.int -> !torch.int
%int3_1120 = torch.constant.int 3
%3106 = torch.aten.select.int %3086, %int0_1113, %int3_1120 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3107 = torch.aten.item %3106 : !torch.vtensor<[1],si64> -> !torch.int
%3108 = torch.aten.eq.int %3107, %int0_1113 : !torch.int, !torch.int -> !torch.bool
%3109 = torch.aten.Int.bool %3108 : !torch.bool -> !torch.int
%3110 = torch.aten.mul.int %3109, %int0_1113 : !torch.int, !torch.int -> !torch.int
%3111 = torch.aten.add.int %3107, %3110 : !torch.int, !torch.int -> !torch.int
%3112 = torch.prim.ListConstruct %3093, %3099, %3105, %3111 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3113 = torch.aten.reshape %3085, %3112 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1121 = torch.constant.int 1
%int2_1122 = torch.constant.int 2
%3114 = torch.aten.transpose.int %3113, %int1_1121, %int2_1122 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%3115 = torch.aten.matmul %result0_1108, %186 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1123 = torch.constant.int 1
%3116 = torch.aten.add.Tensor %113, %3115, %int1_1123 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_1124 = torch.constant.int 0
%int0_1125 = torch.constant.int 0
%3117 = torch.aten.select.int %3087, %int0_1124, %int0_1125 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3118 = torch.aten.item %3117 : !torch.vtensor<[1],si64> -> !torch.int
%3119 = torch.aten.eq.int %3118, %int0_1124 : !torch.int, !torch.int -> !torch.bool
%3120 = torch.aten.Int.bool %3119 : !torch.bool -> !torch.int
%int1_1126 = torch.constant.int 1
%3121 = torch.aten.mul.int %3120, %int1_1126 : !torch.int, !torch.int -> !torch.int
%3122 = torch.aten.add.int %3118, %3121 : !torch.int, !torch.int -> !torch.int
%int1_1127 = torch.constant.int 1
%3123 = torch.aten.select.int %3087, %int0_1124, %int1_1127 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3124 = torch.aten.item %3123 : !torch.vtensor<[1],si64> -> !torch.int
%3125 = torch.aten.eq.int %3124, %int0_1124 : !torch.int, !torch.int -> !torch.bool
%3126 = torch.aten.Int.bool %3125 : !torch.bool -> !torch.int
%int6_1128 = torch.constant.int 6
%3127 = torch.aten.mul.int %3126, %int6_1128 : !torch.int, !torch.int -> !torch.int
%3128 = torch.aten.add.int %3124, %3127 : !torch.int, !torch.int -> !torch.int
%int2_1129 = torch.constant.int 2
%3129 = torch.aten.select.int %3087, %int0_1124, %int2_1129 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3130 = torch.aten.item %3129 : !torch.vtensor<[1],si64> -> !torch.int
%3131 = torch.aten.eq.int %3130, %int0_1124 : !torch.int, !torch.int -> !torch.bool
%3132 = torch.aten.Int.bool %3131 : !torch.bool -> !torch.int
%int768_1130 = torch.constant.int 768
%3133 = torch.aten.mul.int %3132, %int768_1130 : !torch.int, !torch.int -> !torch.int
%3134 = torch.aten.add.int %3130, %3133 : !torch.int, !torch.int -> !torch.int
%int3_1131 = torch.constant.int 3
%3135 = torch.aten.select.int %3087, %int0_1124, %int3_1131 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3136 = torch.aten.item %3135 : !torch.vtensor<[1],si64> -> !torch.int
%3137 = torch.aten.eq.int %3136, %int0_1124 : !torch.int, !torch.int -> !torch.bool
%3138 = torch.aten.Int.bool %3137 : !torch.bool -> !torch.int
%3139 = torch.aten.mul.int %3138, %int0_1124 : !torch.int, !torch.int -> !torch.int
%3140 = torch.aten.add.int %3136, %3139 : !torch.int, !torch.int -> !torch.int
%3141 = torch.prim.ListConstruct %3122, %3128, %3134, %3140 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3142 = torch.aten.reshape %3116, %3141 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1132 = torch.constant.int 1
%int2_1133 = torch.constant.int 2
%3143 = torch.aten.transpose.int %3142, %int1_1132, %int2_1133 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%3144 = torch.vtensor.literal(dense_resource<__145> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1134 = torch.constant.int 0
%int0_1135 = torch.constant.int 0
%3145 = torch.aten.select.int %3144, %int0_1134, %int0_1135 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3146 = torch.aten.item %3145 : !torch.vtensor<[1],si64> -> !torch.int
%3147 = torch.aten.eq.int %3146, %int0_1134 : !torch.int, !torch.int -> !torch.bool
%3148 = torch.aten.Int.bool %3147 : !torch.bool -> !torch.int
%int1_1136 = torch.constant.int 1
%3149 = torch.aten.mul.int %3148, %int1_1136 : !torch.int, !torch.int -> !torch.int
%3150 = torch.aten.add.int %3146, %3149 : !torch.int, !torch.int -> !torch.int
%int1_1137 = torch.constant.int 1
%3151 = torch.aten.select.int %3144, %int0_1134, %int1_1137 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3152 = torch.aten.item %3151 : !torch.vtensor<[1],si64> -> !torch.int
%3153 = torch.aten.eq.int %3152, %int0_1134 : !torch.int, !torch.int -> !torch.bool
%3154 = torch.aten.Int.bool %3153 : !torch.bool -> !torch.int
%int6_1138 = torch.constant.int 6
%3155 = torch.aten.mul.int %3154, %int6_1138 : !torch.int, !torch.int -> !torch.int
%3156 = torch.aten.add.int %3152, %3155 : !torch.int, !torch.int -> !torch.int
%int2_1139 = torch.constant.int 2
%3157 = torch.aten.select.int %3144, %int0_1134, %int2_1139 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3158 = torch.aten.item %3157 : !torch.vtensor<[1],si64> -> !torch.int
%3159 = torch.aten.eq.int %3158, %int0_1134 : !torch.int, !torch.int -> !torch.bool
%3160 = torch.aten.Int.bool %3159 : !torch.bool -> !torch.int
%int768_1140 = torch.constant.int 768
%3161 = torch.aten.mul.int %3160, %int768_1140 : !torch.int, !torch.int -> !torch.int
%3162 = torch.aten.add.int %3158, %3161 : !torch.int, !torch.int -> !torch.int
%int3_1141 = torch.constant.int 3
%3163 = torch.aten.select.int %3144, %int0_1134, %int3_1141 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3164 = torch.aten.item %3163 : !torch.vtensor<[1],si64> -> !torch.int
%3165 = torch.aten.eq.int %3164, %int0_1134 : !torch.int, !torch.int -> !torch.bool
%3166 = torch.aten.Int.bool %3165 : !torch.bool -> !torch.int
%3167 = torch.aten.mul.int %3166, %int0_1134 : !torch.int, !torch.int -> !torch.int
%3168 = torch.aten.add.int %3164, %3167 : !torch.int, !torch.int -> !torch.int
%3169 = torch.prim.ListConstruct %3150, %3156, %3162, %3168 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3170 = torch.aten.reshape %3083, %3169 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1142 = torch.constant.int 1
%int2_1143 = torch.constant.int 2
%3171 = torch.aten.transpose.int %3170, %int1_1142, %int2_1143 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%3172 = torch.vtensor.literal(dense_resource<__146> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%3173 = torch.vtensor.literal(dense_resource<__147> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%3174 = torch.vtensor.literal(dense_resource<__148> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1144 = torch.constant.int 0
%int0_1145 = torch.constant.int 0
%3175 = torch.aten.select.int %3172, %int0_1144, %int0_1145 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3176 = torch.aten.item %3175 : !torch.vtensor<[1],si64> -> !torch.int
%3177 = torch.aten.eq.int %3176, %int0_1144 : !torch.int, !torch.int -> !torch.bool
%3178 = torch.aten.Int.bool %3177 : !torch.bool -> !torch.int
%int1_1146 = torch.constant.int 1
%3179 = torch.aten.mul.int %3178, %int1_1146 : !torch.int, !torch.int -> !torch.int
%3180 = torch.aten.add.int %3176, %3179 : !torch.int, !torch.int -> !torch.int
%int1_1147 = torch.constant.int 1
%3181 = torch.aten.select.int %3172, %int0_1144, %int1_1147 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3182 = torch.aten.item %3181 : !torch.vtensor<[1],si64> -> !torch.int
%3183 = torch.aten.eq.int %3182, %int0_1144 : !torch.int, !torch.int -> !torch.bool
%3184 = torch.aten.Int.bool %3183 : !torch.bool -> !torch.int
%int12_1148 = torch.constant.int 12
%3185 = torch.aten.mul.int %3184, %int12_1148 : !torch.int, !torch.int -> !torch.int
%3186 = torch.aten.add.int %3182, %3185 : !torch.int, !torch.int -> !torch.int
%int2_1149 = torch.constant.int 2
%3187 = torch.aten.select.int %3172, %int0_1144, %int2_1149 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3188 = torch.aten.item %3187 : !torch.vtensor<[1],si64> -> !torch.int
%3189 = torch.aten.eq.int %3188, %int0_1144 : !torch.int, !torch.int -> !torch.bool
%3190 = torch.aten.Int.bool %3189 : !torch.bool -> !torch.int
%int6_1150 = torch.constant.int 6
%3191 = torch.aten.mul.int %3190, %int6_1150 : !torch.int, !torch.int -> !torch.int
%3192 = torch.aten.add.int %3188, %3191 : !torch.int, !torch.int -> !torch.int
%3193 = torch.prim.ListConstruct %3180, %3186, %3192 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3194 = torch.aten.reshape %3171, %3193 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_1151 = torch.constant.int 0
%int0_1152 = torch.constant.int 0
%3195 = torch.aten.select.int %3173, %int0_1151, %int0_1152 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3196 = torch.aten.item %3195 : !torch.vtensor<[1],si64> -> !torch.int
%3197 = torch.aten.eq.int %3196, %int0_1151 : !torch.int, !torch.int -> !torch.bool
%3198 = torch.aten.Int.bool %3197 : !torch.bool -> !torch.int
%int1_1153 = torch.constant.int 1
%3199 = torch.aten.mul.int %3198, %int1_1153 : !torch.int, !torch.int -> !torch.int
%3200 = torch.aten.add.int %3196, %3199 : !torch.int, !torch.int -> !torch.int
%int1_1154 = torch.constant.int 1
%3201 = torch.aten.select.int %3173, %int0_1151, %int1_1154 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3202 = torch.aten.item %3201 : !torch.vtensor<[1],si64> -> !torch.int
%3203 = torch.aten.eq.int %3202, %int0_1151 : !torch.int, !torch.int -> !torch.bool
%3204 = torch.aten.Int.bool %3203 : !torch.bool -> !torch.int
%int12_1155 = torch.constant.int 12
%3205 = torch.aten.mul.int %3204, %int12_1155 : !torch.int, !torch.int -> !torch.int
%3206 = torch.aten.add.int %3202, %3205 : !torch.int, !torch.int -> !torch.int
%int2_1156 = torch.constant.int 2
%3207 = torch.aten.select.int %3173, %int0_1151, %int2_1156 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3208 = torch.aten.item %3207 : !torch.vtensor<[1],si64> -> !torch.int
%3209 = torch.aten.eq.int %3208, %int0_1151 : !torch.int, !torch.int -> !torch.bool
%3210 = torch.aten.Int.bool %3209 : !torch.bool -> !torch.int
%int6_1157 = torch.constant.int 6
%3211 = torch.aten.mul.int %3210, %int6_1157 : !torch.int, !torch.int -> !torch.int
%3212 = torch.aten.add.int %3208, %3211 : !torch.int, !torch.int -> !torch.int
%3213 = torch.prim.ListConstruct %3200, %3206, %3212 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3214 = torch.aten.reshape %3114, %3213 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_1158 = torch.constant.int 0
%int0_1159 = torch.constant.int 0
%3215 = torch.aten.select.int %3174, %int0_1158, %int0_1159 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3216 = torch.aten.item %3215 : !torch.vtensor<[1],si64> -> !torch.int
%3217 = torch.aten.eq.int %3216, %int0_1158 : !torch.int, !torch.int -> !torch.bool
%3218 = torch.aten.Int.bool %3217 : !torch.bool -> !torch.int
%int1_1160 = torch.constant.int 1
%3219 = torch.aten.mul.int %3218, %int1_1160 : !torch.int, !torch.int -> !torch.int
%3220 = torch.aten.add.int %3216, %3219 : !torch.int, !torch.int -> !torch.int
%int1_1161 = torch.constant.int 1
%3221 = torch.aten.select.int %3174, %int0_1158, %int1_1161 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3222 = torch.aten.item %3221 : !torch.vtensor<[1],si64> -> !torch.int
%3223 = torch.aten.eq.int %3222, %int0_1158 : !torch.int, !torch.int -> !torch.bool
%3224 = torch.aten.Int.bool %3223 : !torch.bool -> !torch.int
%int12_1162 = torch.constant.int 12
%3225 = torch.aten.mul.int %3224, %int12_1162 : !torch.int, !torch.int -> !torch.int
%3226 = torch.aten.add.int %3222, %3225 : !torch.int, !torch.int -> !torch.int
%int2_1163 = torch.constant.int 2
%3227 = torch.aten.select.int %3174, %int0_1158, %int2_1163 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3228 = torch.aten.item %3227 : !torch.vtensor<[1],si64> -> !torch.int
%3229 = torch.aten.eq.int %3228, %int0_1158 : !torch.int, !torch.int -> !torch.bool
%3230 = torch.aten.Int.bool %3229 : !torch.bool -> !torch.int
%int6_1164 = torch.constant.int 6
%3231 = torch.aten.mul.int %3230, %int6_1164 : !torch.int, !torch.int -> !torch.int
%3232 = torch.aten.add.int %3228, %3231 : !torch.int, !torch.int -> !torch.int
%3233 = torch.prim.ListConstruct %3220, %3226, %3232 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3234 = torch.aten.reshape %3143, %3233 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int1_1165 = torch.constant.int 1
%int2_1166 = torch.constant.int 2
%3235 = torch.aten.transpose.int %3214, %int1_1165, %int2_1166 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%3236 = torch.aten.matmul %3194, %3235 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
%3237 = torch.vtensor.literal(dense_resource<__149> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1167 = torch.constant.int 0
%int0_1168 = torch.constant.int 0
%3238 = torch.aten.select.int %3237, %int0_1167, %int0_1168 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3239 = torch.aten.item %3238 : !torch.vtensor<[1],si64> -> !torch.int
%3240 = torch.aten.eq.int %3239, %int0_1167 : !torch.int, !torch.int -> !torch.bool
%3241 = torch.aten.Int.bool %3240 : !torch.bool -> !torch.int
%int12_1169 = torch.constant.int 12
%3242 = torch.aten.mul.int %3241, %int12_1169 : !torch.int, !torch.int -> !torch.int
%3243 = torch.aten.add.int %3239, %3242 : !torch.int, !torch.int -> !torch.int
%int1_1170 = torch.constant.int 1
%3244 = torch.aten.select.int %3237, %int0_1167, %int1_1170 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3245 = torch.aten.item %3244 : !torch.vtensor<[1],si64> -> !torch.int
%3246 = torch.aten.eq.int %3245, %int0_1167 : !torch.int, !torch.int -> !torch.bool
%3247 = torch.aten.Int.bool %3246 : !torch.bool -> !torch.int
%int6_1171 = torch.constant.int 6
%3248 = torch.aten.mul.int %3247, %int6_1171 : !torch.int, !torch.int -> !torch.int
%3249 = torch.aten.add.int %3245, %3248 : !torch.int, !torch.int -> !torch.int
%int2_1172 = torch.constant.int 2
%3250 = torch.aten.select.int %3237, %int0_1167, %int2_1172 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3251 = torch.aten.item %3250 : !torch.vtensor<[1],si64> -> !torch.int
%3252 = torch.aten.eq.int %3251, %int0_1167 : !torch.int, !torch.int -> !torch.bool
%3253 = torch.aten.Int.bool %3252 : !torch.bool -> !torch.int
%int6_1173 = torch.constant.int 6
%3254 = torch.aten.mul.int %3253, %int6_1173 : !torch.int, !torch.int -> !torch.int
%3255 = torch.aten.add.int %3251, %3254 : !torch.int, !torch.int -> !torch.int
%int3_1174 = torch.constant.int 3
%3256 = torch.aten.select.int %3237, %int0_1167, %int3_1174 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3257 = torch.aten.item %3256 : !torch.vtensor<[1],si64> -> !torch.int
%3258 = torch.aten.eq.int %3257, %int0_1167 : !torch.int, !torch.int -> !torch.bool
%3259 = torch.aten.Int.bool %3258 : !torch.bool -> !torch.int
%3260 = torch.aten.mul.int %3259, %int0_1167 : !torch.int, !torch.int -> !torch.int
%3261 = torch.aten.add.int %3257, %3260 : !torch.int, !torch.int -> !torch.int
%3262 = torch.prim.ListConstruct %3243, %3249, %3255, %3261 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3263 = torch.aten.reshape %3236, %3262 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
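// Next, the broadcast attention mask %277 is added to the [1,12,6,6] scores,
// and torch.aten.maximum clamps them from below against the scalar resource
// __150, presumably the dtype-min floor that accompanies a causal mask.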
%int1_1175 = torch.constant.int 1
%3264 = torch.aten.add.Tensor %3263, %277, %int1_1175 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%3265 = torch.vtensor.literal(dense_resource<__150> : tensor<f32>) : !torch.vtensor<[],f32>
%3266 = torch.aten.maximum %3264, %3265 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
%3267 = torch.vtensor.literal(dense_resource<__151> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1176 = torch.constant.int 0
%int0_1177 = torch.constant.int 0
%3268 = torch.aten.select.int %3267, %int0_1176, %int0_1177 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3269 = torch.aten.item %3268 : !torch.vtensor<[1],si64> -> !torch.int
%3270 = torch.aten.eq.int %3269, %int0_1176 : !torch.int, !torch.int -> !torch.bool
%3271 = torch.aten.Int.bool %3270 : !torch.bool -> !torch.int
%int-1_1178 = torch.constant.int -1
%3272 = torch.aten.mul.int %3271, %int-1_1178 : !torch.int, !torch.int -> !torch.int
%3273 = torch.aten.add.int %3269, %3272 : !torch.int, !torch.int -> !torch.int
%int1_1179 = torch.constant.int 1
%3274 = torch.aten.select.int %3267, %int0_1176, %int1_1179 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3275 = torch.aten.item %3274 : !torch.vtensor<[1],si64> -> !torch.int
%3276 = torch.aten.eq.int %3275, %int0_1176 : !torch.int, !torch.int -> !torch.bool
%3277 = torch.aten.Int.bool %3276 : !torch.bool -> !torch.int
%int12_1180 = torch.constant.int 12
%3278 = torch.aten.mul.int %3277, %int12_1180 : !torch.int, !torch.int -> !torch.int
%3279 = torch.aten.add.int %3275, %3278 : !torch.int, !torch.int -> !torch.int
%int2_1181 = torch.constant.int 2
%3280 = torch.aten.select.int %3267, %int0_1176, %int2_1181 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3281 = torch.aten.item %3280 : !torch.vtensor<[1],si64> -> !torch.int
%3282 = torch.aten.eq.int %3281, %int0_1176 : !torch.int, !torch.int -> !torch.bool
%3283 = torch.aten.Int.bool %3282 : !torch.bool -> !torch.int
%int6_1182 = torch.constant.int 6
%3284 = torch.aten.mul.int %3283, %int6_1182 : !torch.int, !torch.int -> !torch.int
%3285 = torch.aten.add.int %3281, %3284 : !torch.int, !torch.int -> !torch.int
%3286 = torch.prim.ListConstruct %3273, %3279, %3285 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3287 = torch.aten.reshape %3266, %3286 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
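// Softmax over dim 2 (the key dimension), then the attention-weighted sum:
// [12,6,6] @ [12,6,64] -> [12,6,64], with %3234 in the value position.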
%int2_1183 = torch.constant.int 2
%none_1184 = torch.constant.none
%3288 = torch.aten.softmax.int %3287, %int2_1183, %none_1184 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%3289 = torch.aten.matmul %3288, %3234 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
%3290 = torch.vtensor.literal(dense_resource<__152> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1185 = torch.constant.int 0
%int0_1186 = torch.constant.int 0
%3291 = torch.aten.select.int %3290, %int0_1185, %int0_1186 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3292 = torch.aten.item %3291 : !torch.vtensor<[1],si64> -> !torch.int
%3293 = torch.aten.eq.int %3292, %int0_1185 : !torch.int, !torch.int -> !torch.bool
%3294 = torch.aten.Int.bool %3293 : !torch.bool -> !torch.int
%int12_1187 = torch.constant.int 12
%3295 = torch.aten.mul.int %3294, %int12_1187 : !torch.int, !torch.int -> !torch.int
%3296 = torch.aten.add.int %3292, %3295 : !torch.int, !torch.int -> !torch.int
%int1_1188 = torch.constant.int 1
%3297 = torch.aten.select.int %3290, %int0_1185, %int1_1188 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3298 = torch.aten.item %3297 : !torch.vtensor<[1],si64> -> !torch.int
%3299 = torch.aten.eq.int %3298, %int0_1185 : !torch.int, !torch.int -> !torch.bool
%3300 = torch.aten.Int.bool %3299 : !torch.bool -> !torch.int
%int6_1189 = torch.constant.int 6
%3301 = torch.aten.mul.int %3300, %int6_1189 : !torch.int, !torch.int -> !torch.int
%3302 = torch.aten.add.int %3298, %3301 : !torch.int, !torch.int -> !torch.int
%int2_1190 = torch.constant.int 2
%3303 = torch.aten.select.int %3290, %int0_1185, %int2_1190 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3304 = torch.aten.item %3303 : !torch.vtensor<[1],si64> -> !torch.int
%3305 = torch.aten.eq.int %3304, %int0_1185 : !torch.int, !torch.int -> !torch.bool
%3306 = torch.aten.Int.bool %3305 : !torch.bool -> !torch.int
%int64_1191 = torch.constant.int 64
%3307 = torch.aten.mul.int %3306, %int64_1191 : !torch.int, !torch.int -> !torch.int
%3308 = torch.aten.add.int %3304, %3307 : !torch.int, !torch.int -> !torch.int
%int3_1192 = torch.constant.int 3
%3309 = torch.aten.select.int %3290, %int0_1185, %int3_1192 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3310 = torch.aten.item %3309 : !torch.vtensor<[1],si64> -> !torch.int
%3311 = torch.aten.eq.int %3310, %int0_1185 : !torch.int, !torch.int -> !torch.bool
%3312 = torch.aten.Int.bool %3311 : !torch.bool -> !torch.int
%3313 = torch.aten.mul.int %3312, %int0_1185 : !torch.int, !torch.int -> !torch.int
%3314 = torch.aten.add.int %3310, %3313 : !torch.int, !torch.int -> !torch.int
%3315 = torch.prim.ListConstruct %3296, %3302, %3308, %3314 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3316 = torch.aten.reshape %3289, %3315 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_1193 = torch.constant.int 1
%int2_1194 = torch.constant.int 2
%3317 = torch.aten.transpose.int %3316, %int1_1193, %int2_1194 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%3318 = torch.vtensor.literal(dense_resource<__153> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1195 = torch.constant.int 0
%int0_1196 = torch.constant.int 0
%3319 = torch.aten.select.int %3318, %int0_1195, %int0_1196 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3320 = torch.aten.item %3319 : !torch.vtensor<[1],si64> -> !torch.int
%3321 = torch.aten.eq.int %3320, %int0_1195 : !torch.int, !torch.int -> !torch.bool
%3322 = torch.aten.Int.bool %3321 : !torch.bool -> !torch.int
%int1_1197 = torch.constant.int 1
%3323 = torch.aten.mul.int %3322, %int1_1197 : !torch.int, !torch.int -> !torch.int
%3324 = torch.aten.add.int %3320, %3323 : !torch.int, !torch.int -> !torch.int
%int1_1198 = torch.constant.int 1
%3325 = torch.aten.select.int %3318, %int0_1195, %int1_1198 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3326 = torch.aten.item %3325 : !torch.vtensor<[1],si64> -> !torch.int
%3327 = torch.aten.eq.int %3326, %int0_1195 : !torch.int, !torch.int -> !torch.bool
%3328 = torch.aten.Int.bool %3327 : !torch.bool -> !torch.int
%int6_1199 = torch.constant.int 6
%3329 = torch.aten.mul.int %3328, %int6_1199 : !torch.int, !torch.int -> !torch.int
%3330 = torch.aten.add.int %3326, %3329 : !torch.int, !torch.int -> !torch.int
%int2_1200 = torch.constant.int 2
%3331 = torch.aten.select.int %3318, %int0_1195, %int2_1200 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3332 = torch.aten.item %3331 : !torch.vtensor<[1],si64> -> !torch.int
%3333 = torch.aten.eq.int %3332, %int0_1195 : !torch.int, !torch.int -> !torch.bool
%3334 = torch.aten.Int.bool %3333 : !torch.bool -> !torch.int
%int12_1201 = torch.constant.int 12
%3335 = torch.aten.mul.int %3334, %int12_1201 : !torch.int, !torch.int -> !torch.int
%3336 = torch.aten.add.int %3332, %3335 : !torch.int, !torch.int -> !torch.int
%3337 = torch.prim.ListConstruct %3324, %3330, %3336 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3338 = torch.aten.reshape %3317, %3337 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
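// Heads merged back into [1,6,768]; output projection (weights %187, bias
// %115) followed by the residual add with %3078.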
%3339 = torch.aten.matmul %3338, %187 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1202 = torch.constant.int 1
%3340 = torch.aten.add.Tensor %115, %3339, %int1_1202 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_1203 = torch.constant.int 1
%3341 = torch.aten.add.Tensor %3078, %3340, %int1_1203 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%3342 = torch.vtensor.literal(dense_resource<__154> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_1204 = torch.constant.int 0
%int0_1205 = torch.constant.int 0
%3343 = torch.aten.select.int %3342, %int0_1204, %int0_1205 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3344 = torch.aten.item %3343 : !torch.vtensor<[1],si64> -> !torch.int
%3345 = torch.aten.eq.int %3344, %int0_1204 : !torch.int, !torch.int -> !torch.bool
%3346 = torch.aten.Int.bool %3345 : !torch.bool -> !torch.int
%int1_1206 = torch.constant.int 1
%3347 = torch.aten.mul.int %3346, %int1_1206 : !torch.int, !torch.int -> !torch.int
%3348 = torch.aten.add.int %3344, %3347 : !torch.int, !torch.int -> !torch.int
%int1_1207 = torch.constant.int 1
%3349 = torch.aten.select.int %3342, %int0_1204, %int1_1207 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3350 = torch.aten.item %3349 : !torch.vtensor<[1],si64> -> !torch.int
%3351 = torch.aten.eq.int %3350, %int0_1204 : !torch.int, !torch.int -> !torch.bool
%3352 = torch.aten.Int.bool %3351 : !torch.bool -> !torch.int
%int6_1208 = torch.constant.int 6
%3353 = torch.aten.mul.int %3352, %int6_1208 : !torch.int, !torch.int -> !torch.int
%3354 = torch.aten.add.int %3350, %3353 : !torch.int, !torch.int -> !torch.int
%3355 = torch.prim.ListConstruct %3348, %3354 : (!torch.int, !torch.int) -> !torch.list<int>
%3356 = torch.aten.reshape %3341, %3355 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
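// Flattened to [6,768] for layer norm, then the feed-forward block:
// mm to [6,3072], bias, relu, mm back to [6,768], bias, residual add (%3365).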
%float9.999990e-06_1209 = torch.constant.float 9.9999997473787516E-6
%int768_1210 = torch.constant.int 768
%3357 = torch.prim.ListConstruct %int768_1210 : (!torch.int) -> !torch.list<int>
%result0_1211, %result1_1212, %result2_1213 = torch.aten.native_layer_norm %3356, %3357, %122, %123, %float9.999990e-06_1209 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_1214 = torch.constant.int 0
%int1_1215 = torch.constant.int 1
%3358 = torch.aten.transpose.int %118, %int0_1214, %int1_1215 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%3359 = torch.aten.mm %result0_1211, %3358 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%3360 = torch.aten.add.Tensor %3359, %119, %int1_1215 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%3361 = torch.aten.relu %3360 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_1216 = torch.constant.int 0
%int1_1217 = torch.constant.int 1
%3362 = torch.aten.transpose.int %120, %int0_1216, %int1_1217 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%3363 = torch.aten.mm %3361, %3362 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%3364 = torch.aten.add.Tensor %3363, %121, %int1_1217 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_1218 = torch.constant.int 1
%3365 = torch.aten.add.Tensor %3356, %3364, %int1_1218 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%3366 = torch.vtensor.literal(dense_resource<__155> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1219 = torch.constant.int 0
%int0_1220 = torch.constant.int 0
%3367 = torch.aten.select.int %3366, %int0_1219, %int0_1220 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3368 = torch.aten.item %3367 : !torch.vtensor<[1],si64> -> !torch.int
%3369 = torch.aten.eq.int %3368, %int0_1219 : !torch.int, !torch.int -> !torch.bool
%3370 = torch.aten.Int.bool %3369 : !torch.bool -> !torch.int
%int6_1221 = torch.constant.int 6
%3371 = torch.aten.mul.int %3370, %int6_1221 : !torch.int, !torch.int -> !torch.int
%3372 = torch.aten.add.int %3368, %3371 : !torch.int, !torch.int -> !torch.int
%int1_1222 = torch.constant.int 1
%3373 = torch.aten.select.int %3366, %int0_1219, %int1_1222 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3374 = torch.aten.item %3373 : !torch.vtensor<[1],si64> -> !torch.int
%3375 = torch.aten.eq.int %3374, %int0_1219 : !torch.int, !torch.int -> !torch.bool
%3376 = torch.aten.Int.bool %3375 : !torch.bool -> !torch.int
%int768_1223 = torch.constant.int 768
%3377 = torch.aten.mul.int %3376, %int768_1223 : !torch.int, !torch.int -> !torch.int
%3378 = torch.aten.add.int %3374, %3377 : !torch.int, !torch.int -> !torch.int
%int2_1224 = torch.constant.int 2
%3379 = torch.aten.select.int %3366, %int0_1219, %int2_1224 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3380 = torch.aten.item %3379 : !torch.vtensor<[1],si64> -> !torch.int
%3381 = torch.aten.eq.int %3380, %int0_1219 : !torch.int, !torch.int -> !torch.bool
%3382 = torch.aten.Int.bool %3381 : !torch.bool -> !torch.int
%3383 = torch.aten.mul.int %3382, %int0_1219 : !torch.int, !torch.int -> !torch.int
%3384 = torch.aten.add.int %3380, %3383 : !torch.int, !torch.int -> !torch.int
%3385 = torch.prim.ListConstruct %3372, %3378, %3384 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3386 = torch.aten.reshape %3365, %3385 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
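// The next decoder layer appears to start here: layer norm (%128/%129), then
// three [768,768] projections (%188, %189, %190). The %188 branch is scaled by
// the scalar resource __156, presumably the 1/sqrt(head_dim) query scaling.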
%float9.999990e-06_1225 = torch.constant.float 9.9999997473787516E-6
%int768_1226 = torch.constant.int 768
%3387 = torch.prim.ListConstruct %int768_1226 : (!torch.int) -> !torch.list<int>
%result0_1227, %result1_1228, %result2_1229 = torch.aten.native_layer_norm %3386, %3387, %128, %129, %float9.999990e-06_1225 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
%3388 = torch.aten.matmul %result0_1227, %188 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1230 = torch.constant.int 1
%3389 = torch.aten.add.Tensor %126, %3388, %int1_1230 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%3390 = torch.vtensor.literal(dense_resource<__156> : tensor<f32>) : !torch.vtensor<[],f32>
%3391 = torch.aten.mul.Tensor %3389, %3390 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%3392 = torch.aten.matmul %result0_1227, %189 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1231 = torch.constant.int 1
%3393 = torch.aten.add.Tensor %124, %3392, %int1_1231 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%3394 = torch.vtensor.literal(dense_resource<__157> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%3395 = torch.vtensor.literal(dense_resource<__158> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1232 = torch.constant.int 0
%int0_1233 = torch.constant.int 0
%3396 = torch.aten.select.int %3394, %int0_1232, %int0_1233 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3397 = torch.aten.item %3396 : !torch.vtensor<[1],si64> -> !torch.int
%3398 = torch.aten.eq.int %3397, %int0_1232 : !torch.int, !torch.int -> !torch.bool
%3399 = torch.aten.Int.bool %3398 : !torch.bool -> !torch.int
%int1_1234 = torch.constant.int 1
%3400 = torch.aten.mul.int %3399, %int1_1234 : !torch.int, !torch.int -> !torch.int
%3401 = torch.aten.add.int %3397, %3400 : !torch.int, !torch.int -> !torch.int
%int1_1235 = torch.constant.int 1
%3402 = torch.aten.select.int %3394, %int0_1232, %int1_1235 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3403 = torch.aten.item %3402 : !torch.vtensor<[1],si64> -> !torch.int
%3404 = torch.aten.eq.int %3403, %int0_1232 : !torch.int, !torch.int -> !torch.bool
%3405 = torch.aten.Int.bool %3404 : !torch.bool -> !torch.int
%int6_1236 = torch.constant.int 6
%3406 = torch.aten.mul.int %3405, %int6_1236 : !torch.int, !torch.int -> !torch.int
%3407 = torch.aten.add.int %3403, %3406 : !torch.int, !torch.int -> !torch.int
%int2_1237 = torch.constant.int 2
%3408 = torch.aten.select.int %3394, %int0_1232, %int2_1237 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3409 = torch.aten.item %3408 : !torch.vtensor<[1],si64> -> !torch.int
%3410 = torch.aten.eq.int %3409, %int0_1232 : !torch.int, !torch.int -> !torch.bool
%3411 = torch.aten.Int.bool %3410 : !torch.bool -> !torch.int
%int768_1238 = torch.constant.int 768
%3412 = torch.aten.mul.int %3411, %int768_1238 : !torch.int, !torch.int -> !torch.int
%3413 = torch.aten.add.int %3409, %3412 : !torch.int, !torch.int -> !torch.int
%int3_1239 = torch.constant.int 3
%3414 = torch.aten.select.int %3394, %int0_1232, %int3_1239 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3415 = torch.aten.item %3414 : !torch.vtensor<[1],si64> -> !torch.int
%3416 = torch.aten.eq.int %3415, %int0_1232 : !torch.int, !torch.int -> !torch.bool
%3417 = torch.aten.Int.bool %3416 : !torch.bool -> !torch.int
%3418 = torch.aten.mul.int %3417, %int0_1232 : !torch.int, !torch.int -> !torch.int
%3419 = torch.aten.add.int %3415, %3418 : !torch.int, !torch.int -> !torch.int
%3420 = torch.prim.ListConstruct %3401, %3407, %3413, %3419 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3421 = torch.aten.reshape %3393, %3420 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1240 = torch.constant.int 1
%int2_1241 = torch.constant.int 2
%3422 = torch.aten.transpose.int %3421, %int1_1240, %int2_1241 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%3423 = torch.aten.matmul %result0_1227, %190 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1242 = torch.constant.int 1
%3424 = torch.aten.add.Tensor %125, %3423, %int1_1242 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_1243 = torch.constant.int 0
%int0_1244 = torch.constant.int 0
%3425 = torch.aten.select.int %3395, %int0_1243, %int0_1244 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3426 = torch.aten.item %3425 : !torch.vtensor<[1],si64> -> !torch.int
%3427 = torch.aten.eq.int %3426, %int0_1243 : !torch.int, !torch.int -> !torch.bool
%3428 = torch.aten.Int.bool %3427 : !torch.bool -> !torch.int
%int1_1245 = torch.constant.int 1
%3429 = torch.aten.mul.int %3428, %int1_1245 : !torch.int, !torch.int -> !torch.int
%3430 = torch.aten.add.int %3426, %3429 : !torch.int, !torch.int -> !torch.int
%int1_1246 = torch.constant.int 1
%3431 = torch.aten.select.int %3395, %int0_1243, %int1_1246 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3432 = torch.aten.item %3431 : !torch.vtensor<[1],si64> -> !torch.int
%3433 = torch.aten.eq.int %3432, %int0_1243 : !torch.int, !torch.int -> !torch.bool
%3434 = torch.aten.Int.bool %3433 : !torch.bool -> !torch.int
%int6_1247 = torch.constant.int 6
%3435 = torch.aten.mul.int %3434, %int6_1247 : !torch.int, !torch.int -> !torch.int
%3436 = torch.aten.add.int %3432, %3435 : !torch.int, !torch.int -> !torch.int
%int2_1248 = torch.constant.int 2
%3437 = torch.aten.select.int %3395, %int0_1243, %int2_1248 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3438 = torch.aten.item %3437 : !torch.vtensor<[1],si64> -> !torch.int
%3439 = torch.aten.eq.int %3438, %int0_1243 : !torch.int, !torch.int -> !torch.bool
%3440 = torch.aten.Int.bool %3439 : !torch.bool -> !torch.int
%int768_1249 = torch.constant.int 768
%3441 = torch.aten.mul.int %3440, %int768_1249 : !torch.int, !torch.int -> !torch.int
%3442 = torch.aten.add.int %3438, %3441 : !torch.int, !torch.int -> !torch.int
%int3_1250 = torch.constant.int 3
%3443 = torch.aten.select.int %3395, %int0_1243, %int3_1250 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3444 = torch.aten.item %3443 : !torch.vtensor<[1],si64> -> !torch.int
%3445 = torch.aten.eq.int %3444, %int0_1243 : !torch.int, !torch.int -> !torch.bool
%3446 = torch.aten.Int.bool %3445 : !torch.bool -> !torch.int
%3447 = torch.aten.mul.int %3446, %int0_1243 : !torch.int, !torch.int -> !torch.int
%3448 = torch.aten.add.int %3444, %3447 : !torch.int, !torch.int -> !torch.int
%3449 = torch.prim.ListConstruct %3430, %3436, %3442, %3448 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3450 = torch.aten.reshape %3424, %3449 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1251 = torch.constant.int 1
%int2_1252 = torch.constant.int 2
%3451 = torch.aten.transpose.int %3450, %int1_1251, %int2_1252 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%3452 = torch.vtensor.literal(dense_resource<__159> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1253 = torch.constant.int 0
%int0_1254 = torch.constant.int 0
%3453 = torch.aten.select.int %3452, %int0_1253, %int0_1254 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3454 = torch.aten.item %3453 : !torch.vtensor<[1],si64> -> !torch.int
%3455 = torch.aten.eq.int %3454, %int0_1253 : !torch.int, !torch.int -> !torch.bool
%3456 = torch.aten.Int.bool %3455 : !torch.bool -> !torch.int
%int1_1255 = torch.constant.int 1
%3457 = torch.aten.mul.int %3456, %int1_1255 : !torch.int, !torch.int -> !torch.int
%3458 = torch.aten.add.int %3454, %3457 : !torch.int, !torch.int -> !torch.int
%int1_1256 = torch.constant.int 1
%3459 = torch.aten.select.int %3452, %int0_1253, %int1_1256 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3460 = torch.aten.item %3459 : !torch.vtensor<[1],si64> -> !torch.int
%3461 = torch.aten.eq.int %3460, %int0_1253 : !torch.int, !torch.int -> !torch.bool
%3462 = torch.aten.Int.bool %3461 : !torch.bool -> !torch.int
%int6_1257 = torch.constant.int 6
%3463 = torch.aten.mul.int %3462, %int6_1257 : !torch.int, !torch.int -> !torch.int
%3464 = torch.aten.add.int %3460, %3463 : !torch.int, !torch.int -> !torch.int
%int2_1258 = torch.constant.int 2
%3465 = torch.aten.select.int %3452, %int0_1253, %int2_1258 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3466 = torch.aten.item %3465 : !torch.vtensor<[1],si64> -> !torch.int
%3467 = torch.aten.eq.int %3466, %int0_1253 : !torch.int, !torch.int -> !torch.bool
%3468 = torch.aten.Int.bool %3467 : !torch.bool -> !torch.int
%int768_1259 = torch.constant.int 768
%3469 = torch.aten.mul.int %3468, %int768_1259 : !torch.int, !torch.int -> !torch.int
%3470 = torch.aten.add.int %3466, %3469 : !torch.int, !torch.int -> !torch.int
%int3_1260 = torch.constant.int 3
%3471 = torch.aten.select.int %3452, %int0_1253, %int3_1260 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3472 = torch.aten.item %3471 : !torch.vtensor<[1],si64> -> !torch.int
%3473 = torch.aten.eq.int %3472, %int0_1253 : !torch.int, !torch.int -> !torch.bool
%3474 = torch.aten.Int.bool %3473 : !torch.bool -> !torch.int
%3475 = torch.aten.mul.int %3474, %int0_1253 : !torch.int, !torch.int -> !torch.int
%3476 = torch.aten.add.int %3472, %3475 : !torch.int, !torch.int -> !torch.int
%3477 = torch.prim.ListConstruct %3458, %3464, %3470, %3476 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3478 = torch.aten.reshape %3391, %3477 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1261 = torch.constant.int 1
%int2_1262 = torch.constant.int 2
%3479 = torch.aten.transpose.int %3478, %int1_1261, %int2_1262 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%3480 = torch.vtensor.literal(dense_resource<__160> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%3481 = torch.vtensor.literal(dense_resource<__161> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%3482 = torch.vtensor.literal(dense_resource<__162> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1263 = torch.constant.int 0
%int0_1264 = torch.constant.int 0
%3483 = torch.aten.select.int %3480, %int0_1263, %int0_1264 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3484 = torch.aten.item %3483 : !torch.vtensor<[1],si64> -> !torch.int
%3485 = torch.aten.eq.int %3484, %int0_1263 : !torch.int, !torch.int -> !torch.bool
%3486 = torch.aten.Int.bool %3485 : !torch.bool -> !torch.int
%int1_1265 = torch.constant.int 1
%3487 = torch.aten.mul.int %3486, %int1_1265 : !torch.int, !torch.int -> !torch.int
%3488 = torch.aten.add.int %3484, %3487 : !torch.int, !torch.int -> !torch.int
%int1_1266 = torch.constant.int 1
%3489 = torch.aten.select.int %3480, %int0_1263, %int1_1266 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3490 = torch.aten.item %3489 : !torch.vtensor<[1],si64> -> !torch.int
%3491 = torch.aten.eq.int %3490, %int0_1263 : !torch.int, !torch.int -> !torch.bool
%3492 = torch.aten.Int.bool %3491 : !torch.bool -> !torch.int
%int12_1267 = torch.constant.int 12
%3493 = torch.aten.mul.int %3492, %int12_1267 : !torch.int, !torch.int -> !torch.int
%3494 = torch.aten.add.int %3490, %3493 : !torch.int, !torch.int -> !torch.int
%int2_1268 = torch.constant.int 2
%3495 = torch.aten.select.int %3480, %int0_1263, %int2_1268 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3496 = torch.aten.item %3495 : !torch.vtensor<[1],si64> -> !torch.int
%3497 = torch.aten.eq.int %3496, %int0_1263 : !torch.int, !torch.int -> !torch.bool
%3498 = torch.aten.Int.bool %3497 : !torch.bool -> !torch.int
%int6_1269 = torch.constant.int 6
%3499 = torch.aten.mul.int %3498, %int6_1269 : !torch.int, !torch.int -> !torch.int
%3500 = torch.aten.add.int %3496, %3499 : !torch.int, !torch.int -> !torch.int
%3501 = torch.prim.ListConstruct %3488, %3494, %3500 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3502 = torch.aten.reshape %3479, %3501 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_1270 = torch.constant.int 0
%int0_1271 = torch.constant.int 0
%3503 = torch.aten.select.int %3481, %int0_1270, %int0_1271 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3504 = torch.aten.item %3503 : !torch.vtensor<[1],si64> -> !torch.int
%3505 = torch.aten.eq.int %3504, %int0_1270 : !torch.int, !torch.int -> !torch.bool
%3506 = torch.aten.Int.bool %3505 : !torch.bool -> !torch.int
%int1_1272 = torch.constant.int 1
%3507 = torch.aten.mul.int %3506, %int1_1272 : !torch.int, !torch.int -> !torch.int
%3508 = torch.aten.add.int %3504, %3507 : !torch.int, !torch.int -> !torch.int
%int1_1273 = torch.constant.int 1
%3509 = torch.aten.select.int %3481, %int0_1270, %int1_1273 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3510 = torch.aten.item %3509 : !torch.vtensor<[1],si64> -> !torch.int
%3511 = torch.aten.eq.int %3510, %int0_1270 : !torch.int, !torch.int -> !torch.bool
%3512 = torch.aten.Int.bool %3511 : !torch.bool -> !torch.int
%int12_1274 = torch.constant.int 12
%3513 = torch.aten.mul.int %3512, %int12_1274 : !torch.int, !torch.int -> !torch.int
%3514 = torch.aten.add.int %3510, %3513 : !torch.int, !torch.int -> !torch.int
%int2_1275 = torch.constant.int 2
%3515 = torch.aten.select.int %3481, %int0_1270, %int2_1275 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3516 = torch.aten.item %3515 : !torch.vtensor<[1],si64> -> !torch.int
%3517 = torch.aten.eq.int %3516, %int0_1270 : !torch.int, !torch.int -> !torch.bool
%3518 = torch.aten.Int.bool %3517 : !torch.bool -> !torch.int
%int6_1276 = torch.constant.int 6
%3519 = torch.aten.mul.int %3518, %int6_1276 : !torch.int, !torch.int -> !torch.int
%3520 = torch.aten.add.int %3516, %3519 : !torch.int, !torch.int -> !torch.int
%3521 = torch.prim.ListConstruct %3508, %3514, %3520 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3522 = torch.aten.reshape %3422, %3521 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_1277 = torch.constant.int 0
%int0_1278 = torch.constant.int 0
%3523 = torch.aten.select.int %3482, %int0_1277, %int0_1278 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3524 = torch.aten.item %3523 : !torch.vtensor<[1],si64> -> !torch.int
%3525 = torch.aten.eq.int %3524, %int0_1277 : !torch.int, !torch.int -> !torch.bool
%3526 = torch.aten.Int.bool %3525 : !torch.bool -> !torch.int
%int1_1279 = torch.constant.int 1
%3527 = torch.aten.mul.int %3526, %int1_1279 : !torch.int, !torch.int -> !torch.int
%3528 = torch.aten.add.int %3524, %3527 : !torch.int, !torch.int -> !torch.int
%int1_1280 = torch.constant.int 1
%3529 = torch.aten.select.int %3482, %int0_1277, %int1_1280 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3530 = torch.aten.item %3529 : !torch.vtensor<[1],si64> -> !torch.int
%3531 = torch.aten.eq.int %3530, %int0_1277 : !torch.int, !torch.int -> !torch.bool
%3532 = torch.aten.Int.bool %3531 : !torch.bool -> !torch.int
%int12_1281 = torch.constant.int 12
%3533 = torch.aten.mul.int %3532, %int12_1281 : !torch.int, !torch.int -> !torch.int
%3534 = torch.aten.add.int %3530, %3533 : !torch.int, !torch.int -> !torch.int
%int2_1282 = torch.constant.int 2
%3535 = torch.aten.select.int %3482, %int0_1277, %int2_1282 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3536 = torch.aten.item %3535 : !torch.vtensor<[1],si64> -> !torch.int
%3537 = torch.aten.eq.int %3536, %int0_1277 : !torch.int, !torch.int -> !torch.bool
%3538 = torch.aten.Int.bool %3537 : !torch.bool -> !torch.int
%int6_1283 = torch.constant.int 6
%3539 = torch.aten.mul.int %3538, %int6_1283 : !torch.int, !torch.int -> !torch.int
%3540 = torch.aten.add.int %3536, %3539 : !torch.int, !torch.int -> !torch.int
%3541 = torch.prim.ListConstruct %3528, %3534, %3540 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3542 = torch.aten.reshape %3451, %3541 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
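// As in the previous layer: transpose %3522 to [12,64,6] and batch-matmul with
// the scaled branch (%3502) to produce [12,6,6] attention scores.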
%int1_1284 = torch.constant.int 1
%int2_1285 = torch.constant.int 2
%3543 = torch.aten.transpose.int %3522, %int1_1284, %int2_1285 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%3544 = torch.aten.matmul %3502, %3543 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
%3545 = torch.vtensor.literal(dense_resource<__163> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1286 = torch.constant.int 0
%int0_1287 = torch.constant.int 0
%3546 = torch.aten.select.int %3545, %int0_1286, %int0_1287 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3547 = torch.aten.item %3546 : !torch.vtensor<[1],si64> -> !torch.int
%3548 = torch.aten.eq.int %3547, %int0_1286 : !torch.int, !torch.int -> !torch.bool
%3549 = torch.aten.Int.bool %3548 : !torch.bool -> !torch.int
%int12_1288 = torch.constant.int 12
%3550 = torch.aten.mul.int %3549, %int12_1288 : !torch.int, !torch.int -> !torch.int
%3551 = torch.aten.add.int %3547, %3550 : !torch.int, !torch.int -> !torch.int
%int1_1289 = torch.constant.int 1
%3552 = torch.aten.select.int %3545, %int0_1286, %int1_1289 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3553 = torch.aten.item %3552 : !torch.vtensor<[1],si64> -> !torch.int
%3554 = torch.aten.eq.int %3553, %int0_1286 : !torch.int, !torch.int -> !torch.bool
%3555 = torch.aten.Int.bool %3554 : !torch.bool -> !torch.int
%int6_1290 = torch.constant.int 6
%3556 = torch.aten.mul.int %3555, %int6_1290 : !torch.int, !torch.int -> !torch.int
%3557 = torch.aten.add.int %3553, %3556 : !torch.int, !torch.int -> !torch.int
%int2_1291 = torch.constant.int 2
%3558 = torch.aten.select.int %3545, %int0_1286, %int2_1291 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3559 = torch.aten.item %3558 : !torch.vtensor<[1],si64> -> !torch.int
%3560 = torch.aten.eq.int %3559, %int0_1286 : !torch.int, !torch.int -> !torch.bool
%3561 = torch.aten.Int.bool %3560 : !torch.bool -> !torch.int
%int6_1292 = torch.constant.int 6
%3562 = torch.aten.mul.int %3561, %int6_1292 : !torch.int, !torch.int -> !torch.int
%3563 = torch.aten.add.int %3559, %3562 : !torch.int, !torch.int -> !torch.int
%int3_1293 = torch.constant.int 3
%3564 = torch.aten.select.int %3545, %int0_1286, %int3_1293 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3565 = torch.aten.item %3564 : !torch.vtensor<[1],si64> -> !torch.int
%3566 = torch.aten.eq.int %3565, %int0_1286 : !torch.int, !torch.int -> !torch.bool
%3567 = torch.aten.Int.bool %3566 : !torch.bool -> !torch.int
%3568 = torch.aten.mul.int %3567, %int0_1286 : !torch.int, !torch.int -> !torch.int
%3569 = torch.aten.add.int %3565, %3568 : !torch.int, !torch.int -> !torch.int
%3570 = torch.prim.ListConstruct %3551, %3557, %3563, %3569 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3571 = torch.aten.reshape %3544, %3570 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
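// Mask add (%277) and maximum-clamp against the scalar resource __164,
// mirroring the previous layer.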
%int1_1294 = torch.constant.int 1
%3572 = torch.aten.add.Tensor %3571, %277, %int1_1294 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%3573 = torch.vtensor.literal(dense_resource<__164> : tensor<f32>) : !torch.vtensor<[],f32>
%3574 = torch.aten.maximum %3572, %3573 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
%3575 = torch.vtensor.literal(dense_resource<__165> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1295 = torch.constant.int 0
%int0_1296 = torch.constant.int 0
%3576 = torch.aten.select.int %3575, %int0_1295, %int0_1296 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3577 = torch.aten.item %3576 : !torch.vtensor<[1],si64> -> !torch.int
%3578 = torch.aten.eq.int %3577, %int0_1295 : !torch.int, !torch.int -> !torch.bool
%3579 = torch.aten.Int.bool %3578 : !torch.bool -> !torch.int
%int-1_1297 = torch.constant.int -1
%3580 = torch.aten.mul.int %3579, %int-1_1297 : !torch.int, !torch.int -> !torch.int
%3581 = torch.aten.add.int %3577, %3580 : !torch.int, !torch.int -> !torch.int
%int1_1298 = torch.constant.int 1
%3582 = torch.aten.select.int %3575, %int0_1295, %int1_1298 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3583 = torch.aten.item %3582 : !torch.vtensor<[1],si64> -> !torch.int
%3584 = torch.aten.eq.int %3583, %int0_1295 : !torch.int, !torch.int -> !torch.bool
%3585 = torch.aten.Int.bool %3584 : !torch.bool -> !torch.int
%int12_1299 = torch.constant.int 12
%3586 = torch.aten.mul.int %3585, %int12_1299 : !torch.int, !torch.int -> !torch.int
%3587 = torch.aten.add.int %3583, %3586 : !torch.int, !torch.int -> !torch.int
%int2_1300 = torch.constant.int 2
%3588 = torch.aten.select.int %3575, %int0_1295, %int2_1300 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3589 = torch.aten.item %3588 : !torch.vtensor<[1],si64> -> !torch.int
%3590 = torch.aten.eq.int %3589, %int0_1295 : !torch.int, !torch.int -> !torch.bool
%3591 = torch.aten.Int.bool %3590 : !torch.bool -> !torch.int
%int6_1301 = torch.constant.int 6
%3592 = torch.aten.mul.int %3591, %int6_1301 : !torch.int, !torch.int -> !torch.int
%3593 = torch.aten.add.int %3589, %3592 : !torch.int, !torch.int -> !torch.int
%3594 = torch.prim.ListConstruct %3581, %3587, %3593 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3595 = torch.aten.reshape %3574, %3594 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
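// Softmax over the key dimension, then [12,6,6] @ [12,6,64] -> [12,6,64],
// with %3542 in the value position.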
%int2_1302 = torch.constant.int 2
%none_1303 = torch.constant.none
%3596 = torch.aten.softmax.int %3595, %int2_1302, %none_1303 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%3597 = torch.aten.matmul %3596, %3542 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
%3598 = torch.vtensor.literal(dense_resource<__166> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1304 = torch.constant.int 0
%int0_1305 = torch.constant.int 0
%3599 = torch.aten.select.int %3598, %int0_1304, %int0_1305 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3600 = torch.aten.item %3599 : !torch.vtensor<[1],si64> -> !torch.int
%3601 = torch.aten.eq.int %3600, %int0_1304 : !torch.int, !torch.int -> !torch.bool
%3602 = torch.aten.Int.bool %3601 : !torch.bool -> !torch.int
%int12_1306 = torch.constant.int 12
%3603 = torch.aten.mul.int %3602, %int12_1306 : !torch.int, !torch.int -> !torch.int
%3604 = torch.aten.add.int %3600, %3603 : !torch.int, !torch.int -> !torch.int
%int1_1307 = torch.constant.int 1
%3605 = torch.aten.select.int %3598, %int0_1304, %int1_1307 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3606 = torch.aten.item %3605 : !torch.vtensor<[1],si64> -> !torch.int
%3607 = torch.aten.eq.int %3606, %int0_1304 : !torch.int, !torch.int -> !torch.bool
%3608 = torch.aten.Int.bool %3607 : !torch.bool -> !torch.int
%int6_1308 = torch.constant.int 6
%3609 = torch.aten.mul.int %3608, %int6_1308 : !torch.int, !torch.int -> !torch.int
%3610 = torch.aten.add.int %3606, %3609 : !torch.int, !torch.int -> !torch.int
%int2_1309 = torch.constant.int 2
%3611 = torch.aten.select.int %3598, %int0_1304, %int2_1309 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3612 = torch.aten.item %3611 : !torch.vtensor<[1],si64> -> !torch.int
%3613 = torch.aten.eq.int %3612, %int0_1304 : !torch.int, !torch.int -> !torch.bool
%3614 = torch.aten.Int.bool %3613 : !torch.bool -> !torch.int
%int64_1310 = torch.constant.int 64
%3615 = torch.aten.mul.int %3614, %int64_1310 : !torch.int, !torch.int -> !torch.int
%3616 = torch.aten.add.int %3612, %3615 : !torch.int, !torch.int -> !torch.int
%int3_1311 = torch.constant.int 3
%3617 = torch.aten.select.int %3598, %int0_1304, %int3_1311 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3618 = torch.aten.item %3617 : !torch.vtensor<[1],si64> -> !torch.int
%3619 = torch.aten.eq.int %3618, %int0_1304 : !torch.int, !torch.int -> !torch.bool
%3620 = torch.aten.Int.bool %3619 : !torch.bool -> !torch.int
%3621 = torch.aten.mul.int %3620, %int0_1304 : !torch.int, !torch.int -> !torch.int
%3622 = torch.aten.add.int %3618, %3621 : !torch.int, !torch.int -> !torch.int
%3623 = torch.prim.ListConstruct %3604, %3610, %3616, %3622 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3624 = torch.aten.reshape %3597, %3623 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_1312 = torch.constant.int 1
%int2_1313 = torch.constant.int 2
%3625 = torch.aten.transpose.int %3624, %int1_1312, %int2_1313 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%3626 = torch.vtensor.literal(dense_resource<__167> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1314 = torch.constant.int 0
%int0_1315 = torch.constant.int 0
%3627 = torch.aten.select.int %3626, %int0_1314, %int0_1315 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3628 = torch.aten.item %3627 : !torch.vtensor<[1],si64> -> !torch.int
%3629 = torch.aten.eq.int %3628, %int0_1314 : !torch.int, !torch.int -> !torch.bool
%3630 = torch.aten.Int.bool %3629 : !torch.bool -> !torch.int
%int1_1316 = torch.constant.int 1
%3631 = torch.aten.mul.int %3630, %int1_1316 : !torch.int, !torch.int -> !torch.int
%3632 = torch.aten.add.int %3628, %3631 : !torch.int, !torch.int -> !torch.int
%int1_1317 = torch.constant.int 1
%3633 = torch.aten.select.int %3626, %int0_1314, %int1_1317 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3634 = torch.aten.item %3633 : !torch.vtensor<[1],si64> -> !torch.int
%3635 = torch.aten.eq.int %3634, %int0_1314 : !torch.int, !torch.int -> !torch.bool
%3636 = torch.aten.Int.bool %3635 : !torch.bool -> !torch.int
%int6_1318 = torch.constant.int 6
%3637 = torch.aten.mul.int %3636, %int6_1318 : !torch.int, !torch.int -> !torch.int
%3638 = torch.aten.add.int %3634, %3637 : !torch.int, !torch.int -> !torch.int
%int2_1319 = torch.constant.int 2
%3639 = torch.aten.select.int %3626, %int0_1314, %int2_1319 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3640 = torch.aten.item %3639 : !torch.vtensor<[1],si64> -> !torch.int
%3641 = torch.aten.eq.int %3640, %int0_1314 : !torch.int, !torch.int -> !torch.bool
%3642 = torch.aten.Int.bool %3641 : !torch.bool -> !torch.int
%int12_1320 = torch.constant.int 12
%3643 = torch.aten.mul.int %3642, %int12_1320 : !torch.int, !torch.int -> !torch.int
%3644 = torch.aten.add.int %3640, %3643 : !torch.int, !torch.int -> !torch.int
%3645 = torch.prim.ListConstruct %3632, %3638, %3644 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3646 = torch.aten.reshape %3625, %3645 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
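// Heads merged to [1,6,768]; output projection (weights %191, bias %127) and
// residual add with %3386.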
%3647 = torch.aten.matmul %3646, %191 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1321 = torch.constant.int 1
%3648 = torch.aten.add.Tensor %127, %3647, %int1_1321 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_1322 = torch.constant.int 1
%3649 = torch.aten.add.Tensor %3386, %3648, %int1_1322 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%3650 = torch.vtensor.literal(dense_resource<__168> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_1323 = torch.constant.int 0
%int0_1324 = torch.constant.int 0
%3651 = torch.aten.select.int %3650, %int0_1323, %int0_1324 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3652 = torch.aten.item %3651 : !torch.vtensor<[1],si64> -> !torch.int
%3653 = torch.aten.eq.int %3652, %int0_1323 : !torch.int, !torch.int -> !torch.bool
%3654 = torch.aten.Int.bool %3653 : !torch.bool -> !torch.int
%int1_1325 = torch.constant.int 1
%3655 = torch.aten.mul.int %3654, %int1_1325 : !torch.int, !torch.int -> !torch.int
%3656 = torch.aten.add.int %3652, %3655 : !torch.int, !torch.int -> !torch.int
%int1_1326 = torch.constant.int 1
%3657 = torch.aten.select.int %3650, %int0_1323, %int1_1326 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3658 = torch.aten.item %3657 : !torch.vtensor<[1],si64> -> !torch.int
%3659 = torch.aten.eq.int %3658, %int0_1323 : !torch.int, !torch.int -> !torch.bool
%3660 = torch.aten.Int.bool %3659 : !torch.bool -> !torch.int
%int6_1327 = torch.constant.int 6
%3661 = torch.aten.mul.int %3660, %int6_1327 : !torch.int, !torch.int -> !torch.int
%3662 = torch.aten.add.int %3658, %3661 : !torch.int, !torch.int -> !torch.int
%3663 = torch.prim.ListConstruct %3656, %3662 : (!torch.int, !torch.int) -> !torch.list<int>
%3664 = torch.aten.reshape %3649, %3663 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
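// Layer norm on [6,768], then the FFN (mm -> relu -> mm, 768 -> 3072 -> 768)
// with bias adds and a residual at %3673.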
%float9.999990e-06_1328 = torch.constant.float 9.9999997473787516E-6
%int768_1329 = torch.constant.int 768
%3665 = torch.prim.ListConstruct %int768_1329 : (!torch.int) -> !torch.list<int>
%result0_1330, %result1_1331, %result2_1332 = torch.aten.native_layer_norm %3664, %3665, %134, %135, %float9.999990e-06_1328 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_1333 = torch.constant.int 0
%int1_1334 = torch.constant.int 1
%3666 = torch.aten.transpose.int %130, %int0_1333, %int1_1334 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%3667 = torch.aten.mm %result0_1330, %3666 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%3668 = torch.aten.add.Tensor %3667, %131, %int1_1334 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%3669 = torch.aten.relu %3668 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_1335 = torch.constant.int 0
%int1_1336 = torch.constant.int 1
%3670 = torch.aten.transpose.int %132, %int0_1335, %int1_1336 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%3671 = torch.aten.mm %3669, %3670 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%3672 = torch.aten.add.Tensor %3671, %133, %int1_1336 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_1337 = torch.constant.int 1
%3673 = torch.aten.add.Tensor %3664, %3672, %int1_1337 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%3674 = torch.vtensor.literal(dense_resource<__169> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1338 = torch.constant.int 0
%int0_1339 = torch.constant.int 0
%3675 = torch.aten.select.int %3674, %int0_1338, %int0_1339 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3676 = torch.aten.item %3675 : !torch.vtensor<[1],si64> -> !torch.int
%3677 = torch.aten.eq.int %3676, %int0_1338 : !torch.int, !torch.int -> !torch.bool
%3678 = torch.aten.Int.bool %3677 : !torch.bool -> !torch.int
%int6_1340 = torch.constant.int 6
%3679 = torch.aten.mul.int %3678, %int6_1340 : !torch.int, !torch.int -> !torch.int
%3680 = torch.aten.add.int %3676, %3679 : !torch.int, !torch.int -> !torch.int
%int1_1341 = torch.constant.int 1
%3681 = torch.aten.select.int %3674, %int0_1338, %int1_1341 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3682 = torch.aten.item %3681 : !torch.vtensor<[1],si64> -> !torch.int
%3683 = torch.aten.eq.int %3682, %int0_1338 : !torch.int, !torch.int -> !torch.bool
%3684 = torch.aten.Int.bool %3683 : !torch.bool -> !torch.int
%int768_1342 = torch.constant.int 768
%3685 = torch.aten.mul.int %3684, %int768_1342 : !torch.int, !torch.int -> !torch.int
%3686 = torch.aten.add.int %3682, %3685 : !torch.int, !torch.int -> !torch.int
%int2_1343 = torch.constant.int 2
%3687 = torch.aten.select.int %3674, %int0_1338, %int2_1343 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3688 = torch.aten.item %3687 : !torch.vtensor<[1],si64> -> !torch.int
%3689 = torch.aten.eq.int %3688, %int0_1338 : !torch.int, !torch.int -> !torch.bool
%3690 = torch.aten.Int.bool %3689 : !torch.bool -> !torch.int
%3691 = torch.aten.mul.int %3690, %int0_1338 : !torch.int, !torch.int -> !torch.int
%3692 = torch.aten.add.int %3688, %3691 : !torch.int, !torch.int -> !torch.int
%3693 = torch.prim.ListConstruct %3680, %3686, %3692 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3694 = torch.aten.reshape %3673, %3693 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
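// Another decoder layer appears to begin: layer norm (%140/%141), a projection
// (%192) scaled by the scalar resource __170, followed by the remaining
// projections below.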
%float9.999990e-06_1344 = torch.constant.float 9.9999997473787516E-6
%int768_1345 = torch.constant.int 768
%3695 = torch.prim.ListConstruct %int768_1345 : (!torch.int) -> !torch.list<int>
%result0_1346, %result1_1347, %result2_1348 = torch.aten.native_layer_norm %3694, %3695, %140, %141, %float9.999990e-06_1344 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
%3696 = torch.aten.matmul %result0_1346, %192 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1349 = torch.constant.int 1
%3697 = torch.aten.add.Tensor %138, %3696, %int1_1349 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%3698 = torch.vtensor.literal(dense_resource<__170> : tensor<f32>) : !torch.vtensor<[],f32>
%3699 = torch.aten.mul.Tensor %3697, %3698 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%3700 = torch.aten.matmul %result0_1346, %193 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1350 = torch.constant.int 1
%3701 = torch.aten.add.Tensor %136, %3700, %int1_1350 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%3702 = torch.vtensor.literal(dense_resource<__171> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%3703 = torch.vtensor.literal(dense_resource<__172> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1351 = torch.constant.int 0
%int0_1352 = torch.constant.int 0
%3704 = torch.aten.select.int %3702, %int0_1351, %int0_1352 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3705 = torch.aten.item %3704 : !torch.vtensor<[1],si64> -> !torch.int
%3706 = torch.aten.eq.int %3705, %int0_1351 : !torch.int, !torch.int -> !torch.bool
%3707 = torch.aten.Int.bool %3706 : !torch.bool -> !torch.int
%int1_1353 = torch.constant.int 1
%3708 = torch.aten.mul.int %3707, %int1_1353 : !torch.int, !torch.int -> !torch.int
%3709 = torch.aten.add.int %3705, %3708 : !torch.int, !torch.int -> !torch.int
%int1_1354 = torch.constant.int 1
%3710 = torch.aten.select.int %3702, %int0_1351, %int1_1354 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3711 = torch.aten.item %3710 : !torch.vtensor<[1],si64> -> !torch.int
%3712 = torch.aten.eq.int %3711, %int0_1351 : !torch.int, !torch.int -> !torch.bool
%3713 = torch.aten.Int.bool %3712 : !torch.bool -> !torch.int
%int6_1355 = torch.constant.int 6
%3714 = torch.aten.mul.int %3713, %int6_1355 : !torch.int, !torch.int -> !torch.int
%3715 = torch.aten.add.int %3711, %3714 : !torch.int, !torch.int -> !torch.int
%int2_1356 = torch.constant.int 2
%3716 = torch.aten.select.int %3702, %int0_1351, %int2_1356 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3717 = torch.aten.item %3716 : !torch.vtensor<[1],si64> -> !torch.int
%3718 = torch.aten.eq.int %3717, %int0_1351 : !torch.int, !torch.int -> !torch.bool
%3719 = torch.aten.Int.bool %3718 : !torch.bool -> !torch.int
%int768_1357 = torch.constant.int 768
%3720 = torch.aten.mul.int %3719, %int768_1357 : !torch.int, !torch.int -> !torch.int
%3721 = torch.aten.add.int %3717, %3720 : !torch.int, !torch.int -> !torch.int
%int3_1358 = torch.constant.int 3
%3722 = torch.aten.select.int %3702, %int0_1351, %int3_1358 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3723 = torch.aten.item %3722 : !torch.vtensor<[1],si64> -> !torch.int
%3724 = torch.aten.eq.int %3723, %int0_1351 : !torch.int, !torch.int -> !torch.bool
%3725 = torch.aten.Int.bool %3724 : !torch.bool -> !torch.int
%3726 = torch.aten.mul.int %3725, %int0_1351 : !torch.int, !torch.int -> !torch.int
%3727 = torch.aten.add.int %3723, %3726 : !torch.int, !torch.int -> !torch.int
%3728 = torch.prim.ListConstruct %3709, %3715, %3721, %3727 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3729 = torch.aten.reshape %3701, %3728 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1359 = torch.constant.int 1
%int2_1360 = torch.constant.int 2
%3730 = torch.aten.transpose.int %3729, %int1_1359, %int2_1360 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%3731 = torch.aten.matmul %result0_1346, %194 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1361 = torch.constant.int 1
%3732 = torch.aten.add.Tensor %137, %3731, %int1_1361 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_1362 = torch.constant.int 0
%int0_1363 = torch.constant.int 0
%3733 = torch.aten.select.int %3703, %int0_1362, %int0_1363 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3734 = torch.aten.item %3733 : !torch.vtensor<[1],si64> -> !torch.int
%3735 = torch.aten.eq.int %3734, %int0_1362 : !torch.int, !torch.int -> !torch.bool
%3736 = torch.aten.Int.bool %3735 : !torch.bool -> !torch.int
%int1_1364 = torch.constant.int 1
%3737 = torch.aten.mul.int %3736, %int1_1364 : !torch.int, !torch.int -> !torch.int
%3738 = torch.aten.add.int %3734, %3737 : !torch.int, !torch.int -> !torch.int
%int1_1365 = torch.constant.int 1
%3739 = torch.aten.select.int %3703, %int0_1362, %int1_1365 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3740 = torch.aten.item %3739 : !torch.vtensor<[1],si64> -> !torch.int
%3741 = torch.aten.eq.int %3740, %int0_1362 : !torch.int, !torch.int -> !torch.bool
%3742 = torch.aten.Int.bool %3741 : !torch.bool -> !torch.int
%int6_1366 = torch.constant.int 6
%3743 = torch.aten.mul.int %3742, %int6_1366 : !torch.int, !torch.int -> !torch.int
%3744 = torch.aten.add.int %3740, %3743 : !torch.int, !torch.int -> !torch.int
%int2_1367 = torch.constant.int 2
%3745 = torch.aten.select.int %3703, %int0_1362, %int2_1367 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3746 = torch.aten.item %3745 : !torch.vtensor<[1],si64> -> !torch.int
%3747 = torch.aten.eq.int %3746, %int0_1362 : !torch.int, !torch.int -> !torch.bool
%3748 = torch.aten.Int.bool %3747 : !torch.bool -> !torch.int
%int768_1368 = torch.constant.int 768
%3749 = torch.aten.mul.int %3748, %int768_1368 : !torch.int, !torch.int -> !torch.int
%3750 = torch.aten.add.int %3746, %3749 : !torch.int, !torch.int -> !torch.int
%int3_1369 = torch.constant.int 3
%3751 = torch.aten.select.int %3703, %int0_1362, %int3_1369 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3752 = torch.aten.item %3751 : !torch.vtensor<[1],si64> -> !torch.int
%3753 = torch.aten.eq.int %3752, %int0_1362 : !torch.int, !torch.int -> !torch.bool
%3754 = torch.aten.Int.bool %3753 : !torch.bool -> !torch.int
%3755 = torch.aten.mul.int %3754, %int0_1362 : !torch.int, !torch.int -> !torch.int
%3756 = torch.aten.add.int %3752, %3755 : !torch.int, !torch.int -> !torch.int
%3757 = torch.prim.ListConstruct %3738, %3744, %3750, %3756 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3758 = torch.aten.reshape %3732, %3757 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1370 = torch.constant.int 1
%int2_1371 = torch.constant.int 2
%3759 = torch.aten.transpose.int %3758, %int1_1370, %int2_1371 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%3760 = torch.vtensor.literal(dense_resource<__173> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1372 = torch.constant.int 0
%int0_1373 = torch.constant.int 0
%3761 = torch.aten.select.int %3760, %int0_1372, %int0_1373 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3762 = torch.aten.item %3761 : !torch.vtensor<[1],si64> -> !torch.int
%3763 = torch.aten.eq.int %3762, %int0_1372 : !torch.int, !torch.int -> !torch.bool
%3764 = torch.aten.Int.bool %3763 : !torch.bool -> !torch.int
%int1_1374 = torch.constant.int 1
%3765 = torch.aten.mul.int %3764, %int1_1374 : !torch.int, !torch.int -> !torch.int
%3766 = torch.aten.add.int %3762, %3765 : !torch.int, !torch.int -> !torch.int
%int1_1375 = torch.constant.int 1
%3767 = torch.aten.select.int %3760, %int0_1372, %int1_1375 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3768 = torch.aten.item %3767 : !torch.vtensor<[1],si64> -> !torch.int
%3769 = torch.aten.eq.int %3768, %int0_1372 : !torch.int, !torch.int -> !torch.bool
%3770 = torch.aten.Int.bool %3769 : !torch.bool -> !torch.int
%int6_1376 = torch.constant.int 6
%3771 = torch.aten.mul.int %3770, %int6_1376 : !torch.int, !torch.int -> !torch.int
%3772 = torch.aten.add.int %3768, %3771 : !torch.int, !torch.int -> !torch.int
%int2_1377 = torch.constant.int 2
%3773 = torch.aten.select.int %3760, %int0_1372, %int2_1377 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3774 = torch.aten.item %3773 : !torch.vtensor<[1],si64> -> !torch.int
%3775 = torch.aten.eq.int %3774, %int0_1372 : !torch.int, !torch.int -> !torch.bool
%3776 = torch.aten.Int.bool %3775 : !torch.bool -> !torch.int
%int768_1378 = torch.constant.int 768
%3777 = torch.aten.mul.int %3776, %int768_1378 : !torch.int, !torch.int -> !torch.int
%3778 = torch.aten.add.int %3774, %3777 : !torch.int, !torch.int -> !torch.int
%int3_1379 = torch.constant.int 3
%3779 = torch.aten.select.int %3760, %int0_1372, %int3_1379 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3780 = torch.aten.item %3779 : !torch.vtensor<[1],si64> -> !torch.int
%3781 = torch.aten.eq.int %3780, %int0_1372 : !torch.int, !torch.int -> !torch.bool
%3782 = torch.aten.Int.bool %3781 : !torch.bool -> !torch.int
%3783 = torch.aten.mul.int %3782, %int0_1372 : !torch.int, !torch.int -> !torch.int
%3784 = torch.aten.add.int %3780, %3783 : !torch.int, !torch.int -> !torch.int
%3785 = torch.prim.ListConstruct %3766, %3772, %3778, %3784 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3786 = torch.aten.reshape %3699, %3785 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_1380 = torch.constant.int 1
%int2_1381 = torch.constant.int 2
%3787 = torch.aten.transpose.int %3786, %int1_1380, %int2_1381 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%3788 = torch.vtensor.literal(dense_resource<__174> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%3789 = torch.vtensor.literal(dense_resource<__175> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%3790 = torch.vtensor.literal(dense_resource<__176> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1382 = torch.constant.int 0
%int0_1383 = torch.constant.int 0
%3791 = torch.aten.select.int %3788, %int0_1382, %int0_1383 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3792 = torch.aten.item %3791 : !torch.vtensor<[1],si64> -> !torch.int
%3793 = torch.aten.eq.int %3792, %int0_1382 : !torch.int, !torch.int -> !torch.bool
%3794 = torch.aten.Int.bool %3793 : !torch.bool -> !torch.int
%int1_1384 = torch.constant.int 1
%3795 = torch.aten.mul.int %3794, %int1_1384 : !torch.int, !torch.int -> !torch.int
%3796 = torch.aten.add.int %3792, %3795 : !torch.int, !torch.int -> !torch.int
%int1_1385 = torch.constant.int 1
%3797 = torch.aten.select.int %3788, %int0_1382, %int1_1385 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3798 = torch.aten.item %3797 : !torch.vtensor<[1],si64> -> !torch.int
%3799 = torch.aten.eq.int %3798, %int0_1382 : !torch.int, !torch.int -> !torch.bool
%3800 = torch.aten.Int.bool %3799 : !torch.bool -> !torch.int
%int12_1386 = torch.constant.int 12
%3801 = torch.aten.mul.int %3800, %int12_1386 : !torch.int, !torch.int -> !torch.int
%3802 = torch.aten.add.int %3798, %3801 : !torch.int, !torch.int -> !torch.int
%int2_1387 = torch.constant.int 2
%3803 = torch.aten.select.int %3788, %int0_1382, %int2_1387 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3804 = torch.aten.item %3803 : !torch.vtensor<[1],si64> -> !torch.int
%3805 = torch.aten.eq.int %3804, %int0_1382 : !torch.int, !torch.int -> !torch.bool
%3806 = torch.aten.Int.bool %3805 : !torch.bool -> !torch.int
%int6_1388 = torch.constant.int 6
%3807 = torch.aten.mul.int %3806, %int6_1388 : !torch.int, !torch.int -> !torch.int
%3808 = torch.aten.add.int %3804, %3807 : !torch.int, !torch.int -> !torch.int
%3809 = torch.prim.ListConstruct %3796, %3802, %3808 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3810 = torch.aten.reshape %3787, %3809 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_1389 = torch.constant.int 0
%int0_1390 = torch.constant.int 0
%3811 = torch.aten.select.int %3789, %int0_1389, %int0_1390 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3812 = torch.aten.item %3811 : !torch.vtensor<[1],si64> -> !torch.int
%3813 = torch.aten.eq.int %3812, %int0_1389 : !torch.int, !torch.int -> !torch.bool
%3814 = torch.aten.Int.bool %3813 : !torch.bool -> !torch.int
%int1_1391 = torch.constant.int 1
%3815 = torch.aten.mul.int %3814, %int1_1391 : !torch.int, !torch.int -> !torch.int
%3816 = torch.aten.add.int %3812, %3815 : !torch.int, !torch.int -> !torch.int
%int1_1392 = torch.constant.int 1
%3817 = torch.aten.select.int %3789, %int0_1389, %int1_1392 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3818 = torch.aten.item %3817 : !torch.vtensor<[1],si64> -> !torch.int
%3819 = torch.aten.eq.int %3818, %int0_1389 : !torch.int, !torch.int -> !torch.bool
%3820 = torch.aten.Int.bool %3819 : !torch.bool -> !torch.int
%int12_1393 = torch.constant.int 12
%3821 = torch.aten.mul.int %3820, %int12_1393 : !torch.int, !torch.int -> !torch.int
%3822 = torch.aten.add.int %3818, %3821 : !torch.int, !torch.int -> !torch.int
%int2_1394 = torch.constant.int 2
%3823 = torch.aten.select.int %3789, %int0_1389, %int2_1394 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3824 = torch.aten.item %3823 : !torch.vtensor<[1],si64> -> !torch.int
%3825 = torch.aten.eq.int %3824, %int0_1389 : !torch.int, !torch.int -> !torch.bool
%3826 = torch.aten.Int.bool %3825 : !torch.bool -> !torch.int
%int6_1395 = torch.constant.int 6
%3827 = torch.aten.mul.int %3826, %int6_1395 : !torch.int, !torch.int -> !torch.int
%3828 = torch.aten.add.int %3824, %3827 : !torch.int, !torch.int -> !torch.int
%3829 = torch.prim.ListConstruct %3816, %3822, %3828 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3830 = torch.aten.reshape %3730, %3829 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_1396 = torch.constant.int 0
%int0_1397 = torch.constant.int 0
%3831 = torch.aten.select.int %3790, %int0_1396, %int0_1397 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3832 = torch.aten.item %3831 : !torch.vtensor<[1],si64> -> !torch.int
%3833 = torch.aten.eq.int %3832, %int0_1396 : !torch.int, !torch.int -> !torch.bool
%3834 = torch.aten.Int.bool %3833 : !torch.bool -> !torch.int
%int1_1398 = torch.constant.int 1
%3835 = torch.aten.mul.int %3834, %int1_1398 : !torch.int, !torch.int -> !torch.int
%3836 = torch.aten.add.int %3832, %3835 : !torch.int, !torch.int -> !torch.int
%int1_1399 = torch.constant.int 1
%3837 = torch.aten.select.int %3790, %int0_1396, %int1_1399 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3838 = torch.aten.item %3837 : !torch.vtensor<[1],si64> -> !torch.int
%3839 = torch.aten.eq.int %3838, %int0_1396 : !torch.int, !torch.int -> !torch.bool
%3840 = torch.aten.Int.bool %3839 : !torch.bool -> !torch.int
%int12_1400 = torch.constant.int 12
%3841 = torch.aten.mul.int %3840, %int12_1400 : !torch.int, !torch.int -> !torch.int
%3842 = torch.aten.add.int %3838, %3841 : !torch.int, !torch.int -> !torch.int
%int2_1401 = torch.constant.int 2
%3843 = torch.aten.select.int %3790, %int0_1396, %int2_1401 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3844 = torch.aten.item %3843 : !torch.vtensor<[1],si64> -> !torch.int
%3845 = torch.aten.eq.int %3844, %int0_1396 : !torch.int, !torch.int -> !torch.bool
%3846 = torch.aten.Int.bool %3845 : !torch.bool -> !torch.int
%int6_1402 = torch.constant.int 6
%3847 = torch.aten.mul.int %3846, %int6_1402 : !torch.int, !torch.int -> !torch.int
%3848 = torch.aten.add.int %3844, %3847 : !torch.int, !torch.int -> !torch.int
%3849 = torch.prim.ListConstruct %3836, %3842, %3848 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3850 = torch.aten.reshape %3759, %3849 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int1_1403 = torch.constant.int 1
%int2_1404 = torch.constant.int 2
%3851 = torch.aten.transpose.int %3830, %int1_1403, %int2_1404 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%3852 = torch.aten.matmul %3810, %3851 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
%3853 = torch.vtensor.literal(dense_resource<__177> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1405 = torch.constant.int 0
%int0_1406 = torch.constant.int 0
%3854 = torch.aten.select.int %3853, %int0_1405, %int0_1406 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3855 = torch.aten.item %3854 : !torch.vtensor<[1],si64> -> !torch.int
%3856 = torch.aten.eq.int %3855, %int0_1405 : !torch.int, !torch.int -> !torch.bool
%3857 = torch.aten.Int.bool %3856 : !torch.bool -> !torch.int
%int12_1407 = torch.constant.int 12
%3858 = torch.aten.mul.int %3857, %int12_1407 : !torch.int, !torch.int -> !torch.int
%3859 = torch.aten.add.int %3855, %3858 : !torch.int, !torch.int -> !torch.int
%int1_1408 = torch.constant.int 1
%3860 = torch.aten.select.int %3853, %int0_1405, %int1_1408 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3861 = torch.aten.item %3860 : !torch.vtensor<[1],si64> -> !torch.int
%3862 = torch.aten.eq.int %3861, %int0_1405 : !torch.int, !torch.int -> !torch.bool
%3863 = torch.aten.Int.bool %3862 : !torch.bool -> !torch.int
%int6_1409 = torch.constant.int 6
%3864 = torch.aten.mul.int %3863, %int6_1409 : !torch.int, !torch.int -> !torch.int
%3865 = torch.aten.add.int %3861, %3864 : !torch.int, !torch.int -> !torch.int
%int2_1410 = torch.constant.int 2
%3866 = torch.aten.select.int %3853, %int0_1405, %int2_1410 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3867 = torch.aten.item %3866 : !torch.vtensor<[1],si64> -> !torch.int
%3868 = torch.aten.eq.int %3867, %int0_1405 : !torch.int, !torch.int -> !torch.bool
%3869 = torch.aten.Int.bool %3868 : !torch.bool -> !torch.int
%int6_1411 = torch.constant.int 6
%3870 = torch.aten.mul.int %3869, %int6_1411 : !torch.int, !torch.int -> !torch.int
%3871 = torch.aten.add.int %3867, %3870 : !torch.int, !torch.int -> !torch.int
%int3_1412 = torch.constant.int 3
%3872 = torch.aten.select.int %3853, %int0_1405, %int3_1412 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3873 = torch.aten.item %3872 : !torch.vtensor<[1],si64> -> !torch.int
%3874 = torch.aten.eq.int %3873, %int0_1405 : !torch.int, !torch.int -> !torch.bool
%3875 = torch.aten.Int.bool %3874 : !torch.bool -> !torch.int
%3876 = torch.aten.mul.int %3875, %int0_1405 : !torch.int, !torch.int -> !torch.int
%3877 = torch.aten.add.int %3873, %3876 : !torch.int, !torch.int -> !torch.int
%3878 = torch.prim.ListConstruct %3859, %3865, %3871, %3877 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3879 = torch.aten.reshape %3852, %3878 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
%int1_1413 = torch.constant.int 1
%3880 = torch.aten.add.Tensor %3879, %277, %int1_1413 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%3881 = torch.vtensor.literal(dense_resource<__178> : tensor<f32>) : !torch.vtensor<[],f32>
%3882 = torch.aten.maximum %3880, %3881 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
%3883 = torch.vtensor.literal(dense_resource<__179> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1414 = torch.constant.int 0
%int0_1415 = torch.constant.int 0
%3884 = torch.aten.select.int %3883, %int0_1414, %int0_1415 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3885 = torch.aten.item %3884 : !torch.vtensor<[1],si64> -> !torch.int
%3886 = torch.aten.eq.int %3885, %int0_1414 : !torch.int, !torch.int -> !torch.bool
%3887 = torch.aten.Int.bool %3886 : !torch.bool -> !torch.int
%int-1_1416 = torch.constant.int -1
%3888 = torch.aten.mul.int %3887, %int-1_1416 : !torch.int, !torch.int -> !torch.int
%3889 = torch.aten.add.int %3885, %3888 : !torch.int, !torch.int -> !torch.int
%int1_1417 = torch.constant.int 1
%3890 = torch.aten.select.int %3883, %int0_1414, %int1_1417 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3891 = torch.aten.item %3890 : !torch.vtensor<[1],si64> -> !torch.int
%3892 = torch.aten.eq.int %3891, %int0_1414 : !torch.int, !torch.int -> !torch.bool
%3893 = torch.aten.Int.bool %3892 : !torch.bool -> !torch.int
%int12_1418 = torch.constant.int 12
%3894 = torch.aten.mul.int %3893, %int12_1418 : !torch.int, !torch.int -> !torch.int
%3895 = torch.aten.add.int %3891, %3894 : !torch.int, !torch.int -> !torch.int
%int2_1419 = torch.constant.int 2
%3896 = torch.aten.select.int %3883, %int0_1414, %int2_1419 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3897 = torch.aten.item %3896 : !torch.vtensor<[1],si64> -> !torch.int
%3898 = torch.aten.eq.int %3897, %int0_1414 : !torch.int, !torch.int -> !torch.bool
%3899 = torch.aten.Int.bool %3898 : !torch.bool -> !torch.int
%int6_1420 = torch.constant.int 6
%3900 = torch.aten.mul.int %3899, %int6_1420 : !torch.int, !torch.int -> !torch.int
%3901 = torch.aten.add.int %3897, %3900 : !torch.int, !torch.int -> !torch.int
%3902 = torch.prim.ListConstruct %3889, %3895, %3901 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3903 = torch.aten.reshape %3882, %3902 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
%int2_1421 = torch.constant.int 2
%none_1422 = torch.constant.none
%3904 = torch.aten.softmax.int %3903, %int2_1421, %none_1422 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%3905 = torch.aten.matmul %3904, %3850 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
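// The block above is one attention core: queries, keys, and values are
// collapsed from [1,12,6,64] to [12,6,64] so the matmuls run batched per
// head, scores = Q·Kᵀ gives [12,6,6], the reshape via __177 restores the
// batch dim, the mask %277 is added, and torch.aten.maximum clamps against
// the scalar in __178 (presumably the most-negative finite float, so fully
// masked rows stay finite). After reshaping back to [12,6,6], softmax runs
// over the last axis and context = softmax(scores)·V lands in [12,6,64].
// In short: context = softmax(Q·Kᵀ + mask)·V.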
%3906 = torch.vtensor.literal(dense_resource<__180> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_1423 = torch.constant.int 0
%int0_1424 = torch.constant.int 0
%3907 = torch.aten.select.int %3906, %int0_1423, %int0_1424 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3908 = torch.aten.item %3907 : !torch.vtensor<[1],si64> -> !torch.int
%3909 = torch.aten.eq.int %3908, %int0_1423 : !torch.int, !torch.int -> !torch.bool
%3910 = torch.aten.Int.bool %3909 : !torch.bool -> !torch.int
%int12_1425 = torch.constant.int 12
%3911 = torch.aten.mul.int %3910, %int12_1425 : !torch.int, !torch.int -> !torch.int
%3912 = torch.aten.add.int %3908, %3911 : !torch.int, !torch.int -> !torch.int
%int1_1426 = torch.constant.int 1
%3913 = torch.aten.select.int %3906, %int0_1423, %int1_1426 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3914 = torch.aten.item %3913 : !torch.vtensor<[1],si64> -> !torch.int
%3915 = torch.aten.eq.int %3914, %int0_1423 : !torch.int, !torch.int -> !torch.bool
%3916 = torch.aten.Int.bool %3915 : !torch.bool -> !torch.int
%int6_1427 = torch.constant.int 6
%3917 = torch.aten.mul.int %3916, %int6_1427 : !torch.int, !torch.int -> !torch.int
%3918 = torch.aten.add.int %3914, %3917 : !torch.int, !torch.int -> !torch.int
%int2_1428 = torch.constant.int 2
%3919 = torch.aten.select.int %3906, %int0_1423, %int2_1428 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3920 = torch.aten.item %3919 : !torch.vtensor<[1],si64> -> !torch.int
%3921 = torch.aten.eq.int %3920, %int0_1423 : !torch.int, !torch.int -> !torch.bool
%3922 = torch.aten.Int.bool %3921 : !torch.bool -> !torch.int
%int64_1429 = torch.constant.int 64
%3923 = torch.aten.mul.int %3922, %int64_1429 : !torch.int, !torch.int -> !torch.int
%3924 = torch.aten.add.int %3920, %3923 : !torch.int, !torch.int -> !torch.int
%int3_1430 = torch.constant.int 3
%3925 = torch.aten.select.int %3906, %int0_1423, %int3_1430 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3926 = torch.aten.item %3925 : !torch.vtensor<[1],si64> -> !torch.int
%3927 = torch.aten.eq.int %3926, %int0_1423 : !torch.int, !torch.int -> !torch.bool
%3928 = torch.aten.Int.bool %3927 : !torch.bool -> !torch.int
%3929 = torch.aten.mul.int %3928, %int0_1423 : !torch.int, !torch.int -> !torch.int
%3930 = torch.aten.add.int %3926, %3929 : !torch.int, !torch.int -> !torch.int
%3931 = torch.prim.ListConstruct %3912, %3918, %3924, %3930 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3932 = torch.aten.reshape %3905, %3931 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_1431 = torch.constant.int 1
%int2_1432 = torch.constant.int 2
%3933 = torch.aten.transpose.int %3932, %int1_1431, %int2_1432 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%3934 = torch.vtensor.literal(dense_resource<__181> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1433 = torch.constant.int 0
%int0_1434 = torch.constant.int 0
%3935 = torch.aten.select.int %3934, %int0_1433, %int0_1434 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3936 = torch.aten.item %3935 : !torch.vtensor<[1],si64> -> !torch.int
%3937 = torch.aten.eq.int %3936, %int0_1433 : !torch.int, !torch.int -> !torch.bool
%3938 = torch.aten.Int.bool %3937 : !torch.bool -> !torch.int
%int1_1435 = torch.constant.int 1
%3939 = torch.aten.mul.int %3938, %int1_1435 : !torch.int, !torch.int -> !torch.int
%3940 = torch.aten.add.int %3936, %3939 : !torch.int, !torch.int -> !torch.int
%int1_1436 = torch.constant.int 1
%3941 = torch.aten.select.int %3934, %int0_1433, %int1_1436 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3942 = torch.aten.item %3941 : !torch.vtensor<[1],si64> -> !torch.int
%3943 = torch.aten.eq.int %3942, %int0_1433 : !torch.int, !torch.int -> !torch.bool
%3944 = torch.aten.Int.bool %3943 : !torch.bool -> !torch.int
%int6_1437 = torch.constant.int 6
%3945 = torch.aten.mul.int %3944, %int6_1437 : !torch.int, !torch.int -> !torch.int
%3946 = torch.aten.add.int %3942, %3945 : !torch.int, !torch.int -> !torch.int
%int2_1438 = torch.constant.int 2
%3947 = torch.aten.select.int %3934, %int0_1433, %int2_1438 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3948 = torch.aten.item %3947 : !torch.vtensor<[1],si64> -> !torch.int
%3949 = torch.aten.eq.int %3948, %int0_1433 : !torch.int, !torch.int -> !torch.bool
%3950 = torch.aten.Int.bool %3949 : !torch.bool -> !torch.int
%int12_1439 = torch.constant.int 12
%3951 = torch.aten.mul.int %3950, %int12_1439 : !torch.int, !torch.int -> !torch.int
%3952 = torch.aten.add.int %3948, %3951 : !torch.int, !torch.int -> !torch.int
%3953 = torch.prim.ListConstruct %3940, %3946, %3952 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3954 = torch.aten.reshape %3933, %3953 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%3955 = torch.aten.matmul %3954, %195 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_1440 = torch.constant.int 1
%3956 = torch.aten.add.Tensor %139, %3955, %int1_1440 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_1441 = torch.constant.int 1
%3957 = torch.aten.add.Tensor %3694, %3956, %int1_1441 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%3958 = torch.vtensor.literal(dense_resource<__182> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_1442 = torch.constant.int 0
%int0_1443 = torch.constant.int 0
%3959 = torch.aten.select.int %3958, %int0_1442, %int0_1443 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3960 = torch.aten.item %3959 : !torch.vtensor<[1],si64> -> !torch.int
%3961 = torch.aten.eq.int %3960, %int0_1442 : !torch.int, !torch.int -> !torch.bool
%3962 = torch.aten.Int.bool %3961 : !torch.bool -> !torch.int
%int1_1444 = torch.constant.int 1
%3963 = torch.aten.mul.int %3962, %int1_1444 : !torch.int, !torch.int -> !torch.int
%3964 = torch.aten.add.int %3960, %3963 : !torch.int, !torch.int -> !torch.int
%int1_1445 = torch.constant.int 1
%3965 = torch.aten.select.int %3958, %int0_1442, %int1_1445 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3966 = torch.aten.item %3965 : !torch.vtensor<[1],si64> -> !torch.int
%3967 = torch.aten.eq.int %3966, %int0_1442 : !torch.int, !torch.int -> !torch.bool
%3968 = torch.aten.Int.bool %3967 : !torch.bool -> !torch.int
%int6_1446 = torch.constant.int 6
%3969 = torch.aten.mul.int %3968, %int6_1446 : !torch.int, !torch.int -> !torch.int
%3970 = torch.aten.add.int %3966, %3969 : !torch.int, !torch.int -> !torch.int
%3971 = torch.prim.ListConstruct %3964, %3970 : (!torch.int, !torch.int) -> !torch.list<int>
%3972 = torch.aten.reshape %3957, %3971 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
%float9.999990e-06_1447 = torch.constant.float 9.9999997473787516E-6
%int768_1448 = torch.constant.int 768
%3973 = torch.prim.ListConstruct %int768_1448 : (!torch.int) -> !torch.list<int>
%result0_1449, %result1_1450, %result2_1451 = torch.aten.native_layer_norm %3972, %3973, %146, %147, %float9.999990e-06_1447 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_1452 = torch.constant.int 0
%int1_1453 = torch.constant.int 1
%3974 = torch.aten.transpose.int %142, %int0_1452, %int1_1453 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%3975 = torch.aten.mm %result0_1449, %3974 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%3976 = torch.aten.add.Tensor %3975, %143, %int1_1453 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%3977 = torch.aten.relu %3976 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_1454 = torch.constant.int 0
%int1_1455 = torch.constant.int 1
%3978 = torch.aten.transpose.int %144, %int0_1454, %int1_1455 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%3979 = torch.aten.mm %3977, %3978 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%3980 = torch.aten.add.Tensor %3979, %145, %int1_1455 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_1456 = torch.constant.int 1
%3981 = torch.aten.add.Tensor %3972, %3980, %int1_1456 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
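// The lines above are the decoder layer's feed-forward block on the
// residual stream, with shapes flattened to [6,768]:
//   y = x + W2 · relu(W1 · LayerNorm(x) + b1) + b2
// where W1 is the [3072,768] fc1 weight (transposed before the mm), W2 the
// [768,3072] fc2 weight, and x the attention output %3972. result0 of
// native_layer_norm is the normalized activation; result1/result2 are the
// per-row mean and rstd, unused here.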
%3982 = torch.vtensor.literal(dense_resource<__183> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_1457 = torch.constant.int 0
%int0_1458 = torch.constant.int 0
%3983 = torch.aten.select.int %3982, %int0_1457, %int0_1458 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3984 = torch.aten.item %3983 : !torch.vtensor<[1],si64> -> !torch.int
%3985 = torch.aten.eq.int %3984, %int0_1457 : !torch.int, !torch.int -> !torch.bool
%3986 = torch.aten.Int.bool %3985 : !torch.bool -> !torch.int
%int6_1459 = torch.constant.int 6
%3987 = torch.aten.mul.int %3986, %int6_1459 : !torch.int, !torch.int -> !torch.int
%3988 = torch.aten.add.int %3984, %3987 : !torch.int, !torch.int -> !torch.int
%int1_1460 = torch.constant.int 1
%3989 = torch.aten.select.int %3982, %int0_1457, %int1_1460 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3990 = torch.aten.item %3989 : !torch.vtensor<[1],si64> -> !torch.int
%3991 = torch.aten.eq.int %3990, %int0_1457 : !torch.int, !torch.int -> !torch.bool
%3992 = torch.aten.Int.bool %3991 : !torch.bool -> !torch.int
%int768_1461 = torch.constant.int 768
%3993 = torch.aten.mul.int %3992, %int768_1461 : !torch.int, !torch.int -> !torch.int
%3994 = torch.aten.add.int %3990, %3993 : !torch.int, !torch.int -> !torch.int
%int2_1462 = torch.constant.int 2
%3995 = torch.aten.select.int %3982, %int0_1457, %int2_1462 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3996 = torch.aten.item %3995 : !torch.vtensor<[1],si64> -> !torch.int
%3997 = torch.aten.eq.int %3996, %int0_1457 : !torch.int, !torch.int -> !torch.bool
%3998 = torch.aten.Int.bool %3997 : !torch.bool -> !torch.int
%3999 = torch.aten.mul.int %3998, %int0_1457 : !torch.int, !torch.int -> !torch.int
%4000 = torch.aten.add.int %3996, %3999 : !torch.int, !torch.int -> !torch.int
%4001 = torch.prim.ListConstruct %3988, %3994, %4000 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%4002 = torch.aten.reshape %3981, %4001 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%float9.999990e-06_1463 = torch.constant.float 9.9999997473787516E-6
%int768_1464 = torch.constant.int 768
%4003 = torch.prim.ListConstruct %int768_1464 : (!torch.int) -> !torch.list<int>
%result0_1465, %result1_1466, %result2_1467 = torch.aten.native_layer_norm %4002, %4003, %2, %3, %float9.999990e-06_1463 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
return %result0_1465 : !torch.vtensor<[1,6,768],f32>
}
} -> SUCCESS
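// From here the log switches from IR dumps to the dialect-conversion trace
// proper: each "Legalizing operation" block shows the fold attempt and
// every registered pattern tried against one op, with the reason each one
// matched or failed.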
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.mul.int'(0x5616a31d8bd0) {
%232 = "torch.aten.mul.int"(%217, %231) : (!torch.int, !torch.int) -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.aten.mul.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.mul.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.aten.mul.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
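// torch.aten.mul.int computes a !torch.int scalar, so none of the three
// TorchToLinalg pattern families tried here (scalar-to-tensor-like,
// elementwise, reduction) can match it. Under a partial conversion that is
// fine: the op stays as-is, presumably to be folded or lowered by a later
// pass rather than this one. The torch.constant.int trace below fails for
// the same reason.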
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x5616a31d8ce0) {
%233 = "torch.constant.int"() <{value = 1 : i64}> : () -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.size.int'(0x5616a31d8da0) {
%234 = "torch.aten.size.int"(%215, %233) : (!torch.vtensor<[1,6],si64>, !torch.int) -> !torch.int
* Fold {
** Insert : 'torch.constant.int'(0x5616a321cf10)
** Replace : 'torch.aten.size.int'(0x5616a31d8da0)
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x5616a321cf10) {
%234 = "torch.constant.int"() <{value = 6 : i64}> : () -> !torch.int
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenScalarToTensorLike"
** Failure : not a supported Scalar to Tensor like op
"(anonymous namespace)::ConvertAtenScalarToTensorLike" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertElementwiseOp"
** Failure : not a supported elementwise op
"(anonymous namespace)::ConvertElementwiseOp" result 0
} -> FAILURE : pattern failed to match
* Pattern : 'torch.constant.int -> ()' {
Trying to match "(anonymous namespace)::ConvertReductionOp"
** Failure : not a supported reduce op
"(anonymous namespace)::ConvertReductionOp" result 0
} -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
} -> FAILURE : failed to legalize generated constant 'torch.constant.int'
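// What happened here: folding torch.aten.size.int against the static [1,6]
// operand type did produce torch.constant.int 6, but the conversion driver
// must also legalize any constant materialized by a fold, and this pass has
// no pattern for torch.constant.int. The fold is therefore rolled back and
// the driver falls through to the ConvertAtenSizeIntOp pattern below.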
* Pattern : 'torch.aten.size.int -> ()' {
Trying to match "(anonymous namespace)::ConvertAtenSizeIntOp"
** Insert : 'arith.constant'(0x5616a3329bc0)
** Insert : 'arith.addi'(0x5616a32c8220)
** Insert : 'arith.constant'(0x5616a335e620)
** Insert : 'arith.cmpi'(0x5616a3367240)
** Insert : 'arith.select'(0x5616a32b94a0)
** Insert : 'arith.constant'(0x5616a3289490)
** Insert : 'arith.cmpi'(0x5616a31e0600)
** Insert : 'cf.assert'(0x5616a32305e0)
** Insert : 'arith.cmpi'(0x5616a32d6430)
** Insert : 'cf.assert'(0x5616a33894d0)
** Insert : 'arith.index_cast'(0x5616a33234d0)
** Insert : 'tensor.dim'(0x5616a32a5b50)
** Insert : 'arith.index_cast'(0x5616a3369470)
** Replace : 'torch.aten.size.int'(0x5616a31d8da0)
"(anonymous namespace)::ConvertAtenSizeIntOp" result 1
//===-------------------------------------------===//
Legalizing operation : 'arith.constant'(0x5616a3329bc0) {
%235 = "arith.constant"() <{value = 2 : i64}> : () -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.addi'(0x5616a32c8220) {
%236 = "arith.addi"(%234, %235) : (i64, i64) -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.constant'(0x5616a335e620) {
%237 = "arith.constant"() <{value = 0 : i64}> : () -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.cmpi'(0x5616a3367240) {
%238 = "arith.cmpi"(%234, %237) <{predicate = 5 : i64}> : (i64, i64) -> i1
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.select'(0x5616a32b94a0) {
%239 = "arith.select"(%238, %234, %236) : (i1, i64, i64) -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.constant'(0x5616a3289490) {
%240 = "arith.constant"() <{value = 0 : i64}> : () -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.cmpi'(0x5616a31e0600) {
%241 = "arith.cmpi"(%239, %240) <{predicate = 5 : i64}> : (i64, i64) -> i1
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'cf.assert'(0x5616a32305e0) {
"cf.assert"(%241) <{msg = "dim must be greater or equal to zero"}> : (i1) -> ()
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.cmpi'(0x5616a32d6430) {
%242 = "arith.cmpi"(%239, %235) <{predicate = 2 : i64}> : (i64, i64) -> i1
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'cf.assert'(0x5616a33894d0) {
"cf.assert"(%242) <{msg = "dim must be smaller than inputRank"}> : (i1) -> ()
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.index_cast'(0x5616a33234d0) {
%243 = "arith.index_cast"(%239) : (i64) -> index
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'tensor.dim'(0x5616a32a5b50) {
%244 = "tensor.dim"(%216, %243) : (tensor<1x6xi64>, index) -> index
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'arith.index_cast'(0x5616a3369470) {
%245 = "arith.index_cast"(%244) : (index) -> i64
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
} -> SUCCESS : pattern applied successfully
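// Condensed, the IR that ConvertAtenSizeIntOp just inserted (SSA names
// hypothetical; rank 2 comes from the [1,6] operand): normalize a possibly
// negative dim, assert it is in range, then read the size via tensor.dim.
//   %rank   = arith.constant 2 : i64
//   %wrap   = arith.addi %dim, %rank : i64
//   %c0     = arith.constant 0 : i64
//   %nonneg = arith.cmpi sge, %dim, %c0 : i64
//   %ndim   = arith.select %nonneg, %dim, %wrap : i64
//   %ge0    = arith.cmpi sge, %ndim, %c0 : i64
//   cf.assert %ge0, "dim must be greater or equal to zero"
//   %lt     = arith.cmpi slt, %ndim, %rank : i64
//   cf.assert %lt, "dim must be smaller than inputRank"
//   %i      = arith.index_cast %ndim : i64 to index
//   %size   = tensor.dim %t, %i : tensor<1x6xi64>
//   %size64 = arith.index_cast %size : index to i64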
// *** IR Dump After Pattern Application ***
mlir-asm-printer: Verifying operation: func.func
func.func @main_graph(%arg0: !torch.vtensor<[1,6],si64>) -> !torch.vtensor<[1,6,768],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "2.3.0"} {
%0 = torch.vtensor.literal(dense_resource<_model.decoder.embed_tokens.weight> : tensor<50272x768xf32>) : !torch.vtensor<[50272,768],f32>
%1 = torch.vtensor.literal(dense_resource<_model.decoder.embed_positions.weight> : tensor<2050x768xf32>) : !torch.vtensor<[2050,768],f32>
%2 = torch.vtensor.literal(dense_resource<_model.decoder.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%3 = torch.vtensor.literal(dense_resource<_model.decoder.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%4 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%5 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%6 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%7 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%8 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%9 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%10 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%11 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%12 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%13 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%14 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%15 = torch.vtensor.literal(dense_resource<_model.decoder.layers.0.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%16 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%17 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%18 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%19 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%20 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%21 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%22 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%23 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%24 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%25 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%26 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%27 = torch.vtensor.literal(dense_resource<_model.decoder.layers.1.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%28 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%29 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%30 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%31 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%32 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%33 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%34 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%35 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%36 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%37 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%38 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%39 = torch.vtensor.literal(dense_resource<_model.decoder.layers.2.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%40 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%41 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%42 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%43 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%44 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%45 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%46 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%47 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%48 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%49 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%50 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%51 = torch.vtensor.literal(dense_resource<_model.decoder.layers.3.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%52 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%53 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%54 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%55 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%56 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%57 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%58 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%59 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%60 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%61 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%62 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%63 = torch.vtensor.literal(dense_resource<_model.decoder.layers.4.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%64 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%65 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%66 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%67 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%68 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%69 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%70 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%71 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%72 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%73 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%74 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%75 = torch.vtensor.literal(dense_resource<_model.decoder.layers.5.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%76 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%77 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%78 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%79 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%80 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%81 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%82 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%83 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%84 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%85 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%86 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%87 = torch.vtensor.literal(dense_resource<_model.decoder.layers.6.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%88 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%89 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%90 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%91 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%92 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%93 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%94 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%95 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%96 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%97 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%98 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%99 = torch.vtensor.literal(dense_resource<_model.decoder.layers.7.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%100 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%101 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%102 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%103 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%104 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%105 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%106 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%107 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%108 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%109 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%110 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%111 = torch.vtensor.literal(dense_resource<_model.decoder.layers.8.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%112 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%113 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%114 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%115 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%116 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%117 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%118 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%119 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%120 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%121 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%122 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%123 = torch.vtensor.literal(dense_resource<_model.decoder.layers.9.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%124 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%125 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%126 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%127 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%128 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%129 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%130 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%131 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%132 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%133 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%134 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%135 = torch.vtensor.literal(dense_resource<_model.decoder.layers.10.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%136 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn.k_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%137 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn.v_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%138 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn.q_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%139 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn.out_proj.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%140 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%141 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.self_attn_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%142 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.fc1.weight> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32>
%143 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.fc1.bias> : tensor<3072xf32>) : !torch.vtensor<[3072],f32>
%144 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.fc2.weight> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32>
%145 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.fc2.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%146 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.final_layer_norm.weight> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%147 = torch.vtensor.literal(dense_resource<_model.decoder.layers.11.final_layer_norm.bias> : tensor<768xf32>) : !torch.vtensor<[768],f32>
%148 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2046> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%149 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2047> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%150 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2058> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%151 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2094> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%152 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2102> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%153 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2103> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%154 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2114> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%155 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2150> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%156 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2158> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%157 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2159> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%158 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2170> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%159 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2206> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%160 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2214> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%161 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2215> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%162 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2226> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%163 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2262> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%164 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2270> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%165 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2271> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%166 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2282> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%167 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2318> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%168 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2326> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%169 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2327> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%170 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2338> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%171 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2374> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%172 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2382> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%173 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2383> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%174 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2394> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%175 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2430> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%176 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2438> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%177 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2439> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%178 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2450> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%179 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2486> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%180 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2494> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%181 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2495> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%182 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2506> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%183 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2542> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%184 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2550> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%185 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2551> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%186 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2562> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%187 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2598> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%188 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2606> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%189 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2607> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%190 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2618> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%191 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2654> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%192 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2662> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%193 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2663> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%194 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2674> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%195 = torch.vtensor.literal(dense_resource<_onnx__MatMul_2710> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32>
%196 = torch.vtensor.literal(dense_resource<_> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
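// ONNX Reshape semantics: a 0 entry in the shape tensor keeps the corresponding input dimension, hence the eq-0 / select arithmetic that follows.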
%int0 = torch.constant.int 0
%int0_0 = torch.constant.int 0
%197 = torch.aten.select.int %196, %int0, %int0_0 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%198 = torch.aten.item %197 : !torch.vtensor<[1],si64> -> !torch.int
%199 = torch.aten.eq.int %198, %int0 : !torch.int, !torch.int -> !torch.bool
%200 = torch.aten.Int.bool %199 : !torch.bool -> !torch.int
%int1 = torch.constant.int 1
%201 = torch.aten.mul.int %200, %int1 : !torch.int, !torch.int -> !torch.int
%202 = torch.aten.add.int %198, %201 : !torch.int, !torch.int -> !torch.int
%int1_1 = torch.constant.int 1
%203 = torch.aten.select.int %196, %int0, %int1_1 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%204 = torch.aten.item %203 : !torch.vtensor<[1],si64> -> !torch.int
%205 = torch.aten.eq.int %204, %int0 : !torch.int, !torch.int -> !torch.bool
%206 = torch.aten.Int.bool %205 : !torch.bool -> !torch.int
%int6 = torch.constant.int 6
%207 = torch.aten.mul.int %206, %int6 : !torch.int, !torch.int -> !torch.int
%208 = torch.aten.add.int %204, %207 : !torch.int, !torch.int -> !torch.int
%209 = torch.prim.ListConstruct %202, %208 : (!torch.int, !torch.int) -> !torch.list<int>
%210 = torch.aten.reshape %arg0, %209 : !torch.vtensor<[1,6],si64>, !torch.list<int> -> !torch.vtensor<[1,6],si64>
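// The IR is only partially lowered at this point: unrealized_conversion_cast bridges !torch.vtensor to the builtin tensor type, and the arith/cf ops below appear to be the lowered dim-bounds checks accompanying aten.size.int.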
%211 = builtin.unrealized_conversion_cast %210 : !torch.vtensor<[1,6],si64> to tensor<1x6xi64>
%int1_2 = torch.constant.int 1
%int0_3 = torch.constant.int 0
%212 = builtin.unrealized_conversion_cast %int0_3 : !torch.int to i64
%c2_i64 = arith.constant 2 : i64
%213 = arith.addi %212, %c2_i64 : i64
%c0_i64 = arith.constant 0 : i64
%214 = arith.cmpi sge, %212, %c0_i64 : i64
%215 = arith.select %214, %212, %213 : i64
%c0_i64_4 = arith.constant 0 : i64
%216 = arith.cmpi sge, %215, %c0_i64_4 : i64
cf.assert %216, "dim must be greater or equal to zero"
%217 = arith.cmpi slt, %215, %c2_i64 : i64
cf.assert %217, "dim must be smaller than inputRank"
%218 = arith.index_cast %215 : i64 to index
%dim = tensor.dim %211, %218 : tensor<1x6xi64>
%219 = arith.index_cast %dim : index to i64
%220 = torch.aten.size.int %210, %int0_3 : !torch.vtensor<[1,6],si64>, !torch.int -> !torch.int
%221 = torch.aten.mul.int %int1_2, %220 : !torch.int, !torch.int -> !torch.int
%int1_5 = torch.constant.int 1
%222 = builtin.unrealized_conversion_cast %int1_5 : !torch.int to i64
%c2_i64_6 = arith.constant 2 : i64
%223 = arith.addi %222, %c2_i64_6 : i64
%c0_i64_7 = arith.constant 0 : i64
%224 = arith.cmpi sge, %222, %c0_i64_7 : i64
%225 = arith.select %224, %222, %223 : i64
%c0_i64_8 = arith.constant 0 : i64
%226 = arith.cmpi sge, %225, %c0_i64_8 : i64
cf.assert %226, "dim must be greater or equal to zero"
%227 = arith.cmpi slt, %225, %c2_i64_6 : i64
cf.assert %227, "dim must be smaller than inputRank"
%228 = arith.index_cast %225 : i64 to index
%dim_9 = tensor.dim %211, %228 : tensor<1x6xi64>
%229 = arith.index_cast %dim_9 : index to i64
%230 = torch.aten.size.int %210, %int1_5 : !torch.vtensor<[1,6],si64>, !torch.int -> !torch.int
%231 = torch.aten.mul.int %221, %230 : !torch.int, !torch.int -> !torch.int
%int0_10 = torch.constant.int 0
%232 = torch.aten.size.int %0, %int0_10 : !torch.vtensor<[50272,768],f32>, !torch.int -> !torch.int
%int1_11 = torch.constant.int 1
%233 = torch.aten.size.int %0, %int1_11 : !torch.vtensor<[50272,768],f32>, !torch.int -> !torch.int
%234 = torch.prim.ListConstruct %231, %int1_2 : (!torch.int, !torch.int) -> !torch.list<int>
%235 = torch.aten.view %210, %234 : !torch.vtensor<[1,6],si64>, !torch.list<int> -> !torch.vtensor<[6,1],si64>
%int0_12 = torch.constant.int 0
%false = torch.constant.bool false
%236 = torch.aten.gather %0, %int0_12, %235, %false : !torch.vtensor<[50272,768],f32>, !torch.int, !torch.vtensor<[6,1],si64>, !torch.bool -> !torch.vtensor<[6,1],f32>
%237 = torch.prim.ListConstruct %231, %233 : (!torch.int, !torch.int) -> !torch.list<int>
%238 = torch.aten.expand %236, %237, %false : !torch.vtensor<[6,1],f32>, !torch.list<int>, !torch.bool -> !torch.vtensor<[6,768],f32>
%239 = torch.prim.ListConstruct %220, %230, %233 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%240 = torch.aten.view %238, %239 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
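// Token embeddings: flatten the ids to [6,1], gather rows of the [50272,768] embedding table, then expand and reshape to [1,6,768].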
%241 = torch.vtensor.literal(dense_resource<__1> : tensor<1x1x6x6xf32>) : !torch.vtensor<[1,1,6,6],f32>
%242 = torch.vtensor.literal(dense_resource<__2> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%243 = torch.vtensor.literal(dense_resource<__3> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%244 = torch.vtensor.literal(dense<1> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%245 = torch.vtensor.literal(dense_resource<__4> : tensor<si64>) : !torch.vtensor<[],si64>
%246 = torch.aten.mul.Tensor %244, %245 : !torch.vtensor<[4],si64>, !torch.vtensor<[],si64> -> !torch.vtensor<[4],si64>
%247 = torch.aten.eq.Tensor %242, %246 : !torch.vtensor<[4],si64>, !torch.vtensor<[4],si64> -> !torch.vtensor<[4],i1>
%248 = torch.aten.where.self %247, %244, %242 : !torch.vtensor<[4],i1>, !torch.vtensor<[4],si64>, !torch.vtensor<[4],si64> -> !torch.vtensor<[4],si64>
%int0_13 = torch.constant.int 0
%int0_14 = torch.constant.int 0
%249 = torch.aten.select.int %248, %int0_13, %int0_14 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%250 = torch.aten.item %249 : !torch.vtensor<[1],si64> -> !torch.int
%int1_15 = torch.constant.int 1
%251 = torch.aten.select.int %248, %int0_13, %int1_15 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%252 = torch.aten.item %251 : !torch.vtensor<[1],si64> -> !torch.int
%int2 = torch.constant.int 2
%253 = torch.aten.select.int %248, %int0_13, %int2 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%254 = torch.aten.item %253 : !torch.vtensor<[1],si64> -> !torch.int
%int3 = torch.constant.int 3
%255 = torch.aten.select.int %248, %int0_13, %int3 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%256 = torch.aten.item %255 : !torch.vtensor<[1],si64> -> !torch.int
%257 = torch.prim.ListConstruct %250, %252, %254, %256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%258 = torch.aten.broadcast_to %241, %257 : !torch.vtensor<[1,1,6,6],f32>, !torch.list<int> -> !torch.vtensor<[?,?,6,6],f32>
%259 = torch.vtensor.literal(dense_resource<__5> : tensor<1x1x1x6xf32>) : !torch.vtensor<[1,1,1,6],f32>
%260 = torch.vtensor.literal(dense_resource<__6> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%261 = torch.vtensor.literal(dense_resource<__7> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%262 = torch.vtensor.literal(dense<1> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%263 = torch.vtensor.literal(dense_resource<__8> : tensor<si64>) : !torch.vtensor<[],si64>
%264 = torch.aten.mul.Tensor %262, %263 : !torch.vtensor<[4],si64>, !torch.vtensor<[],si64> -> !torch.vtensor<[4],si64>
%265 = torch.aten.eq.Tensor %260, %264 : !torch.vtensor<[4],si64>, !torch.vtensor<[4],si64> -> !torch.vtensor<[4],i1>
%266 = torch.aten.where.self %265, %262, %260 : !torch.vtensor<[4],i1>, !torch.vtensor<[4],si64>, !torch.vtensor<[4],si64> -> !torch.vtensor<[4],si64>
%int0_16 = torch.constant.int 0
%int0_17 = torch.constant.int 0
%267 = torch.aten.select.int %266, %int0_16, %int0_17 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%268 = torch.aten.item %267 : !torch.vtensor<[1],si64> -> !torch.int
%int1_18 = torch.constant.int 1
%269 = torch.aten.select.int %266, %int0_16, %int1_18 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%270 = torch.aten.item %269 : !torch.vtensor<[1],si64> -> !torch.int
%int2_19 = torch.constant.int 2
%271 = torch.aten.select.int %266, %int0_16, %int2_19 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%272 = torch.aten.item %271 : !torch.vtensor<[1],si64> -> !torch.int
%int3_20 = torch.constant.int 3
%273 = torch.aten.select.int %266, %int0_16, %int3_20 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%274 = torch.aten.item %273 : !torch.vtensor<[1],si64> -> !torch.int
%275 = torch.prim.ListConstruct %268, %270, %272, %274 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%276 = torch.aten.broadcast_to %259, %275 : !torch.vtensor<[1,1,1,6],f32>, !torch.list<int> -> !torch.vtensor<[?,?,?,6],f32>
%int6_21 = torch.constant.int 6
%none = torch.constant.none
%false_22 = torch.constant.bool false
%277 = torch.aten.to.dtype %276, %int6_21, %false_22, %false_22, %none : !torch.vtensor<[?,?,?,6],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?,6],f32>
%278 = torch.vtensor.literal(dense_resource<__9> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_23 = torch.constant.int 1
%279 = torch.aten.sub.Tensor %278, %277, %int1_23 : !torch.vtensor<[],f32>, !torch.vtensor<[?,?,?,6],f32>, !torch.int -> !torch.vtensor<[?,?,?,6],f32>
%int11 = torch.constant.int 11
%none_24 = torch.constant.none
%false_25 = torch.constant.bool false
%280 = torch.aten.to.dtype %279, %int11, %false_25, %false_25, %none_24 : !torch.vtensor<[?,?,?,6],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?,6],i1>
%int11_26 = torch.constant.int 11
%none_27 = torch.constant.none
%false_28 = torch.constant.bool false
%281 = torch.aten.to.dtype %280, %int11_26, %false_28, %false_28, %none_27 : !torch.vtensor<[?,?,?,6],i1>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?,6],i1>
%282 = torch.vtensor.literal(dense_resource<__10> : tensor<f32>) : !torch.vtensor<[],f32>
%283 = torch.aten.where.self %281, %282, %279 : !torch.vtensor<[?,?,?,6],i1>, !torch.vtensor<[],f32>, !torch.vtensor<[?,?,?,6],f32> -> !torch.vtensor<[?,?,?,6],f32>
%int6_29 = torch.constant.int 6
%none_30 = torch.constant.none
%false_31 = torch.constant.bool false
%284 = torch.aten.to.dtype %283, %int6_29, %false_31, %false_31, %none_30 : !torch.vtensor<[?,?,?,6],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?,?,6],f32>
%int1_32 = torch.constant.int 1
%285 = torch.aten.add.Tensor %284, %258, %int1_32 : !torch.vtensor<[?,?,?,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,?,6,6],f32>
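// Additive attention mask: broadcast the [1,1,6,6] mask, invert the [1,1,1,6] padding mask (1 - m), fill the masked positions with the constant __10, then add the two.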
%286 = torch.vtensor.literal(dense_resource<__11> : tensor<1x6xsi64>) : !torch.vtensor<[1,6],si64>
%287 = torch.vtensor.literal(dense_resource<__12> : tensor<si32>) : !torch.vtensor<[],si32>
%int2_33 = torch.constant.int 2
%int0_34 = torch.constant.int 0
%288 = torch.aten.item %287 : !torch.vtensor<[],si32> -> !torch.int
%289 = torch.aten.lt.int %288, %int0_34 : !torch.int, !torch.int -> !torch.bool
%290 = torch.aten.Int.bool %289 : !torch.bool -> !torch.int
%291 = torch.aten.mul.int %290, %int2_33 : !torch.int, !torch.int -> !torch.int
%292 = torch.aten.add.int %288, %291 : !torch.int, !torch.int -> !torch.int
%int4 = torch.constant.int 4
%293 = torch.aten.cumsum %286, %292, %int4 : !torch.vtensor<[1,6],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,6],si64>
%294 = torch.vtensor.literal(dense_resource<__13> : tensor<1x6xsi64>) : !torch.vtensor<[1,6],si64>
%295 = torch.aten.mul.Tensor %293, %294 : !torch.vtensor<[1,6],si64>, !torch.vtensor<[1,6],si64> -> !torch.vtensor<[1,6],si64>
%int5 = torch.constant.int 5
%none_35 = torch.constant.none
%false_36 = torch.constant.bool false
%296 = torch.aten.to.dtype %295, %int5, %false_36, %false_36, %none_35 : !torch.vtensor<[1,6],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,6],si64>
%297 = torch.vtensor.literal(dense_resource<__14> : tensor<si64>) : !torch.vtensor<[],si64>
%int1_37 = torch.constant.int 1
%298 = torch.aten.sub.Tensor %296, %297, %int1_37 : !torch.vtensor<[1,6],si64>, !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1,6],si64>
%299 = torch.vtensor.literal(dense_resource<__15> : tensor<si64>) : !torch.vtensor<[],si64>
%int1_38 = torch.constant.int 1
%300 = torch.aten.add.Tensor %298, %299, %int1_38 : !torch.vtensor<[1,6],si64>, !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1,6],si64>
%int1_39 = torch.constant.int 1
%int0_40 = torch.constant.int 0
%301 = torch.aten.size.int %300, %int0_40 : !torch.vtensor<[1,6],si64>, !torch.int -> !torch.int
%302 = torch.aten.mul.int %int1_39, %301 : !torch.int, !torch.int -> !torch.int
%int1_41 = torch.constant.int 1
%303 = torch.aten.size.int %300, %int1_41 : !torch.vtensor<[1,6],si64>, !torch.int -> !torch.int
%304 = torch.aten.mul.int %302, %303 : !torch.int, !torch.int -> !torch.int
%int0_42 = torch.constant.int 0
%305 = torch.aten.size.int %1, %int0_42 : !torch.vtensor<[2050,768],f32>, !torch.int -> !torch.int
%int1_43 = torch.constant.int 1
%306 = torch.aten.size.int %1, %int1_43 : !torch.vtensor<[2050,768],f32>, !torch.int -> !torch.int
%307 = torch.prim.ListConstruct %304, %int1_39 : (!torch.int, !torch.int) -> !torch.list<int>
%308 = torch.aten.view %300, %307 : !torch.vtensor<[1,6],si64>, !torch.list<int> -> !torch.vtensor<[6,1],si64>
%int0_44 = torch.constant.int 0
%false_45 = torch.constant.bool false
%309 = torch.aten.gather %1, %int0_44, %308, %false_45 : !torch.vtensor<[2050,768],f32>, !torch.int, !torch.vtensor<[6,1],si64>, !torch.bool -> !torch.vtensor<[6,1],f32>
%310 = torch.prim.ListConstruct %304, %306 : (!torch.int, !torch.int) -> !torch.list<int>
%311 = torch.aten.expand %309, %310, %false_45 : !torch.vtensor<[6,1],f32>, !torch.list<int>, !torch.bool -> !torch.vtensor<[6,768],f32>
%312 = torch.prim.ListConstruct %301, %303, %306 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%313 = torch.aten.view %311, %312 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
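// Position ids: cumsum of the [1,6] mask times the mask, shifted by the scalar constants __14/__15, then gathered from the [2050,768] positional embedding table.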
%int1_46 = torch.constant.int 1
%314 = torch.aten.add.Tensor %240, %313, %int1_46 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
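// hidden_states = token embeddings + positional embeddings.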
%float9.999990e-06 = torch.constant.float 9.9999997473787516E-6
%int768 = torch.constant.int 768
%315 = torch.prim.ListConstruct %int768 : (!torch.int) -> !torch.list<int>
%result0, %result1, %result2 = torch.aten.native_layer_norm %314, %315, %8, %9, %float9.999990e-06 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
%316 = torch.aten.matmul %result0, %148 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_47 = torch.constant.int 1
%317 = torch.aten.add.Tensor %6, %316, %int1_47 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%318 = torch.vtensor.literal(dense_resource<__16> : tensor<f32>) : !torch.vtensor<[],f32>
%319 = torch.aten.mul.Tensor %317, %318 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
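// Query projection (weight %148, bias %6), multiplied by the attention scaling constant __16.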
%320 = torch.aten.matmul %result0, %149 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_48 = torch.constant.int 1
%321 = torch.aten.add.Tensor %4, %320, %int1_48 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%322 = torch.vtensor.literal(dense_resource<__17> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%323 = torch.vtensor.literal(dense_resource<__18> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_49 = torch.constant.int 0
%int0_50 = torch.constant.int 0
%324 = torch.aten.select.int %322, %int0_49, %int0_50 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%325 = torch.aten.item %324 : !torch.vtensor<[1],si64> -> !torch.int
%326 = torch.aten.eq.int %325, %int0_49 : !torch.int, !torch.int -> !torch.bool
%327 = torch.aten.Int.bool %326 : !torch.bool -> !torch.int
%int1_51 = torch.constant.int 1
%328 = torch.aten.mul.int %327, %int1_51 : !torch.int, !torch.int -> !torch.int
%329 = torch.aten.add.int %325, %328 : !torch.int, !torch.int -> !torch.int
%int1_52 = torch.constant.int 1
%330 = torch.aten.select.int %322, %int0_49, %int1_52 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%331 = torch.aten.item %330 : !torch.vtensor<[1],si64> -> !torch.int
%332 = torch.aten.eq.int %331, %int0_49 : !torch.int, !torch.int -> !torch.bool
%333 = torch.aten.Int.bool %332 : !torch.bool -> !torch.int
%int6_53 = torch.constant.int 6
%334 = torch.aten.mul.int %333, %int6_53 : !torch.int, !torch.int -> !torch.int
%335 = torch.aten.add.int %331, %334 : !torch.int, !torch.int -> !torch.int
%int2_54 = torch.constant.int 2
%336 = torch.aten.select.int %322, %int0_49, %int2_54 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%337 = torch.aten.item %336 : !torch.vtensor<[1],si64> -> !torch.int
%338 = torch.aten.eq.int %337, %int0_49 : !torch.int, !torch.int -> !torch.bool
%339 = torch.aten.Int.bool %338 : !torch.bool -> !torch.int
%int768_55 = torch.constant.int 768
%340 = torch.aten.mul.int %339, %int768_55 : !torch.int, !torch.int -> !torch.int
%341 = torch.aten.add.int %337, %340 : !torch.int, !torch.int -> !torch.int
%int3_56 = torch.constant.int 3
%342 = torch.aten.select.int %322, %int0_49, %int3_56 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%343 = torch.aten.item %342 : !torch.vtensor<[1],si64> -> !torch.int
%344 = torch.aten.eq.int %343, %int0_49 : !torch.int, !torch.int -> !torch.bool
%345 = torch.aten.Int.bool %344 : !torch.bool -> !torch.int
%346 = torch.aten.mul.int %345, %int0_49 : !torch.int, !torch.int -> !torch.int
%347 = torch.aten.add.int %343, %346 : !torch.int, !torch.int -> !torch.int
%348 = torch.prim.ListConstruct %329, %335, %341, %347 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%349 = torch.aten.reshape %321, %348 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_57 = torch.constant.int 1
%int2_58 = torch.constant.int 2
%350 = torch.aten.transpose.int %349, %int1_57, %int2_58 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
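// Key projection (weight %149, bias %4), reshaped to [1,6,12,64] and transposed to [1,12,6,64]: 12 heads of size 64.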
%351 = torch.aten.matmul %result0, %150 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_59 = torch.constant.int 1
%352 = torch.aten.add.Tensor %5, %351, %int1_59 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_60 = torch.constant.int 0
%int0_61 = torch.constant.int 0
%353 = torch.aten.select.int %323, %int0_60, %int0_61 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%354 = torch.aten.item %353 : !torch.vtensor<[1],si64> -> !torch.int
%355 = torch.aten.eq.int %354, %int0_60 : !torch.int, !torch.int -> !torch.bool
%356 = torch.aten.Int.bool %355 : !torch.bool -> !torch.int
%int1_62 = torch.constant.int 1
%357 = torch.aten.mul.int %356, %int1_62 : !torch.int, !torch.int -> !torch.int
%358 = torch.aten.add.int %354, %357 : !torch.int, !torch.int -> !torch.int
%int1_63 = torch.constant.int 1
%359 = torch.aten.select.int %323, %int0_60, %int1_63 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%360 = torch.aten.item %359 : !torch.vtensor<[1],si64> -> !torch.int
%361 = torch.aten.eq.int %360, %int0_60 : !torch.int, !torch.int -> !torch.bool
%362 = torch.aten.Int.bool %361 : !torch.bool -> !torch.int
%int6_64 = torch.constant.int 6
%363 = torch.aten.mul.int %362, %int6_64 : !torch.int, !torch.int -> !torch.int
%364 = torch.aten.add.int %360, %363 : !torch.int, !torch.int -> !torch.int
%int2_65 = torch.constant.int 2
%365 = torch.aten.select.int %323, %int0_60, %int2_65 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%366 = torch.aten.item %365 : !torch.vtensor<[1],si64> -> !torch.int
%367 = torch.aten.eq.int %366, %int0_60 : !torch.int, !torch.int -> !torch.bool
%368 = torch.aten.Int.bool %367 : !torch.bool -> !torch.int
%int768_66 = torch.constant.int 768
%369 = torch.aten.mul.int %368, %int768_66 : !torch.int, !torch.int -> !torch.int
%370 = torch.aten.add.int %366, %369 : !torch.int, !torch.int -> !torch.int
%int3_67 = torch.constant.int 3
%371 = torch.aten.select.int %323, %int0_60, %int3_67 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%372 = torch.aten.item %371 : !torch.vtensor<[1],si64> -> !torch.int
%373 = torch.aten.eq.int %372, %int0_60 : !torch.int, !torch.int -> !torch.bool
%374 = torch.aten.Int.bool %373 : !torch.bool -> !torch.int
%375 = torch.aten.mul.int %374, %int0_60 : !torch.int, !torch.int -> !torch.int
%376 = torch.aten.add.int %372, %375 : !torch.int, !torch.int -> !torch.int
%377 = torch.prim.ListConstruct %358, %364, %370, %376 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%378 = torch.aten.reshape %352, %377 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_68 = torch.constant.int 1
%int2_69 = torch.constant.int 2
%379 = torch.aten.transpose.int %378, %int1_68, %int2_69 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
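// Value projection (weight %150, bias %5), with the same reshape/transpose into per-head layout.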
%380 = torch.vtensor.literal(dense_resource<__19> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_70 = torch.constant.int 0
%int0_71 = torch.constant.int 0
%381 = torch.aten.select.int %380, %int0_70, %int0_71 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%382 = torch.aten.item %381 : !torch.vtensor<[1],si64> -> !torch.int
%383 = torch.aten.eq.int %382, %int0_70 : !torch.int, !torch.int -> !torch.bool
%384 = torch.aten.Int.bool %383 : !torch.bool -> !torch.int
%int1_72 = torch.constant.int 1
%385 = torch.aten.mul.int %384, %int1_72 : !torch.int, !torch.int -> !torch.int
%386 = torch.aten.add.int %382, %385 : !torch.int, !torch.int -> !torch.int
%int1_73 = torch.constant.int 1
%387 = torch.aten.select.int %380, %int0_70, %int1_73 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%388 = torch.aten.item %387 : !torch.vtensor<[1],si64> -> !torch.int
%389 = torch.aten.eq.int %388, %int0_70 : !torch.int, !torch.int -> !torch.bool
%390 = torch.aten.Int.bool %389 : !torch.bool -> !torch.int
%int6_74 = torch.constant.int 6
%391 = torch.aten.mul.int %390, %int6_74 : !torch.int, !torch.int -> !torch.int
%392 = torch.aten.add.int %388, %391 : !torch.int, !torch.int -> !torch.int
%int2_75 = torch.constant.int 2
%393 = torch.aten.select.int %380, %int0_70, %int2_75 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%394 = torch.aten.item %393 : !torch.vtensor<[1],si64> -> !torch.int
%395 = torch.aten.eq.int %394, %int0_70 : !torch.int, !torch.int -> !torch.bool
%396 = torch.aten.Int.bool %395 : !torch.bool -> !torch.int
%int768_76 = torch.constant.int 768
%397 = torch.aten.mul.int %396, %int768_76 : !torch.int, !torch.int -> !torch.int
%398 = torch.aten.add.int %394, %397 : !torch.int, !torch.int -> !torch.int
%int3_77 = torch.constant.int 3
%399 = torch.aten.select.int %380, %int0_70, %int3_77 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%400 = torch.aten.item %399 : !torch.vtensor<[1],si64> -> !torch.int
%401 = torch.aten.eq.int %400, %int0_70 : !torch.int, !torch.int -> !torch.bool
%402 = torch.aten.Int.bool %401 : !torch.bool -> !torch.int
%403 = torch.aten.mul.int %402, %int0_70 : !torch.int, !torch.int -> !torch.int
%404 = torch.aten.add.int %400, %403 : !torch.int, !torch.int -> !torch.int
%405 = torch.prim.ListConstruct %386, %392, %398, %404 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%406 = torch.aten.reshape %319, %405 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_78 = torch.constant.int 1
%int2_79 = torch.constant.int 2
%407 = torch.aten.transpose.int %406, %int1_78, %int2_79 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%408 = torch.vtensor.literal(dense_resource<__20> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%409 = torch.vtensor.literal(dense_resource<__21> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%410 = torch.vtensor.literal(dense_resource<__22> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_80 = torch.constant.int 0
%int0_81 = torch.constant.int 0
%411 = torch.aten.select.int %408, %int0_80, %int0_81 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%412 = torch.aten.item %411 : !torch.vtensor<[1],si64> -> !torch.int
%413 = torch.aten.eq.int %412, %int0_80 : !torch.int, !torch.int -> !torch.bool
%414 = torch.aten.Int.bool %413 : !torch.bool -> !torch.int
%int1_82 = torch.constant.int 1
%415 = torch.aten.mul.int %414, %int1_82 : !torch.int, !torch.int -> !torch.int
%416 = torch.aten.add.int %412, %415 : !torch.int, !torch.int -> !torch.int
%int1_83 = torch.constant.int 1
%417 = torch.aten.select.int %408, %int0_80, %int1_83 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%418 = torch.aten.item %417 : !torch.vtensor<[1],si64> -> !torch.int
%419 = torch.aten.eq.int %418, %int0_80 : !torch.int, !torch.int -> !torch.bool
%420 = torch.aten.Int.bool %419 : !torch.bool -> !torch.int
%int12 = torch.constant.int 12
%421 = torch.aten.mul.int %420, %int12 : !torch.int, !torch.int -> !torch.int
%422 = torch.aten.add.int %418, %421 : !torch.int, !torch.int -> !torch.int
%int2_84 = torch.constant.int 2
%423 = torch.aten.select.int %408, %int0_80, %int2_84 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%424 = torch.aten.item %423 : !torch.vtensor<[1],si64> -> !torch.int
%425 = torch.aten.eq.int %424, %int0_80 : !torch.int, !torch.int -> !torch.bool
%426 = torch.aten.Int.bool %425 : !torch.bool -> !torch.int
%int6_85 = torch.constant.int 6
%427 = torch.aten.mul.int %426, %int6_85 : !torch.int, !torch.int -> !torch.int
%428 = torch.aten.add.int %424, %427 : !torch.int, !torch.int -> !torch.int
%429 = torch.prim.ListConstruct %416, %422, %428 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%430 = torch.aten.reshape %407, %429 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_86 = torch.constant.int 0
%int0_87 = torch.constant.int 0
%431 = torch.aten.select.int %409, %int0_86, %int0_87 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%432 = torch.aten.item %431 : !torch.vtensor<[1],si64> -> !torch.int
%433 = torch.aten.eq.int %432, %int0_86 : !torch.int, !torch.int -> !torch.bool
%434 = torch.aten.Int.bool %433 : !torch.bool -> !torch.int
%int1_88 = torch.constant.int 1
%435 = torch.aten.mul.int %434, %int1_88 : !torch.int, !torch.int -> !torch.int
%436 = torch.aten.add.int %432, %435 : !torch.int, !torch.int -> !torch.int
%int1_89 = torch.constant.int 1
%437 = torch.aten.select.int %409, %int0_86, %int1_89 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%438 = torch.aten.item %437 : !torch.vtensor<[1],si64> -> !torch.int
%439 = torch.aten.eq.int %438, %int0_86 : !torch.int, !torch.int -> !torch.bool
%440 = torch.aten.Int.bool %439 : !torch.bool -> !torch.int
%int12_90 = torch.constant.int 12
%441 = torch.aten.mul.int %440, %int12_90 : !torch.int, !torch.int -> !torch.int
%442 = torch.aten.add.int %438, %441 : !torch.int, !torch.int -> !torch.int
%int2_91 = torch.constant.int 2
%443 = torch.aten.select.int %409, %int0_86, %int2_91 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%444 = torch.aten.item %443 : !torch.vtensor<[1],si64> -> !torch.int
%445 = torch.aten.eq.int %444, %int0_86 : !torch.int, !torch.int -> !torch.bool
%446 = torch.aten.Int.bool %445 : !torch.bool -> !torch.int
%int6_92 = torch.constant.int 6
%447 = torch.aten.mul.int %446, %int6_92 : !torch.int, !torch.int -> !torch.int
%448 = torch.aten.add.int %444, %447 : !torch.int, !torch.int -> !torch.int
%449 = torch.prim.ListConstruct %436, %442, %448 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%450 = torch.aten.reshape %350, %449 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_93 = torch.constant.int 0
%int0_94 = torch.constant.int 0
%451 = torch.aten.select.int %410, %int0_93, %int0_94 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%452 = torch.aten.item %451 : !torch.vtensor<[1],si64> -> !torch.int
%453 = torch.aten.eq.int %452, %int0_93 : !torch.int, !torch.int -> !torch.bool
%454 = torch.aten.Int.bool %453 : !torch.bool -> !torch.int
%int1_95 = torch.constant.int 1
%455 = torch.aten.mul.int %454, %int1_95 : !torch.int, !torch.int -> !torch.int
%456 = torch.aten.add.int %452, %455 : !torch.int, !torch.int -> !torch.int
%int1_96 = torch.constant.int 1
%457 = torch.aten.select.int %410, %int0_93, %int1_96 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%458 = torch.aten.item %457 : !torch.vtensor<[1],si64> -> !torch.int
%459 = torch.aten.eq.int %458, %int0_93 : !torch.int, !torch.int -> !torch.bool
%460 = torch.aten.Int.bool %459 : !torch.bool -> !torch.int
%int12_97 = torch.constant.int 12
%461 = torch.aten.mul.int %460, %int12_97 : !torch.int, !torch.int -> !torch.int
%462 = torch.aten.add.int %458, %461 : !torch.int, !torch.int -> !torch.int
%int2_98 = torch.constant.int 2
%463 = torch.aten.select.int %410, %int0_93, %int2_98 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%464 = torch.aten.item %463 : !torch.vtensor<[1],si64> -> !torch.int
%465 = torch.aten.eq.int %464, %int0_93 : !torch.int, !torch.int -> !torch.bool
%466 = torch.aten.Int.bool %465 : !torch.bool -> !torch.int
%int6_99 = torch.constant.int 6
%467 = torch.aten.mul.int %466, %int6_99 : !torch.int, !torch.int -> !torch.int
%468 = torch.aten.add.int %464, %467 : !torch.int, !torch.int -> !torch.int
%469 = torch.prim.ListConstruct %456, %462, %468 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%470 = torch.aten.reshape %379, %469 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int1_100 = torch.constant.int 1
%int2_101 = torch.constant.int 2
%471 = torch.aten.transpose.int %450, %int1_100, %int2_101 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%472 = torch.aten.matmul %430, %471 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
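// Attention scores per head: Q @ K^T -> [12,6,6].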
%473 = torch.vtensor.literal(dense_resource<__23> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_102 = torch.constant.int 0
%int0_103 = torch.constant.int 0
%474 = torch.aten.select.int %473, %int0_102, %int0_103 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%475 = torch.aten.item %474 : !torch.vtensor<[1],si64> -> !torch.int
%476 = torch.aten.eq.int %475, %int0_102 : !torch.int, !torch.int -> !torch.bool
%477 = torch.aten.Int.bool %476 : !torch.bool -> !torch.int
%int12_104 = torch.constant.int 12
%478 = torch.aten.mul.int %477, %int12_104 : !torch.int, !torch.int -> !torch.int
%479 = torch.aten.add.int %475, %478 : !torch.int, !torch.int -> !torch.int
%int1_105 = torch.constant.int 1
%480 = torch.aten.select.int %473, %int0_102, %int1_105 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%481 = torch.aten.item %480 : !torch.vtensor<[1],si64> -> !torch.int
%482 = torch.aten.eq.int %481, %int0_102 : !torch.int, !torch.int -> !torch.bool
%483 = torch.aten.Int.bool %482 : !torch.bool -> !torch.int
%int6_106 = torch.constant.int 6
%484 = torch.aten.mul.int %483, %int6_106 : !torch.int, !torch.int -> !torch.int
%485 = torch.aten.add.int %481, %484 : !torch.int, !torch.int -> !torch.int
%int2_107 = torch.constant.int 2
%486 = torch.aten.select.int %473, %int0_102, %int2_107 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%487 = torch.aten.item %486 : !torch.vtensor<[1],si64> -> !torch.int
%488 = torch.aten.eq.int %487, %int0_102 : !torch.int, !torch.int -> !torch.bool
%489 = torch.aten.Int.bool %488 : !torch.bool -> !torch.int
%int6_108 = torch.constant.int 6
%490 = torch.aten.mul.int %489, %int6_108 : !torch.int, !torch.int -> !torch.int
%491 = torch.aten.add.int %487, %490 : !torch.int, !torch.int -> !torch.int
%int3_109 = torch.constant.int 3
%492 = torch.aten.select.int %473, %int0_102, %int3_109 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%493 = torch.aten.item %492 : !torch.vtensor<[1],si64> -> !torch.int
%494 = torch.aten.eq.int %493, %int0_102 : !torch.int, !torch.int -> !torch.bool
%495 = torch.aten.Int.bool %494 : !torch.bool -> !torch.int
%496 = torch.aten.mul.int %495, %int0_102 : !torch.int, !torch.int -> !torch.int
%497 = torch.aten.add.int %493, %496 : !torch.int, !torch.int -> !torch.int
%498 = torch.prim.ListConstruct %479, %485, %491, %497 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%499 = torch.aten.reshape %472, %498 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
%int1_110 = torch.constant.int 1
%500 = torch.aten.add.Tensor %499, %285, %int1_110 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%501 = torch.vtensor.literal(dense_resource<__24> : tensor<f32>) : !torch.vtensor<[],f32>
%502 = torch.aten.maximum %500, %501 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
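// Add the broadcast attention mask, then clamp from below against the constant __24 (elementwise maximum).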
%503 = torch.vtensor.literal(dense_resource<__25> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_111 = torch.constant.int 0
%int0_112 = torch.constant.int 0
%504 = torch.aten.select.int %503, %int0_111, %int0_112 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%505 = torch.aten.item %504 : !torch.vtensor<[1],si64> -> !torch.int
%506 = torch.aten.eq.int %505, %int0_111 : !torch.int, !torch.int -> !torch.bool
%507 = torch.aten.Int.bool %506 : !torch.bool -> !torch.int
%int-1 = torch.constant.int -1
%508 = torch.aten.mul.int %507, %int-1 : !torch.int, !torch.int -> !torch.int
%509 = torch.aten.add.int %505, %508 : !torch.int, !torch.int -> !torch.int
%int1_113 = torch.constant.int 1
%510 = torch.aten.select.int %503, %int0_111, %int1_113 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%511 = torch.aten.item %510 : !torch.vtensor<[1],si64> -> !torch.int
%512 = torch.aten.eq.int %511, %int0_111 : !torch.int, !torch.int -> !torch.bool
%513 = torch.aten.Int.bool %512 : !torch.bool -> !torch.int
%int12_114 = torch.constant.int 12
%514 = torch.aten.mul.int %513, %int12_114 : !torch.int, !torch.int -> !torch.int
%515 = torch.aten.add.int %511, %514 : !torch.int, !torch.int -> !torch.int
%int2_115 = torch.constant.int 2
%516 = torch.aten.select.int %503, %int0_111, %int2_115 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%517 = torch.aten.item %516 : !torch.vtensor<[1],si64> -> !torch.int
%518 = torch.aten.eq.int %517, %int0_111 : !torch.int, !torch.int -> !torch.bool
%519 = torch.aten.Int.bool %518 : !torch.bool -> !torch.int
%int6_116 = torch.constant.int 6
%520 = torch.aten.mul.int %519, %int6_116 : !torch.int, !torch.int -> !torch.int
%521 = torch.aten.add.int %517, %520 : !torch.int, !torch.int -> !torch.int
%522 = torch.prim.ListConstruct %509, %515, %521 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%523 = torch.aten.reshape %502, %522 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
%int2_117 = torch.constant.int 2
%none_118 = torch.constant.none
%524 = torch.aten.softmax.int %523, %int2_117, %none_118 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%525 = torch.aten.matmul %524, %470 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
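// Softmax over the key dimension, then attention output = probs @ V -> [12,6,64].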
%526 = torch.vtensor.literal(dense_resource<__26> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_119 = torch.constant.int 0
%int0_120 = torch.constant.int 0
%527 = torch.aten.select.int %526, %int0_119, %int0_120 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%528 = torch.aten.item %527 : !torch.vtensor<[1],si64> -> !torch.int
%529 = torch.aten.eq.int %528, %int0_119 : !torch.int, !torch.int -> !torch.bool
%530 = torch.aten.Int.bool %529 : !torch.bool -> !torch.int
%int12_121 = torch.constant.int 12
%531 = torch.aten.mul.int %530, %int12_121 : !torch.int, !torch.int -> !torch.int
%532 = torch.aten.add.int %528, %531 : !torch.int, !torch.int -> !torch.int
%int1_122 = torch.constant.int 1
%533 = torch.aten.select.int %526, %int0_119, %int1_122 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%534 = torch.aten.item %533 : !torch.vtensor<[1],si64> -> !torch.int
%535 = torch.aten.eq.int %534, %int0_119 : !torch.int, !torch.int -> !torch.bool
%536 = torch.aten.Int.bool %535 : !torch.bool -> !torch.int
%int6_123 = torch.constant.int 6
%537 = torch.aten.mul.int %536, %int6_123 : !torch.int, !torch.int -> !torch.int
%538 = torch.aten.add.int %534, %537 : !torch.int, !torch.int -> !torch.int
%int2_124 = torch.constant.int 2
%539 = torch.aten.select.int %526, %int0_119, %int2_124 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%540 = torch.aten.item %539 : !torch.vtensor<[1],si64> -> !torch.int
%541 = torch.aten.eq.int %540, %int0_119 : !torch.int, !torch.int -> !torch.bool
%542 = torch.aten.Int.bool %541 : !torch.bool -> !torch.int
%int64 = torch.constant.int 64
%543 = torch.aten.mul.int %542, %int64 : !torch.int, !torch.int -> !torch.int
%544 = torch.aten.add.int %540, %543 : !torch.int, !torch.int -> !torch.int
%int3_125 = torch.constant.int 3
%545 = torch.aten.select.int %526, %int0_119, %int3_125 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%546 = torch.aten.item %545 : !torch.vtensor<[1],si64> -> !torch.int
%547 = torch.aten.eq.int %546, %int0_119 : !torch.int, !torch.int -> !torch.bool
%548 = torch.aten.Int.bool %547 : !torch.bool -> !torch.int
%549 = torch.aten.mul.int %548, %int0_119 : !torch.int, !torch.int -> !torch.int
%550 = torch.aten.add.int %546, %549 : !torch.int, !torch.int -> !torch.int
%551 = torch.prim.ListConstruct %532, %538, %544, %550 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%552 = torch.aten.reshape %525, %551 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_126 = torch.constant.int 1
%int2_127 = torch.constant.int 2
%553 = torch.aten.transpose.int %552, %int1_126, %int2_127 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%554 = torch.vtensor.literal(dense_resource<__27> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_128 = torch.constant.int 0
%int0_129 = torch.constant.int 0
%555 = torch.aten.select.int %554, %int0_128, %int0_129 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%556 = torch.aten.item %555 : !torch.vtensor<[1],si64> -> !torch.int
%557 = torch.aten.eq.int %556, %int0_128 : !torch.int, !torch.int -> !torch.bool
%558 = torch.aten.Int.bool %557 : !torch.bool -> !torch.int
%int1_130 = torch.constant.int 1
%559 = torch.aten.mul.int %558, %int1_130 : !torch.int, !torch.int -> !torch.int
%560 = torch.aten.add.int %556, %559 : !torch.int, !torch.int -> !torch.int
%int1_131 = torch.constant.int 1
%561 = torch.aten.select.int %554, %int0_128, %int1_131 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%562 = torch.aten.item %561 : !torch.vtensor<[1],si64> -> !torch.int
%563 = torch.aten.eq.int %562, %int0_128 : !torch.int, !torch.int -> !torch.bool
%564 = torch.aten.Int.bool %563 : !torch.bool -> !torch.int
%int6_132 = torch.constant.int 6
%565 = torch.aten.mul.int %564, %int6_132 : !torch.int, !torch.int -> !torch.int
%566 = torch.aten.add.int %562, %565 : !torch.int, !torch.int -> !torch.int
%int2_133 = torch.constant.int 2
%567 = torch.aten.select.int %554, %int0_128, %int2_133 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%568 = torch.aten.item %567 : !torch.vtensor<[1],si64> -> !torch.int
%569 = torch.aten.eq.int %568, %int0_128 : !torch.int, !torch.int -> !torch.bool
%570 = torch.aten.Int.bool %569 : !torch.bool -> !torch.int
%int12_134 = torch.constant.int 12
%571 = torch.aten.mul.int %570, %int12_134 : !torch.int, !torch.int -> !torch.int
%572 = torch.aten.add.int %568, %571 : !torch.int, !torch.int -> !torch.int
%573 = torch.prim.ListConstruct %560, %566, %572 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%574 = torch.aten.reshape %553, %573 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
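// Merge heads: transpose back to [1,6,12,64] and reshape to [1,6,768].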
%575 = torch.aten.matmul %574, %151 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_135 = torch.constant.int 1
%576 = torch.aten.add.Tensor %7, %575, %int1_135 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_136 = torch.constant.int 1
%577 = torch.aten.add.Tensor %314, %576, %int1_136 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
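// Output projection (weight %151, bias %7) followed by the residual add with the block input %314.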
%578 = torch.vtensor.literal(dense_resource<__28> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_137 = torch.constant.int 0
%int0_138 = torch.constant.int 0
%579 = torch.aten.select.int %578, %int0_137, %int0_138 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%580 = torch.aten.item %579 : !torch.vtensor<[1],si64> -> !torch.int
%581 = torch.aten.eq.int %580, %int0_137 : !torch.int, !torch.int -> !torch.bool
%582 = torch.aten.Int.bool %581 : !torch.bool -> !torch.int
%int1_139 = torch.constant.int 1
%583 = torch.aten.mul.int %582, %int1_139 : !torch.int, !torch.int -> !torch.int
%584 = torch.aten.add.int %580, %583 : !torch.int, !torch.int -> !torch.int
%int1_140 = torch.constant.int 1
%585 = torch.aten.select.int %578, %int0_137, %int1_140 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%586 = torch.aten.item %585 : !torch.vtensor<[1],si64> -> !torch.int
%587 = torch.aten.eq.int %586, %int0_137 : !torch.int, !torch.int -> !torch.bool
%588 = torch.aten.Int.bool %587 : !torch.bool -> !torch.int
%int6_141 = torch.constant.int 6
%589 = torch.aten.mul.int %588, %int6_141 : !torch.int, !torch.int -> !torch.int
%590 = torch.aten.add.int %586, %589 : !torch.int, !torch.int -> !torch.int
%591 = torch.prim.ListConstruct %584, %590 : (!torch.int, !torch.int) -> !torch.list<int>
%592 = torch.aten.reshape %577, %591 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
%float9.999990e-06_142 = torch.constant.float 9.9999997473787516E-6
%int768_143 = torch.constant.int 768
%593 = torch.prim.ListConstruct %int768_143 : (!torch.int) -> !torch.list<int>
%result0_144, %result1_145, %result2_146 = torch.aten.native_layer_norm %592, %593, %14, %15, %float9.999990e-06_142 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
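// Second layer norm of the block, applied to the flattened [6,768] residual stream.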
%int0_147 = torch.constant.int 0
%int1_148 = torch.constant.int 1
%594 = torch.aten.transpose.int %10, %int0_147, %int1_148 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%595 = torch.aten.mm %result0_144, %594 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%596 = torch.aten.add.Tensor %595, %11, %int1_148 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%597 = torch.aten.relu %596 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_149 = torch.constant.int 0
%int1_150 = torch.constant.int 1
%598 = torch.aten.transpose.int %12, %int0_149, %int1_150 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%599 = torch.aten.mm %597, %598 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%600 = torch.aten.add.Tensor %599, %13, %int1_150 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
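// Feed-forward block: fc1 (768 -> 3072), ReLU, fc2 (3072 -> 768).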
%int1_151 = torch.constant.int 1
%601 = torch.aten.add.Tensor %592, %600, %int1_151 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%602 = torch.vtensor.literal(dense_resource<__29> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_152 = torch.constant.int 0
%int0_153 = torch.constant.int 0
%603 = torch.aten.select.int %602, %int0_152, %int0_153 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%604 = torch.aten.item %603 : !torch.vtensor<[1],si64> -> !torch.int
%605 = torch.aten.eq.int %604, %int0_152 : !torch.int, !torch.int -> !torch.bool
%606 = torch.aten.Int.bool %605 : !torch.bool -> !torch.int
%int6_154 = torch.constant.int 6
%607 = torch.aten.mul.int %606, %int6_154 : !torch.int, !torch.int -> !torch.int
%608 = torch.aten.add.int %604, %607 : !torch.int, !torch.int -> !torch.int
%int1_155 = torch.constant.int 1
%609 = torch.aten.select.int %602, %int0_152, %int1_155 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%610 = torch.aten.item %609 : !torch.vtensor<[1],si64> -> !torch.int
%611 = torch.aten.eq.int %610, %int0_152 : !torch.int, !torch.int -> !torch.bool
%612 = torch.aten.Int.bool %611 : !torch.bool -> !torch.int
%int768_156 = torch.constant.int 768
%613 = torch.aten.mul.int %612, %int768_156 : !torch.int, !torch.int -> !torch.int
%614 = torch.aten.add.int %610, %613 : !torch.int, !torch.int -> !torch.int
%int2_157 = torch.constant.int 2
%615 = torch.aten.select.int %602, %int0_152, %int2_157 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%616 = torch.aten.item %615 : !torch.vtensor<[1],si64> -> !torch.int
%617 = torch.aten.eq.int %616, %int0_152 : !torch.int, !torch.int -> !torch.bool
%618 = torch.aten.Int.bool %617 : !torch.bool -> !torch.int
%619 = torch.aten.mul.int %618, %int0_152 : !torch.int, !torch.int -> !torch.int
%620 = torch.aten.add.int %616, %619 : !torch.int, !torch.int -> !torch.int
%621 = torch.prim.ListConstruct %608, %614, %620 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%622 = torch.aten.reshape %601, %621 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
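// Residual add reshaped back to [1,6,768]; this ends decoder layer 0. Layer 1 below repeats the same pattern with its own weights (%152-%154 and following).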
%float9.999990e-06_158 = torch.constant.float 9.9999997473787516E-6
%int768_159 = torch.constant.int 768
%623 = torch.prim.ListConstruct %int768_159 : (!torch.int) -> !torch.list<int>
%result0_160, %result1_161, %result2_162 = torch.aten.native_layer_norm %622, %623, %20, %21, %float9.999990e-06_158 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
%624 = torch.aten.matmul %result0_160, %152 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_163 = torch.constant.int 1
%625 = torch.aten.add.Tensor %18, %624, %int1_163 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%626 = torch.vtensor.literal(dense_resource<__30> : tensor<f32>) : !torch.vtensor<[],f32>
%627 = torch.aten.mul.Tensor %625, %626 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%628 = torch.aten.matmul %result0_160, %153 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_164 = torch.constant.int 1
%629 = torch.aten.add.Tensor %16, %628, %int1_164 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%630 = torch.vtensor.literal(dense_resource<__31> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%631 = torch.vtensor.literal(dense_resource<__32> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_165 = torch.constant.int 0
%int0_166 = torch.constant.int 0
%632 = torch.aten.select.int %630, %int0_165, %int0_166 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%633 = torch.aten.item %632 : !torch.vtensor<[1],si64> -> !torch.int
%634 = torch.aten.eq.int %633, %int0_165 : !torch.int, !torch.int -> !torch.bool
%635 = torch.aten.Int.bool %634 : !torch.bool -> !torch.int
%int1_167 = torch.constant.int 1
%636 = torch.aten.mul.int %635, %int1_167 : !torch.int, !torch.int -> !torch.int
%637 = torch.aten.add.int %633, %636 : !torch.int, !torch.int -> !torch.int
%int1_168 = torch.constant.int 1
%638 = torch.aten.select.int %630, %int0_165, %int1_168 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%639 = torch.aten.item %638 : !torch.vtensor<[1],si64> -> !torch.int
%640 = torch.aten.eq.int %639, %int0_165 : !torch.int, !torch.int -> !torch.bool
%641 = torch.aten.Int.bool %640 : !torch.bool -> !torch.int
%int6_169 = torch.constant.int 6
%642 = torch.aten.mul.int %641, %int6_169 : !torch.int, !torch.int -> !torch.int
%643 = torch.aten.add.int %639, %642 : !torch.int, !torch.int -> !torch.int
%int2_170 = torch.constant.int 2
%644 = torch.aten.select.int %630, %int0_165, %int2_170 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%645 = torch.aten.item %644 : !torch.vtensor<[1],si64> -> !torch.int
%646 = torch.aten.eq.int %645, %int0_165 : !torch.int, !torch.int -> !torch.bool
%647 = torch.aten.Int.bool %646 : !torch.bool -> !torch.int
%int768_171 = torch.constant.int 768
%648 = torch.aten.mul.int %647, %int768_171 : !torch.int, !torch.int -> !torch.int
%649 = torch.aten.add.int %645, %648 : !torch.int, !torch.int -> !torch.int
%int3_172 = torch.constant.int 3
%650 = torch.aten.select.int %630, %int0_165, %int3_172 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%651 = torch.aten.item %650 : !torch.vtensor<[1],si64> -> !torch.int
%652 = torch.aten.eq.int %651, %int0_165 : !torch.int, !torch.int -> !torch.bool
%653 = torch.aten.Int.bool %652 : !torch.bool -> !torch.int
%654 = torch.aten.mul.int %653, %int0_165 : !torch.int, !torch.int -> !torch.int
%655 = torch.aten.add.int %651, %654 : !torch.int, !torch.int -> !torch.int
%656 = torch.prim.ListConstruct %637, %643, %649, %655 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%657 = torch.aten.reshape %629, %656 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_173 = torch.constant.int 1
%int2_174 = torch.constant.int 2
%658 = torch.aten.transpose.int %657, %int1_173, %int2_174 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%659 = torch.aten.matmul %result0_160, %154 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_175 = torch.constant.int 1
%660 = torch.aten.add.Tensor %17, %659, %int1_175 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_176 = torch.constant.int 0
%int0_177 = torch.constant.int 0
%661 = torch.aten.select.int %631, %int0_176, %int0_177 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%662 = torch.aten.item %661 : !torch.vtensor<[1],si64> -> !torch.int
%663 = torch.aten.eq.int %662, %int0_176 : !torch.int, !torch.int -> !torch.bool
%664 = torch.aten.Int.bool %663 : !torch.bool -> !torch.int
%int1_178 = torch.constant.int 1
%665 = torch.aten.mul.int %664, %int1_178 : !torch.int, !torch.int -> !torch.int
%666 = torch.aten.add.int %662, %665 : !torch.int, !torch.int -> !torch.int
%int1_179 = torch.constant.int 1
%667 = torch.aten.select.int %631, %int0_176, %int1_179 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%668 = torch.aten.item %667 : !torch.vtensor<[1],si64> -> !torch.int
%669 = torch.aten.eq.int %668, %int0_176 : !torch.int, !torch.int -> !torch.bool
%670 = torch.aten.Int.bool %669 : !torch.bool -> !torch.int
%int6_180 = torch.constant.int 6
%671 = torch.aten.mul.int %670, %int6_180 : !torch.int, !torch.int -> !torch.int
%672 = torch.aten.add.int %668, %671 : !torch.int, !torch.int -> !torch.int
%int2_181 = torch.constant.int 2
%673 = torch.aten.select.int %631, %int0_176, %int2_181 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%674 = torch.aten.item %673 : !torch.vtensor<[1],si64> -> !torch.int
%675 = torch.aten.eq.int %674, %int0_176 : !torch.int, !torch.int -> !torch.bool
%676 = torch.aten.Int.bool %675 : !torch.bool -> !torch.int
%int768_182 = torch.constant.int 768
%677 = torch.aten.mul.int %676, %int768_182 : !torch.int, !torch.int -> !torch.int
%678 = torch.aten.add.int %674, %677 : !torch.int, !torch.int -> !torch.int
%int3_183 = torch.constant.int 3
%679 = torch.aten.select.int %631, %int0_176, %int3_183 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%680 = torch.aten.item %679 : !torch.vtensor<[1],si64> -> !torch.int
%681 = torch.aten.eq.int %680, %int0_176 : !torch.int, !torch.int -> !torch.bool
%682 = torch.aten.Int.bool %681 : !torch.bool -> !torch.int
%683 = torch.aten.mul.int %682, %int0_176 : !torch.int, !torch.int -> !torch.int
%684 = torch.aten.add.int %680, %683 : !torch.int, !torch.int -> !torch.int
%685 = torch.prim.ListConstruct %666, %672, %678, %684 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%686 = torch.aten.reshape %660, %685 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_184 = torch.constant.int 1
%int2_185 = torch.constant.int 2
%687 = torch.aten.transpose.int %686, %int1_184, %int2_185 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
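// %659 -> %687: value projection (weights %154, bias %17), same head split.
// The query path (%627, computed above) is split into heads next.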
%688 = torch.vtensor.literal(dense_resource<__33> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_186 = torch.constant.int 0
%int0_187 = torch.constant.int 0
%689 = torch.aten.select.int %688, %int0_186, %int0_187 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%690 = torch.aten.item %689 : !torch.vtensor<[1],si64> -> !torch.int
%691 = torch.aten.eq.int %690, %int0_186 : !torch.int, !torch.int -> !torch.bool
%692 = torch.aten.Int.bool %691 : !torch.bool -> !torch.int
%int1_188 = torch.constant.int 1
%693 = torch.aten.mul.int %692, %int1_188 : !torch.int, !torch.int -> !torch.int
%694 = torch.aten.add.int %690, %693 : !torch.int, !torch.int -> !torch.int
%int1_189 = torch.constant.int 1
%695 = torch.aten.select.int %688, %int0_186, %int1_189 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%696 = torch.aten.item %695 : !torch.vtensor<[1],si64> -> !torch.int
%697 = torch.aten.eq.int %696, %int0_186 : !torch.int, !torch.int -> !torch.bool
%698 = torch.aten.Int.bool %697 : !torch.bool -> !torch.int
%int6_190 = torch.constant.int 6
%699 = torch.aten.mul.int %698, %int6_190 : !torch.int, !torch.int -> !torch.int
%700 = torch.aten.add.int %696, %699 : !torch.int, !torch.int -> !torch.int
%int2_191 = torch.constant.int 2
%701 = torch.aten.select.int %688, %int0_186, %int2_191 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%702 = torch.aten.item %701 : !torch.vtensor<[1],si64> -> !torch.int
%703 = torch.aten.eq.int %702, %int0_186 : !torch.int, !torch.int -> !torch.bool
%704 = torch.aten.Int.bool %703 : !torch.bool -> !torch.int
%int768_192 = torch.constant.int 768
%705 = torch.aten.mul.int %704, %int768_192 : !torch.int, !torch.int -> !torch.int
%706 = torch.aten.add.int %702, %705 : !torch.int, !torch.int -> !torch.int
%int3_193 = torch.constant.int 3
%707 = torch.aten.select.int %688, %int0_186, %int3_193 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%708 = torch.aten.item %707 : !torch.vtensor<[1],si64> -> !torch.int
%709 = torch.aten.eq.int %708, %int0_186 : !torch.int, !torch.int -> !torch.bool
%710 = torch.aten.Int.bool %709 : !torch.bool -> !torch.int
%711 = torch.aten.mul.int %710, %int0_186 : !torch.int, !torch.int -> !torch.int
%712 = torch.aten.add.int %708, %711 : !torch.int, !torch.int -> !torch.int
%713 = torch.prim.ListConstruct %694, %700, %706, %712 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%714 = torch.aten.reshape %627, %713 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_194 = torch.constant.int 1
%int2_195 = torch.constant.int 2
%715 = torch.aten.transpose.int %714, %int1_194, %int2_195 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
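// All three projections are now in [1,12,6,64] head layout. The next three
// Reshape chains (shape resources __34..__36) flatten batch and heads into
// [12,6,64] so the score computation can use a single batched matmul.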
%716 = torch.vtensor.literal(dense_resource<__34> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%717 = torch.vtensor.literal(dense_resource<__35> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%718 = torch.vtensor.literal(dense_resource<__36> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_196 = torch.constant.int 0
%int0_197 = torch.constant.int 0
%719 = torch.aten.select.int %716, %int0_196, %int0_197 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%720 = torch.aten.item %719 : !torch.vtensor<[1],si64> -> !torch.int
%721 = torch.aten.eq.int %720, %int0_196 : !torch.int, !torch.int -> !torch.bool
%722 = torch.aten.Int.bool %721 : !torch.bool -> !torch.int
%int1_198 = torch.constant.int 1
%723 = torch.aten.mul.int %722, %int1_198 : !torch.int, !torch.int -> !torch.int
%724 = torch.aten.add.int %720, %723 : !torch.int, !torch.int -> !torch.int
%int1_199 = torch.constant.int 1
%725 = torch.aten.select.int %716, %int0_196, %int1_199 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%726 = torch.aten.item %725 : !torch.vtensor<[1],si64> -> !torch.int
%727 = torch.aten.eq.int %726, %int0_196 : !torch.int, !torch.int -> !torch.bool
%728 = torch.aten.Int.bool %727 : !torch.bool -> !torch.int
%int12_200 = torch.constant.int 12
%729 = torch.aten.mul.int %728, %int12_200 : !torch.int, !torch.int -> !torch.int
%730 = torch.aten.add.int %726, %729 : !torch.int, !torch.int -> !torch.int
%int2_201 = torch.constant.int 2
%731 = torch.aten.select.int %716, %int0_196, %int2_201 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%732 = torch.aten.item %731 : !torch.vtensor<[1],si64> -> !torch.int
%733 = torch.aten.eq.int %732, %int0_196 : !torch.int, !torch.int -> !torch.bool
%734 = torch.aten.Int.bool %733 : !torch.bool -> !torch.int
%int6_202 = torch.constant.int 6
%735 = torch.aten.mul.int %734, %int6_202 : !torch.int, !torch.int -> !torch.int
%736 = torch.aten.add.int %732, %735 : !torch.int, !torch.int -> !torch.int
%737 = torch.prim.ListConstruct %724, %730, %736 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%738 = torch.aten.reshape %715, %737 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_203 = torch.constant.int 0
%int0_204 = torch.constant.int 0
%739 = torch.aten.select.int %717, %int0_203, %int0_204 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%740 = torch.aten.item %739 : !torch.vtensor<[1],si64> -> !torch.int
%741 = torch.aten.eq.int %740, %int0_203 : !torch.int, !torch.int -> !torch.bool
%742 = torch.aten.Int.bool %741 : !torch.bool -> !torch.int
%int1_205 = torch.constant.int 1
%743 = torch.aten.mul.int %742, %int1_205 : !torch.int, !torch.int -> !torch.int
%744 = torch.aten.add.int %740, %743 : !torch.int, !torch.int -> !torch.int
%int1_206 = torch.constant.int 1
%745 = torch.aten.select.int %717, %int0_203, %int1_206 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%746 = torch.aten.item %745 : !torch.vtensor<[1],si64> -> !torch.int
%747 = torch.aten.eq.int %746, %int0_203 : !torch.int, !torch.int -> !torch.bool
%748 = torch.aten.Int.bool %747 : !torch.bool -> !torch.int
%int12_207 = torch.constant.int 12
%749 = torch.aten.mul.int %748, %int12_207 : !torch.int, !torch.int -> !torch.int
%750 = torch.aten.add.int %746, %749 : !torch.int, !torch.int -> !torch.int
%int2_208 = torch.constant.int 2
%751 = torch.aten.select.int %717, %int0_203, %int2_208 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%752 = torch.aten.item %751 : !torch.vtensor<[1],si64> -> !torch.int
%753 = torch.aten.eq.int %752, %int0_203 : !torch.int, !torch.int -> !torch.bool
%754 = torch.aten.Int.bool %753 : !torch.bool -> !torch.int
%int6_209 = torch.constant.int 6
%755 = torch.aten.mul.int %754, %int6_209 : !torch.int, !torch.int -> !torch.int
%756 = torch.aten.add.int %752, %755 : !torch.int, !torch.int -> !torch.int
%757 = torch.prim.ListConstruct %744, %750, %756 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%758 = torch.aten.reshape %658, %757 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_210 = torch.constant.int 0
%int0_211 = torch.constant.int 0
%759 = torch.aten.select.int %718, %int0_210, %int0_211 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%760 = torch.aten.item %759 : !torch.vtensor<[1],si64> -> !torch.int
%761 = torch.aten.eq.int %760, %int0_210 : !torch.int, !torch.int -> !torch.bool
%762 = torch.aten.Int.bool %761 : !torch.bool -> !torch.int
%int1_212 = torch.constant.int 1
%763 = torch.aten.mul.int %762, %int1_212 : !torch.int, !torch.int -> !torch.int
%764 = torch.aten.add.int %760, %763 : !torch.int, !torch.int -> !torch.int
%int1_213 = torch.constant.int 1
%765 = torch.aten.select.int %718, %int0_210, %int1_213 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%766 = torch.aten.item %765 : !torch.vtensor<[1],si64> -> !torch.int
%767 = torch.aten.eq.int %766, %int0_210 : !torch.int, !torch.int -> !torch.bool
%768 = torch.aten.Int.bool %767 : !torch.bool -> !torch.int
%int12_214 = torch.constant.int 12
%769 = torch.aten.mul.int %768, %int12_214 : !torch.int, !torch.int -> !torch.int
%770 = torch.aten.add.int %766, %769 : !torch.int, !torch.int -> !torch.int
%int2_215 = torch.constant.int 2
%771 = torch.aten.select.int %718, %int0_210, %int2_215 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%772 = torch.aten.item %771 : !torch.vtensor<[1],si64> -> !torch.int
%773 = torch.aten.eq.int %772, %int0_210 : !torch.int, !torch.int -> !torch.bool
%774 = torch.aten.Int.bool %773 : !torch.bool -> !torch.int
%int6_216 = torch.constant.int 6
%775 = torch.aten.mul.int %774, %int6_216 : !torch.int, !torch.int -> !torch.int
%776 = torch.aten.add.int %772, %775 : !torch.int, !torch.int -> !torch.int
%777 = torch.prim.ListConstruct %764, %770, %776 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%778 = torch.aten.reshape %687, %777 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int1_217 = torch.constant.int 1
%int2_218 = torch.constant.int 2
%779 = torch.aten.transpose.int %758, %int1_217, %int2_218 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%780 = torch.aten.matmul %738, %779 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
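// Raw attention scores: Q [12,6,64] x K^T [12,64,6] -> [12,6,6], then
// un-flattened back to [1,12,6,6] via another Reshape chain.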
%781 = torch.vtensor.literal(dense_resource<__37> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_219 = torch.constant.int 0
%int0_220 = torch.constant.int 0
%782 = torch.aten.select.int %781, %int0_219, %int0_220 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%783 = torch.aten.item %782 : !torch.vtensor<[1],si64> -> !torch.int
%784 = torch.aten.eq.int %783, %int0_219 : !torch.int, !torch.int -> !torch.bool
%785 = torch.aten.Int.bool %784 : !torch.bool -> !torch.int
%int12_221 = torch.constant.int 12
%786 = torch.aten.mul.int %785, %int12_221 : !torch.int, !torch.int -> !torch.int
%787 = torch.aten.add.int %783, %786 : !torch.int, !torch.int -> !torch.int
%int1_222 = torch.constant.int 1
%788 = torch.aten.select.int %781, %int0_219, %int1_222 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%789 = torch.aten.item %788 : !torch.vtensor<[1],si64> -> !torch.int
%790 = torch.aten.eq.int %789, %int0_219 : !torch.int, !torch.int -> !torch.bool
%791 = torch.aten.Int.bool %790 : !torch.bool -> !torch.int
%int6_223 = torch.constant.int 6
%792 = torch.aten.mul.int %791, %int6_223 : !torch.int, !torch.int -> !torch.int
%793 = torch.aten.add.int %789, %792 : !torch.int, !torch.int -> !torch.int
%int2_224 = torch.constant.int 2
%794 = torch.aten.select.int %781, %int0_219, %int2_224 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%795 = torch.aten.item %794 : !torch.vtensor<[1],si64> -> !torch.int
%796 = torch.aten.eq.int %795, %int0_219 : !torch.int, !torch.int -> !torch.bool
%797 = torch.aten.Int.bool %796 : !torch.bool -> !torch.int
%int6_225 = torch.constant.int 6
%798 = torch.aten.mul.int %797, %int6_225 : !torch.int, !torch.int -> !torch.int
%799 = torch.aten.add.int %795, %798 : !torch.int, !torch.int -> !torch.int
%int3_226 = torch.constant.int 3
%800 = torch.aten.select.int %781, %int0_219, %int3_226 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%801 = torch.aten.item %800 : !torch.vtensor<[1],si64> -> !torch.int
%802 = torch.aten.eq.int %801, %int0_219 : !torch.int, !torch.int -> !torch.bool
%803 = torch.aten.Int.bool %802 : !torch.bool -> !torch.int
%804 = torch.aten.mul.int %803, %int0_219 : !torch.int, !torch.int -> !torch.int
%805 = torch.aten.add.int %801, %804 : !torch.int, !torch.int -> !torch.int
%806 = torch.prim.ListConstruct %787, %793, %799, %805 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%807 = torch.aten.reshape %780, %806 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
%int1_227 = torch.constant.int 1
%808 = torch.aten.add.Tensor %807, %285, %int1_227 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%809 = torch.vtensor.literal(dense_resource<__38> : tensor<f32>) : !torch.vtensor<[],f32>
%810 = torch.aten.maximum %808, %809 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
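// %285 is the additive attention mask (note the broadcast to [?,12,6,6]);
// torch.aten.maximum then clamps the masked scores against a scalar floor
// (resource __38, presumably the f32 minimum) before the softmax.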
%811 = torch.vtensor.literal(dense_resource<__39> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_228 = torch.constant.int 0
%int0_229 = torch.constant.int 0
%812 = torch.aten.select.int %811, %int0_228, %int0_229 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%813 = torch.aten.item %812 : !torch.vtensor<[1],si64> -> !torch.int
%814 = torch.aten.eq.int %813, %int0_228 : !torch.int, !torch.int -> !torch.bool
%815 = torch.aten.Int.bool %814 : !torch.bool -> !torch.int
%int-1_230 = torch.constant.int -1
%816 = torch.aten.mul.int %815, %int-1_230 : !torch.int, !torch.int -> !torch.int
%817 = torch.aten.add.int %813, %816 : !torch.int, !torch.int -> !torch.int
%int1_231 = torch.constant.int 1
%818 = torch.aten.select.int %811, %int0_228, %int1_231 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%819 = torch.aten.item %818 : !torch.vtensor<[1],si64> -> !torch.int
%820 = torch.aten.eq.int %819, %int0_228 : !torch.int, !torch.int -> !torch.bool
%821 = torch.aten.Int.bool %820 : !torch.bool -> !torch.int
%int12_232 = torch.constant.int 12
%822 = torch.aten.mul.int %821, %int12_232 : !torch.int, !torch.int -> !torch.int
%823 = torch.aten.add.int %819, %822 : !torch.int, !torch.int -> !torch.int
%int2_233 = torch.constant.int 2
%824 = torch.aten.select.int %811, %int0_228, %int2_233 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%825 = torch.aten.item %824 : !torch.vtensor<[1],si64> -> !torch.int
%826 = torch.aten.eq.int %825, %int0_228 : !torch.int, !torch.int -> !torch.bool
%827 = torch.aten.Int.bool %826 : !torch.bool -> !torch.int
%int6_234 = torch.constant.int 6
%828 = torch.aten.mul.int %827, %int6_234 : !torch.int, !torch.int -> !torch.int
%829 = torch.aten.add.int %825, %828 : !torch.int, !torch.int -> !torch.int
%830 = torch.prim.ListConstruct %817, %823, %829 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%831 = torch.aten.reshape %810, %830 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
%int2_235 = torch.constant.int 2
%none_236 = torch.constant.none
%832 = torch.aten.softmax.int %831, %int2_235, %none_236 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%833 = torch.aten.matmul %832, %778 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
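// Softmax over the last dim of [12,6,6], then weighting of the values:
// per-head context [12,6,64].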
%834 = torch.vtensor.literal(dense_resource<__40> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_237 = torch.constant.int 0
%int0_238 = torch.constant.int 0
%835 = torch.aten.select.int %834, %int0_237, %int0_238 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%836 = torch.aten.item %835 : !torch.vtensor<[1],si64> -> !torch.int
%837 = torch.aten.eq.int %836, %int0_237 : !torch.int, !torch.int -> !torch.bool
%838 = torch.aten.Int.bool %837 : !torch.bool -> !torch.int
%int12_239 = torch.constant.int 12
%839 = torch.aten.mul.int %838, %int12_239 : !torch.int, !torch.int -> !torch.int
%840 = torch.aten.add.int %836, %839 : !torch.int, !torch.int -> !torch.int
%int1_240 = torch.constant.int 1
%841 = torch.aten.select.int %834, %int0_237, %int1_240 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%842 = torch.aten.item %841 : !torch.vtensor<[1],si64> -> !torch.int
%843 = torch.aten.eq.int %842, %int0_237 : !torch.int, !torch.int -> !torch.bool
%844 = torch.aten.Int.bool %843 : !torch.bool -> !torch.int
%int6_241 = torch.constant.int 6
%845 = torch.aten.mul.int %844, %int6_241 : !torch.int, !torch.int -> !torch.int
%846 = torch.aten.add.int %842, %845 : !torch.int, !torch.int -> !torch.int
%int2_242 = torch.constant.int 2
%847 = torch.aten.select.int %834, %int0_237, %int2_242 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%848 = torch.aten.item %847 : !torch.vtensor<[1],si64> -> !torch.int
%849 = torch.aten.eq.int %848, %int0_237 : !torch.int, !torch.int -> !torch.bool
%850 = torch.aten.Int.bool %849 : !torch.bool -> !torch.int
%int64_243 = torch.constant.int 64
%851 = torch.aten.mul.int %850, %int64_243 : !torch.int, !torch.int -> !torch.int
%852 = torch.aten.add.int %848, %851 : !torch.int, !torch.int -> !torch.int
%int3_244 = torch.constant.int 3
%853 = torch.aten.select.int %834, %int0_237, %int3_244 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%854 = torch.aten.item %853 : !torch.vtensor<[1],si64> -> !torch.int
%855 = torch.aten.eq.int %854, %int0_237 : !torch.int, !torch.int -> !torch.bool
%856 = torch.aten.Int.bool %855 : !torch.bool -> !torch.int
%857 = torch.aten.mul.int %856, %int0_237 : !torch.int, !torch.int -> !torch.int
%858 = torch.aten.add.int %854, %857 : !torch.int, !torch.int -> !torch.int
%859 = torch.prim.ListConstruct %840, %846, %852, %858 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%860 = torch.aten.reshape %833, %859 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_245 = torch.constant.int 1
%int2_246 = torch.constant.int 2
%861 = torch.aten.transpose.int %860, %int1_245, %int2_246 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%862 = torch.vtensor.literal(dense_resource<__41> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_247 = torch.constant.int 0
%int0_248 = torch.constant.int 0
%863 = torch.aten.select.int %862, %int0_247, %int0_248 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%864 = torch.aten.item %863 : !torch.vtensor<[1],si64> -> !torch.int
%865 = torch.aten.eq.int %864, %int0_247 : !torch.int, !torch.int -> !torch.bool
%866 = torch.aten.Int.bool %865 : !torch.bool -> !torch.int
%int1_249 = torch.constant.int 1
%867 = torch.aten.mul.int %866, %int1_249 : !torch.int, !torch.int -> !torch.int
%868 = torch.aten.add.int %864, %867 : !torch.int, !torch.int -> !torch.int
%int1_250 = torch.constant.int 1
%869 = torch.aten.select.int %862, %int0_247, %int1_250 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%870 = torch.aten.item %869 : !torch.vtensor<[1],si64> -> !torch.int
%871 = torch.aten.eq.int %870, %int0_247 : !torch.int, !torch.int -> !torch.bool
%872 = torch.aten.Int.bool %871 : !torch.bool -> !torch.int
%int6_251 = torch.constant.int 6
%873 = torch.aten.mul.int %872, %int6_251 : !torch.int, !torch.int -> !torch.int
%874 = torch.aten.add.int %870, %873 : !torch.int, !torch.int -> !torch.int
%int2_252 = torch.constant.int 2
%875 = torch.aten.select.int %862, %int0_247, %int2_252 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%876 = torch.aten.item %875 : !torch.vtensor<[1],si64> -> !torch.int
%877 = torch.aten.eq.int %876, %int0_247 : !torch.int, !torch.int -> !torch.bool
%878 = torch.aten.Int.bool %877 : !torch.bool -> !torch.int
%int12_253 = torch.constant.int 12
%879 = torch.aten.mul.int %878, %int12_253 : !torch.int, !torch.int -> !torch.int
%880 = torch.aten.add.int %876, %879 : !torch.int, !torch.int -> !torch.int
%881 = torch.prim.ListConstruct %868, %874, %880 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%882 = torch.aten.reshape %861, %881 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
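// Heads merged back to [1,6,768]; output projection (weights %155, bias %19)
// and the residual connection with the block input %622 follow.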
%883 = torch.aten.matmul %882, %155 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_254 = torch.constant.int 1
%884 = torch.aten.add.Tensor %19, %883, %int1_254 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_255 = torch.constant.int 1
%885 = torch.aten.add.Tensor %622, %884, %int1_255 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%886 = torch.vtensor.literal(dense_resource<__42> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_256 = torch.constant.int 0
%int0_257 = torch.constant.int 0
%887 = torch.aten.select.int %886, %int0_256, %int0_257 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%888 = torch.aten.item %887 : !torch.vtensor<[1],si64> -> !torch.int
%889 = torch.aten.eq.int %888, %int0_256 : !torch.int, !torch.int -> !torch.bool
%890 = torch.aten.Int.bool %889 : !torch.bool -> !torch.int
%int1_258 = torch.constant.int 1
%891 = torch.aten.mul.int %890, %int1_258 : !torch.int, !torch.int -> !torch.int
%892 = torch.aten.add.int %888, %891 : !torch.int, !torch.int -> !torch.int
%int1_259 = torch.constant.int 1
%893 = torch.aten.select.int %886, %int0_256, %int1_259 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%894 = torch.aten.item %893 : !torch.vtensor<[1],si64> -> !torch.int
%895 = torch.aten.eq.int %894, %int0_256 : !torch.int, !torch.int -> !torch.bool
%896 = torch.aten.Int.bool %895 : !torch.bool -> !torch.int
%int6_260 = torch.constant.int 6
%897 = torch.aten.mul.int %896, %int6_260 : !torch.int, !torch.int -> !torch.int
%898 = torch.aten.add.int %894, %897 : !torch.int, !torch.int -> !torch.int
%899 = torch.prim.ListConstruct %892, %898 : (!torch.int, !torch.int) -> !torch.list<int>
%900 = torch.aten.reshape %885, %899 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
%float9.999990e-06_261 = torch.constant.float 9.9999997473787516E-6
%int768_262 = torch.constant.int 768
%901 = torch.prim.ListConstruct %int768_262 : (!torch.int) -> !torch.list<int>
%result0_263, %result1_264, %result2_265 = torch.aten.native_layer_norm %900, %901, %26, %27, %float9.999990e-06_261 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
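// Post-attention LayerNorm done; the feed-forward block follows:
// 768 -> 3072 linear (%22/%23), ReLU, 3072 -> 768 linear (%24/%25),
// then the residual add with %900.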
%int0_266 = torch.constant.int 0
%int1_267 = torch.constant.int 1
%902 = torch.aten.transpose.int %22, %int0_266, %int1_267 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%903 = torch.aten.mm %result0_263, %902 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%904 = torch.aten.add.Tensor %903, %23, %int1_267 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%905 = torch.aten.relu %904 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_268 = torch.constant.int 0
%int1_269 = torch.constant.int 1
%906 = torch.aten.transpose.int %24, %int0_268, %int1_269 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%907 = torch.aten.mm %905, %906 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%908 = torch.aten.add.Tensor %907, %25, %int1_269 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_270 = torch.constant.int 1
%909 = torch.aten.add.Tensor %900, %908, %int1_270 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%910 = torch.vtensor.literal(dense_resource<__43> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_271 = torch.constant.int 0
%int0_272 = torch.constant.int 0
%911 = torch.aten.select.int %910, %int0_271, %int0_272 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%912 = torch.aten.item %911 : !torch.vtensor<[1],si64> -> !torch.int
%913 = torch.aten.eq.int %912, %int0_271 : !torch.int, !torch.int -> !torch.bool
%914 = torch.aten.Int.bool %913 : !torch.bool -> !torch.int
%int6_273 = torch.constant.int 6
%915 = torch.aten.mul.int %914, %int6_273 : !torch.int, !torch.int -> !torch.int
%916 = torch.aten.add.int %912, %915 : !torch.int, !torch.int -> !torch.int
%int1_274 = torch.constant.int 1
%917 = torch.aten.select.int %910, %int0_271, %int1_274 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%918 = torch.aten.item %917 : !torch.vtensor<[1],si64> -> !torch.int
%919 = torch.aten.eq.int %918, %int0_271 : !torch.int, !torch.int -> !torch.bool
%920 = torch.aten.Int.bool %919 : !torch.bool -> !torch.int
%int768_275 = torch.constant.int 768
%921 = torch.aten.mul.int %920, %int768_275 : !torch.int, !torch.int -> !torch.int
%922 = torch.aten.add.int %918, %921 : !torch.int, !torch.int -> !torch.int
%int2_276 = torch.constant.int 2
%923 = torch.aten.select.int %910, %int0_271, %int2_276 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%924 = torch.aten.item %923 : !torch.vtensor<[1],si64> -> !torch.int
%925 = torch.aten.eq.int %924, %int0_271 : !torch.int, !torch.int -> !torch.bool
%926 = torch.aten.Int.bool %925 : !torch.bool -> !torch.int
%927 = torch.aten.mul.int %926, %int0_271 : !torch.int, !torch.int -> !torch.int
%928 = torch.aten.add.int %924, %927 : !torch.int, !torch.int -> !torch.int
%929 = torch.prim.ListConstruct %916, %922, %928 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%930 = torch.aten.reshape %909, %929 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%float9.999990e-06_277 = torch.constant.float 9.9999997473787516E-6
%int768_278 = torch.constant.int 768
%931 = torch.prim.ListConstruct %int768_278 : (!torch.int) -> !torch.list<int>
%result0_279, %result1_280, %result2_281 = torch.aten.native_layer_norm %930, %931, %32, %33, %float9.999990e-06_277 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
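// A new decoder layer begins. The query projection (%156, bias %30) is scaled
// by the scalar resource __44 (presumably 1/sqrt(64) for head_dim 64), then
// key (%157, %28) and value (%158, %29) projections; the attention/FFN
// pattern above repeats verbatim with fresh weight and shape resources.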
%932 = torch.aten.matmul %result0_279, %156 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_282 = torch.constant.int 1
%933 = torch.aten.add.Tensor %30, %932, %int1_282 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%934 = torch.vtensor.literal(dense_resource<__44> : tensor<f32>) : !torch.vtensor<[],f32>
%935 = torch.aten.mul.Tensor %933, %934 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%936 = torch.aten.matmul %result0_279, %157 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_283 = torch.constant.int 1
%937 = torch.aten.add.Tensor %28, %936, %int1_283 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%938 = torch.vtensor.literal(dense_resource<__45> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%939 = torch.vtensor.literal(dense_resource<__46> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_284 = torch.constant.int 0
%int0_285 = torch.constant.int 0
%940 = torch.aten.select.int %938, %int0_284, %int0_285 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%941 = torch.aten.item %940 : !torch.vtensor<[1],si64> -> !torch.int
%942 = torch.aten.eq.int %941, %int0_284 : !torch.int, !torch.int -> !torch.bool
%943 = torch.aten.Int.bool %942 : !torch.bool -> !torch.int
%int1_286 = torch.constant.int 1
%944 = torch.aten.mul.int %943, %int1_286 : !torch.int, !torch.int -> !torch.int
%945 = torch.aten.add.int %941, %944 : !torch.int, !torch.int -> !torch.int
%int1_287 = torch.constant.int 1
%946 = torch.aten.select.int %938, %int0_284, %int1_287 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%947 = torch.aten.item %946 : !torch.vtensor<[1],si64> -> !torch.int
%948 = torch.aten.eq.int %947, %int0_284 : !torch.int, !torch.int -> !torch.bool
%949 = torch.aten.Int.bool %948 : !torch.bool -> !torch.int
%int6_288 = torch.constant.int 6
%950 = torch.aten.mul.int %949, %int6_288 : !torch.int, !torch.int -> !torch.int
%951 = torch.aten.add.int %947, %950 : !torch.int, !torch.int -> !torch.int
%int2_289 = torch.constant.int 2
%952 = torch.aten.select.int %938, %int0_284, %int2_289 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%953 = torch.aten.item %952 : !torch.vtensor<[1],si64> -> !torch.int
%954 = torch.aten.eq.int %953, %int0_284 : !torch.int, !torch.int -> !torch.bool
%955 = torch.aten.Int.bool %954 : !torch.bool -> !torch.int
%int768_290 = torch.constant.int 768
%956 = torch.aten.mul.int %955, %int768_290 : !torch.int, !torch.int -> !torch.int
%957 = torch.aten.add.int %953, %956 : !torch.int, !torch.int -> !torch.int
%int3_291 = torch.constant.int 3
%958 = torch.aten.select.int %938, %int0_284, %int3_291 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%959 = torch.aten.item %958 : !torch.vtensor<[1],si64> -> !torch.int
%960 = torch.aten.eq.int %959, %int0_284 : !torch.int, !torch.int -> !torch.bool
%961 = torch.aten.Int.bool %960 : !torch.bool -> !torch.int
%962 = torch.aten.mul.int %961, %int0_284 : !torch.int, !torch.int -> !torch.int
%963 = torch.aten.add.int %959, %962 : !torch.int, !torch.int -> !torch.int
%964 = torch.prim.ListConstruct %945, %951, %957, %963 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%965 = torch.aten.reshape %937, %964 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_292 = torch.constant.int 1
%int2_293 = torch.constant.int 2
%966 = torch.aten.transpose.int %965, %int1_292, %int2_293 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%967 = torch.aten.matmul %result0_279, %158 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_294 = torch.constant.int 1
%968 = torch.aten.add.Tensor %29, %967, %int1_294 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_295 = torch.constant.int 0
%int0_296 = torch.constant.int 0
%969 = torch.aten.select.int %939, %int0_295, %int0_296 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%970 = torch.aten.item %969 : !torch.vtensor<[1],si64> -> !torch.int
%971 = torch.aten.eq.int %970, %int0_295 : !torch.int, !torch.int -> !torch.bool
%972 = torch.aten.Int.bool %971 : !torch.bool -> !torch.int
%int1_297 = torch.constant.int 1
%973 = torch.aten.mul.int %972, %int1_297 : !torch.int, !torch.int -> !torch.int
%974 = torch.aten.add.int %970, %973 : !torch.int, !torch.int -> !torch.int
%int1_298 = torch.constant.int 1
%975 = torch.aten.select.int %939, %int0_295, %int1_298 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%976 = torch.aten.item %975 : !torch.vtensor<[1],si64> -> !torch.int
%977 = torch.aten.eq.int %976, %int0_295 : !torch.int, !torch.int -> !torch.bool
%978 = torch.aten.Int.bool %977 : !torch.bool -> !torch.int
%int6_299 = torch.constant.int 6
%979 = torch.aten.mul.int %978, %int6_299 : !torch.int, !torch.int -> !torch.int
%980 = torch.aten.add.int %976, %979 : !torch.int, !torch.int -> !torch.int
%int2_300 = torch.constant.int 2
%981 = torch.aten.select.int %939, %int0_295, %int2_300 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%982 = torch.aten.item %981 : !torch.vtensor<[1],si64> -> !torch.int
%983 = torch.aten.eq.int %982, %int0_295 : !torch.int, !torch.int -> !torch.bool
%984 = torch.aten.Int.bool %983 : !torch.bool -> !torch.int
%int768_301 = torch.constant.int 768
%985 = torch.aten.mul.int %984, %int768_301 : !torch.int, !torch.int -> !torch.int
%986 = torch.aten.add.int %982, %985 : !torch.int, !torch.int -> !torch.int
%int3_302 = torch.constant.int 3
%987 = torch.aten.select.int %939, %int0_295, %int3_302 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%988 = torch.aten.item %987 : !torch.vtensor<[1],si64> -> !torch.int
%989 = torch.aten.eq.int %988, %int0_295 : !torch.int, !torch.int -> !torch.bool
%990 = torch.aten.Int.bool %989 : !torch.bool -> !torch.int
%991 = torch.aten.mul.int %990, %int0_295 : !torch.int, !torch.int -> !torch.int
%992 = torch.aten.add.int %988, %991 : !torch.int, !torch.int -> !torch.int
%993 = torch.prim.ListConstruct %974, %980, %986, %992 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%994 = torch.aten.reshape %968, %993 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_303 = torch.constant.int 1
%int2_304 = torch.constant.int 2
%995 = torch.aten.transpose.int %994, %int1_303, %int2_304 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%996 = torch.vtensor.literal(dense_resource<__47> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_305 = torch.constant.int 0
%int0_306 = torch.constant.int 0
%997 = torch.aten.select.int %996, %int0_305, %int0_306 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%998 = torch.aten.item %997 : !torch.vtensor<[1],si64> -> !torch.int
%999 = torch.aten.eq.int %998, %int0_305 : !torch.int, !torch.int -> !torch.bool
%1000 = torch.aten.Int.bool %999 : !torch.bool -> !torch.int
%int1_307 = torch.constant.int 1
%1001 = torch.aten.mul.int %1000, %int1_307 : !torch.int, !torch.int -> !torch.int
%1002 = torch.aten.add.int %998, %1001 : !torch.int, !torch.int -> !torch.int
%int1_308 = torch.constant.int 1
%1003 = torch.aten.select.int %996, %int0_305, %int1_308 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1004 = torch.aten.item %1003 : !torch.vtensor<[1],si64> -> !torch.int
%1005 = torch.aten.eq.int %1004, %int0_305 : !torch.int, !torch.int -> !torch.bool
%1006 = torch.aten.Int.bool %1005 : !torch.bool -> !torch.int
%int6_309 = torch.constant.int 6
%1007 = torch.aten.mul.int %1006, %int6_309 : !torch.int, !torch.int -> !torch.int
%1008 = torch.aten.add.int %1004, %1007 : !torch.int, !torch.int -> !torch.int
%int2_310 = torch.constant.int 2
%1009 = torch.aten.select.int %996, %int0_305, %int2_310 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1010 = torch.aten.item %1009 : !torch.vtensor<[1],si64> -> !torch.int
%1011 = torch.aten.eq.int %1010, %int0_305 : !torch.int, !torch.int -> !torch.bool
%1012 = torch.aten.Int.bool %1011 : !torch.bool -> !torch.int
%int768_311 = torch.constant.int 768
%1013 = torch.aten.mul.int %1012, %int768_311 : !torch.int, !torch.int -> !torch.int
%1014 = torch.aten.add.int %1010, %1013 : !torch.int, !torch.int -> !torch.int
%int3_312 = torch.constant.int 3
%1015 = torch.aten.select.int %996, %int0_305, %int3_312 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1016 = torch.aten.item %1015 : !torch.vtensor<[1],si64> -> !torch.int
%1017 = torch.aten.eq.int %1016, %int0_305 : !torch.int, !torch.int -> !torch.bool
%1018 = torch.aten.Int.bool %1017 : !torch.bool -> !torch.int
%1019 = torch.aten.mul.int %1018, %int0_305 : !torch.int, !torch.int -> !torch.int
%1020 = torch.aten.add.int %1016, %1019 : !torch.int, !torch.int -> !torch.int
%1021 = torch.prim.ListConstruct %1002, %1008, %1014, %1020 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1022 = torch.aten.reshape %935, %1021 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_313 = torch.constant.int 1
%int2_314 = torch.constant.int 2
%1023 = torch.aten.transpose.int %1022, %int1_313, %int2_314 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1024 = torch.vtensor.literal(dense_resource<__48> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1025 = torch.vtensor.literal(dense_resource<__49> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1026 = torch.vtensor.literal(dense_resource<__50> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_315 = torch.constant.int 0
%int0_316 = torch.constant.int 0
%1027 = torch.aten.select.int %1024, %int0_315, %int0_316 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1028 = torch.aten.item %1027 : !torch.vtensor<[1],si64> -> !torch.int
%1029 = torch.aten.eq.int %1028, %int0_315 : !torch.int, !torch.int -> !torch.bool
%1030 = torch.aten.Int.bool %1029 : !torch.bool -> !torch.int
%int1_317 = torch.constant.int 1
%1031 = torch.aten.mul.int %1030, %int1_317 : !torch.int, !torch.int -> !torch.int
%1032 = torch.aten.add.int %1028, %1031 : !torch.int, !torch.int -> !torch.int
%int1_318 = torch.constant.int 1
%1033 = torch.aten.select.int %1024, %int0_315, %int1_318 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1034 = torch.aten.item %1033 : !torch.vtensor<[1],si64> -> !torch.int
%1035 = torch.aten.eq.int %1034, %int0_315 : !torch.int, !torch.int -> !torch.bool
%1036 = torch.aten.Int.bool %1035 : !torch.bool -> !torch.int
%int12_319 = torch.constant.int 12
%1037 = torch.aten.mul.int %1036, %int12_319 : !torch.int, !torch.int -> !torch.int
%1038 = torch.aten.add.int %1034, %1037 : !torch.int, !torch.int -> !torch.int
%int2_320 = torch.constant.int 2
%1039 = torch.aten.select.int %1024, %int0_315, %int2_320 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1040 = torch.aten.item %1039 : !torch.vtensor<[1],si64> -> !torch.int
%1041 = torch.aten.eq.int %1040, %int0_315 : !torch.int, !torch.int -> !torch.bool
%1042 = torch.aten.Int.bool %1041 : !torch.bool -> !torch.int
%int6_321 = torch.constant.int 6
%1043 = torch.aten.mul.int %1042, %int6_321 : !torch.int, !torch.int -> !torch.int
%1044 = torch.aten.add.int %1040, %1043 : !torch.int, !torch.int -> !torch.int
%1045 = torch.prim.ListConstruct %1032, %1038, %1044 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1046 = torch.aten.reshape %1023, %1045 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_322 = torch.constant.int 0
%int0_323 = torch.constant.int 0
%1047 = torch.aten.select.int %1025, %int0_322, %int0_323 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1048 = torch.aten.item %1047 : !torch.vtensor<[1],si64> -> !torch.int
%1049 = torch.aten.eq.int %1048, %int0_322 : !torch.int, !torch.int -> !torch.bool
%1050 = torch.aten.Int.bool %1049 : !torch.bool -> !torch.int
%int1_324 = torch.constant.int 1
%1051 = torch.aten.mul.int %1050, %int1_324 : !torch.int, !torch.int -> !torch.int
%1052 = torch.aten.add.int %1048, %1051 : !torch.int, !torch.int -> !torch.int
%int1_325 = torch.constant.int 1
%1053 = torch.aten.select.int %1025, %int0_322, %int1_325 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1054 = torch.aten.item %1053 : !torch.vtensor<[1],si64> -> !torch.int
%1055 = torch.aten.eq.int %1054, %int0_322 : !torch.int, !torch.int -> !torch.bool
%1056 = torch.aten.Int.bool %1055 : !torch.bool -> !torch.int
%int12_326 = torch.constant.int 12
%1057 = torch.aten.mul.int %1056, %int12_326 : !torch.int, !torch.int -> !torch.int
%1058 = torch.aten.add.int %1054, %1057 : !torch.int, !torch.int -> !torch.int
%int2_327 = torch.constant.int 2
%1059 = torch.aten.select.int %1025, %int0_322, %int2_327 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1060 = torch.aten.item %1059 : !torch.vtensor<[1],si64> -> !torch.int
%1061 = torch.aten.eq.int %1060, %int0_322 : !torch.int, !torch.int -> !torch.bool
%1062 = torch.aten.Int.bool %1061 : !torch.bool -> !torch.int
%int6_328 = torch.constant.int 6
%1063 = torch.aten.mul.int %1062, %int6_328 : !torch.int, !torch.int -> !torch.int
%1064 = torch.aten.add.int %1060, %1063 : !torch.int, !torch.int -> !torch.int
%1065 = torch.prim.ListConstruct %1052, %1058, %1064 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1066 = torch.aten.reshape %966, %1065 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_329 = torch.constant.int 0
%int0_330 = torch.constant.int 0
%1067 = torch.aten.select.int %1026, %int0_329, %int0_330 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1068 = torch.aten.item %1067 : !torch.vtensor<[1],si64> -> !torch.int
%1069 = torch.aten.eq.int %1068, %int0_329 : !torch.int, !torch.int -> !torch.bool
%1070 = torch.aten.Int.bool %1069 : !torch.bool -> !torch.int
%int1_331 = torch.constant.int 1
%1071 = torch.aten.mul.int %1070, %int1_331 : !torch.int, !torch.int -> !torch.int
%1072 = torch.aten.add.int %1068, %1071 : !torch.int, !torch.int -> !torch.int
%int1_332 = torch.constant.int 1
%1073 = torch.aten.select.int %1026, %int0_329, %int1_332 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1074 = torch.aten.item %1073 : !torch.vtensor<[1],si64> -> !torch.int
%1075 = torch.aten.eq.int %1074, %int0_329 : !torch.int, !torch.int -> !torch.bool
%1076 = torch.aten.Int.bool %1075 : !torch.bool -> !torch.int
%int12_333 = torch.constant.int 12
%1077 = torch.aten.mul.int %1076, %int12_333 : !torch.int, !torch.int -> !torch.int
%1078 = torch.aten.add.int %1074, %1077 : !torch.int, !torch.int -> !torch.int
%int2_334 = torch.constant.int 2
%1079 = torch.aten.select.int %1026, %int0_329, %int2_334 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1080 = torch.aten.item %1079 : !torch.vtensor<[1],si64> -> !torch.int
%1081 = torch.aten.eq.int %1080, %int0_329 : !torch.int, !torch.int -> !torch.bool
%1082 = torch.aten.Int.bool %1081 : !torch.bool -> !torch.int
%int6_335 = torch.constant.int 6
%1083 = torch.aten.mul.int %1082, %int6_335 : !torch.int, !torch.int -> !torch.int
%1084 = torch.aten.add.int %1080, %1083 : !torch.int, !torch.int -> !torch.int
%1085 = torch.prim.ListConstruct %1072, %1078, %1084 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1086 = torch.aten.reshape %995, %1085 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int1_336 = torch.constant.int 1
%int2_337 = torch.constant.int 2
%1087 = torch.aten.transpose.int %1066, %int1_336, %int2_337 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%1088 = torch.aten.matmul %1046, %1087 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
%1089 = torch.vtensor.literal(dense_resource<__51> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_338 = torch.constant.int 0
%int0_339 = torch.constant.int 0
%1090 = torch.aten.select.int %1089, %int0_338, %int0_339 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1091 = torch.aten.item %1090 : !torch.vtensor<[1],si64> -> !torch.int
%1092 = torch.aten.eq.int %1091, %int0_338 : !torch.int, !torch.int -> !torch.bool
%1093 = torch.aten.Int.bool %1092 : !torch.bool -> !torch.int
%int12_340 = torch.constant.int 12
%1094 = torch.aten.mul.int %1093, %int12_340 : !torch.int, !torch.int -> !torch.int
%1095 = torch.aten.add.int %1091, %1094 : !torch.int, !torch.int -> !torch.int
%int1_341 = torch.constant.int 1
%1096 = torch.aten.select.int %1089, %int0_338, %int1_341 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1097 = torch.aten.item %1096 : !torch.vtensor<[1],si64> -> !torch.int
%1098 = torch.aten.eq.int %1097, %int0_338 : !torch.int, !torch.int -> !torch.bool
%1099 = torch.aten.Int.bool %1098 : !torch.bool -> !torch.int
%int6_342 = torch.constant.int 6
%1100 = torch.aten.mul.int %1099, %int6_342 : !torch.int, !torch.int -> !torch.int
%1101 = torch.aten.add.int %1097, %1100 : !torch.int, !torch.int -> !torch.int
%int2_343 = torch.constant.int 2
%1102 = torch.aten.select.int %1089, %int0_338, %int2_343 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1103 = torch.aten.item %1102 : !torch.vtensor<[1],si64> -> !torch.int
%1104 = torch.aten.eq.int %1103, %int0_338 : !torch.int, !torch.int -> !torch.bool
%1105 = torch.aten.Int.bool %1104 : !torch.bool -> !torch.int
%int6_344 = torch.constant.int 6
%1106 = torch.aten.mul.int %1105, %int6_344 : !torch.int, !torch.int -> !torch.int
%1107 = torch.aten.add.int %1103, %1106 : !torch.int, !torch.int -> !torch.int
%int3_345 = torch.constant.int 3
%1108 = torch.aten.select.int %1089, %int0_338, %int3_345 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1109 = torch.aten.item %1108 : !torch.vtensor<[1],si64> -> !torch.int
%1110 = torch.aten.eq.int %1109, %int0_338 : !torch.int, !torch.int -> !torch.bool
%1111 = torch.aten.Int.bool %1110 : !torch.bool -> !torch.int
%1112 = torch.aten.mul.int %1111, %int0_338 : !torch.int, !torch.int -> !torch.int
%1113 = torch.aten.add.int %1109, %1112 : !torch.int, !torch.int -> !torch.int
%1114 = torch.prim.ListConstruct %1095, %1101, %1107, %1113 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1115 = torch.aten.reshape %1088, %1114 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
%int1_346 = torch.constant.int 1
%1116 = torch.aten.add.Tensor %1115, %285, %int1_346 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%1117 = torch.vtensor.literal(dense_resource<__52> : tensor<f32>) : !torch.vtensor<[],f32>
%1118 = torch.aten.maximum %1116, %1117 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
%1119 = torch.vtensor.literal(dense_resource<__53> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_347 = torch.constant.int 0
%int0_348 = torch.constant.int 0
%1120 = torch.aten.select.int %1119, %int0_347, %int0_348 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1121 = torch.aten.item %1120 : !torch.vtensor<[1],si64> -> !torch.int
%1122 = torch.aten.eq.int %1121, %int0_347 : !torch.int, !torch.int -> !torch.bool
%1123 = torch.aten.Int.bool %1122 : !torch.bool -> !torch.int
%int-1_349 = torch.constant.int -1
%1124 = torch.aten.mul.int %1123, %int-1_349 : !torch.int, !torch.int -> !torch.int
%1125 = torch.aten.add.int %1121, %1124 : !torch.int, !torch.int -> !torch.int
%int1_350 = torch.constant.int 1
%1126 = torch.aten.select.int %1119, %int0_347, %int1_350 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1127 = torch.aten.item %1126 : !torch.vtensor<[1],si64> -> !torch.int
%1128 = torch.aten.eq.int %1127, %int0_347 : !torch.int, !torch.int -> !torch.bool
%1129 = torch.aten.Int.bool %1128 : !torch.bool -> !torch.int
%int12_351 = torch.constant.int 12
%1130 = torch.aten.mul.int %1129, %int12_351 : !torch.int, !torch.int -> !torch.int
%1131 = torch.aten.add.int %1127, %1130 : !torch.int, !torch.int -> !torch.int
%int2_352 = torch.constant.int 2
%1132 = torch.aten.select.int %1119, %int0_347, %int2_352 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1133 = torch.aten.item %1132 : !torch.vtensor<[1],si64> -> !torch.int
%1134 = torch.aten.eq.int %1133, %int0_347 : !torch.int, !torch.int -> !torch.bool
%1135 = torch.aten.Int.bool %1134 : !torch.bool -> !torch.int
%int6_353 = torch.constant.int 6
%1136 = torch.aten.mul.int %1135, %int6_353 : !torch.int, !torch.int -> !torch.int
%1137 = torch.aten.add.int %1133, %1136 : !torch.int, !torch.int -> !torch.int
%1138 = torch.prim.ListConstruct %1125, %1131, %1137 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1139 = torch.aten.reshape %1118, %1138 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
%int2_354 = torch.constant.int 2
%none_355 = torch.constant.none
%1140 = torch.aten.softmax.int %1139, %int2_354, %none_355 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%1141 = torch.aten.matmul %1140, %1086 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
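// Per-head context for this layer; merge-heads, output projection (%159, %31)
// and the residual follow as before.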
%1142 = torch.vtensor.literal(dense_resource<__54> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_356 = torch.constant.int 0
%int0_357 = torch.constant.int 0
%1143 = torch.aten.select.int %1142, %int0_356, %int0_357 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1144 = torch.aten.item %1143 : !torch.vtensor<[1],si64> -> !torch.int
%1145 = torch.aten.eq.int %1144, %int0_356 : !torch.int, !torch.int -> !torch.bool
%1146 = torch.aten.Int.bool %1145 : !torch.bool -> !torch.int
%int12_358 = torch.constant.int 12
%1147 = torch.aten.mul.int %1146, %int12_358 : !torch.int, !torch.int -> !torch.int
%1148 = torch.aten.add.int %1144, %1147 : !torch.int, !torch.int -> !torch.int
%int1_359 = torch.constant.int 1
%1149 = torch.aten.select.int %1142, %int0_356, %int1_359 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1150 = torch.aten.item %1149 : !torch.vtensor<[1],si64> -> !torch.int
%1151 = torch.aten.eq.int %1150, %int0_356 : !torch.int, !torch.int -> !torch.bool
%1152 = torch.aten.Int.bool %1151 : !torch.bool -> !torch.int
%int6_360 = torch.constant.int 6
%1153 = torch.aten.mul.int %1152, %int6_360 : !torch.int, !torch.int -> !torch.int
%1154 = torch.aten.add.int %1150, %1153 : !torch.int, !torch.int -> !torch.int
%int2_361 = torch.constant.int 2
%1155 = torch.aten.select.int %1142, %int0_356, %int2_361 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1156 = torch.aten.item %1155 : !torch.vtensor<[1],si64> -> !torch.int
%1157 = torch.aten.eq.int %1156, %int0_356 : !torch.int, !torch.int -> !torch.bool
%1158 = torch.aten.Int.bool %1157 : !torch.bool -> !torch.int
%int64_362 = torch.constant.int 64
%1159 = torch.aten.mul.int %1158, %int64_362 : !torch.int, !torch.int -> !torch.int
%1160 = torch.aten.add.int %1156, %1159 : !torch.int, !torch.int -> !torch.int
%int3_363 = torch.constant.int 3
%1161 = torch.aten.select.int %1142, %int0_356, %int3_363 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1162 = torch.aten.item %1161 : !torch.vtensor<[1],si64> -> !torch.int
%1163 = torch.aten.eq.int %1162, %int0_356 : !torch.int, !torch.int -> !torch.bool
%1164 = torch.aten.Int.bool %1163 : !torch.bool -> !torch.int
%1165 = torch.aten.mul.int %1164, %int0_356 : !torch.int, !torch.int -> !torch.int
%1166 = torch.aten.add.int %1162, %1165 : !torch.int, !torch.int -> !torch.int
%1167 = torch.prim.ListConstruct %1148, %1154, %1160, %1166 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1168 = torch.aten.reshape %1141, %1167 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_364 = torch.constant.int 1
%int2_365 = torch.constant.int 2
%1169 = torch.aten.transpose.int %1168, %int1_364, %int2_365 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%1170 = torch.vtensor.literal(dense_resource<__55> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_366 = torch.constant.int 0
%int0_367 = torch.constant.int 0
%1171 = torch.aten.select.int %1170, %int0_366, %int0_367 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1172 = torch.aten.item %1171 : !torch.vtensor<[1],si64> -> !torch.int
%1173 = torch.aten.eq.int %1172, %int0_366 : !torch.int, !torch.int -> !torch.bool
%1174 = torch.aten.Int.bool %1173 : !torch.bool -> !torch.int
%int1_368 = torch.constant.int 1
%1175 = torch.aten.mul.int %1174, %int1_368 : !torch.int, !torch.int -> !torch.int
%1176 = torch.aten.add.int %1172, %1175 : !torch.int, !torch.int -> !torch.int
%int1_369 = torch.constant.int 1
%1177 = torch.aten.select.int %1170, %int0_366, %int1_369 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1178 = torch.aten.item %1177 : !torch.vtensor<[1],si64> -> !torch.int
%1179 = torch.aten.eq.int %1178, %int0_366 : !torch.int, !torch.int -> !torch.bool
%1180 = torch.aten.Int.bool %1179 : !torch.bool -> !torch.int
%int6_370 = torch.constant.int 6
%1181 = torch.aten.mul.int %1180, %int6_370 : !torch.int, !torch.int -> !torch.int
%1182 = torch.aten.add.int %1178, %1181 : !torch.int, !torch.int -> !torch.int
%int2_371 = torch.constant.int 2
%1183 = torch.aten.select.int %1170, %int0_366, %int2_371 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1184 = torch.aten.item %1183 : !torch.vtensor<[1],si64> -> !torch.int
%1185 = torch.aten.eq.int %1184, %int0_366 : !torch.int, !torch.int -> !torch.bool
%1186 = torch.aten.Int.bool %1185 : !torch.bool -> !torch.int
%int12_372 = torch.constant.int 12
%1187 = torch.aten.mul.int %1186, %int12_372 : !torch.int, !torch.int -> !torch.int
%1188 = torch.aten.add.int %1184, %1187 : !torch.int, !torch.int -> !torch.int
%1189 = torch.prim.ListConstruct %1176, %1182, %1188 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1190 = torch.aten.reshape %1169, %1189 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%1191 = torch.aten.matmul %1190, %159 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_373 = torch.constant.int 1
%1192 = torch.aten.add.Tensor %31, %1191, %int1_373 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_374 = torch.constant.int 1
%1193 = torch.aten.add.Tensor %930, %1192, %int1_374 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1194 = torch.vtensor.literal(dense_resource<__56> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_375 = torch.constant.int 0
%int0_376 = torch.constant.int 0
%1195 = torch.aten.select.int %1194, %int0_375, %int0_376 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1196 = torch.aten.item %1195 : !torch.vtensor<[1],si64> -> !torch.int
%1197 = torch.aten.eq.int %1196, %int0_375 : !torch.int, !torch.int -> !torch.bool
%1198 = torch.aten.Int.bool %1197 : !torch.bool -> !torch.int
%int1_377 = torch.constant.int 1
%1199 = torch.aten.mul.int %1198, %int1_377 : !torch.int, !torch.int -> !torch.int
%1200 = torch.aten.add.int %1196, %1199 : !torch.int, !torch.int -> !torch.int
%int1_378 = torch.constant.int 1
%1201 = torch.aten.select.int %1194, %int0_375, %int1_378 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1202 = torch.aten.item %1201 : !torch.vtensor<[1],si64> -> !torch.int
%1203 = torch.aten.eq.int %1202, %int0_375 : !torch.int, !torch.int -> !torch.bool
%1204 = torch.aten.Int.bool %1203 : !torch.bool -> !torch.int
%int6_379 = torch.constant.int 6
%1205 = torch.aten.mul.int %1204, %int6_379 : !torch.int, !torch.int -> !torch.int
%1206 = torch.aten.add.int %1202, %1205 : !torch.int, !torch.int -> !torch.int
%1207 = torch.prim.ListConstruct %1200, %1206 : (!torch.int, !torch.int) -> !torch.list<int>
%1208 = torch.aten.reshape %1193, %1207 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
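// LayerNorm followed by the feed-forward block:
// relu(x @ W1^T + b1) @ W2^T + b2, with a residual add.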
%float9.999990e-06_380 = torch.constant.float 9.9999997473787516E-6
%int768_381 = torch.constant.int 768
%1209 = torch.prim.ListConstruct %int768_381 : (!torch.int) -> !torch.list<int>
%result0_382, %result1_383, %result2_384 = torch.aten.native_layer_norm %1208, %1209, %38, %39, %float9.999990e-06_380 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_385 = torch.constant.int 0
%int1_386 = torch.constant.int 1
%1210 = torch.aten.transpose.int %34, %int0_385, %int1_386 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%1211 = torch.aten.mm %result0_382, %1210 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%1212 = torch.aten.add.Tensor %1211, %35, %int1_386 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%1213 = torch.aten.relu %1212 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_387 = torch.constant.int 0
%int1_388 = torch.constant.int 1
%1214 = torch.aten.transpose.int %36, %int0_387, %int1_388 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%1215 = torch.aten.mm %1213, %1214 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%1216 = torch.aten.add.Tensor %1215, %37, %int1_388 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_389 = torch.constant.int 1
%1217 = torch.aten.add.Tensor %1208, %1216, %int1_389 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
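// Reshape back to [1,6,768]; a 0 in the shape tensor again keeps the input dim.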
%1218 = torch.vtensor.literal(dense_resource<__57> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_390 = torch.constant.int 0
%int0_391 = torch.constant.int 0
%1219 = torch.aten.select.int %1218, %int0_390, %int0_391 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1220 = torch.aten.item %1219 : !torch.vtensor<[1],si64> -> !torch.int
%1221 = torch.aten.eq.int %1220, %int0_390 : !torch.int, !torch.int -> !torch.bool
%1222 = torch.aten.Int.bool %1221 : !torch.bool -> !torch.int
%int6_392 = torch.constant.int 6
%1223 = torch.aten.mul.int %1222, %int6_392 : !torch.int, !torch.int -> !torch.int
%1224 = torch.aten.add.int %1220, %1223 : !torch.int, !torch.int -> !torch.int
%int1_393 = torch.constant.int 1
%1225 = torch.aten.select.int %1218, %int0_390, %int1_393 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1226 = torch.aten.item %1225 : !torch.vtensor<[1],si64> -> !torch.int
%1227 = torch.aten.eq.int %1226, %int0_390 : !torch.int, !torch.int -> !torch.bool
%1228 = torch.aten.Int.bool %1227 : !torch.bool -> !torch.int
%int768_394 = torch.constant.int 768
%1229 = torch.aten.mul.int %1228, %int768_394 : !torch.int, !torch.int -> !torch.int
%1230 = torch.aten.add.int %1226, %1229 : !torch.int, !torch.int -> !torch.int
%int2_395 = torch.constant.int 2
%1231 = torch.aten.select.int %1218, %int0_390, %int2_395 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1232 = torch.aten.item %1231 : !torch.vtensor<[1],si64> -> !torch.int
%1233 = torch.aten.eq.int %1232, %int0_390 : !torch.int, !torch.int -> !torch.bool
%1234 = torch.aten.Int.bool %1233 : !torch.bool -> !torch.int
%1235 = torch.aten.mul.int %1234, %int0_390 : !torch.int, !torch.int -> !torch.int
%1236 = torch.aten.add.int %1232, %1235 : !torch.int, !torch.int -> !torch.int
%1237 = torch.prim.ListConstruct %1224, %1230, %1236 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1238 = torch.aten.reshape %1217, %1237 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
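// Next self-attention sublayer: pre-attention LayerNorm, then the Q/K/V
// projections. Q is scaled by the scalar __58 (likely 1/sqrt(64) for 64-dim heads).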
%float9.999990e-06_396 = torch.constant.float 9.9999997473787516E-6
%int768_397 = torch.constant.int 768
%1239 = torch.prim.ListConstruct %int768_397 : (!torch.int) -> !torch.list<int>
%result0_398, %result1_399, %result2_400 = torch.aten.native_layer_norm %1238, %1239, %44, %45, %float9.999990e-06_396 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
%1240 = torch.aten.matmul %result0_398, %160 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_401 = torch.constant.int 1
%1241 = torch.aten.add.Tensor %42, %1240, %int1_401 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1242 = torch.vtensor.literal(dense_resource<__58> : tensor<f32>) : !torch.vtensor<[],f32>
%1243 = torch.aten.mul.Tensor %1241, %1242 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%1244 = torch.aten.matmul %result0_398, %161 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_402 = torch.constant.int 1
%1245 = torch.aten.add.Tensor %40, %1244, %int1_402 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
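// Split K into heads: reshape [1,6,768] -> [1,6,12,64], transpose to [1,12,6,64].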
%1246 = torch.vtensor.literal(dense_resource<__59> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%1247 = torch.vtensor.literal(dense_resource<__60> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_403 = torch.constant.int 0
%int0_404 = torch.constant.int 0
%1248 = torch.aten.select.int %1246, %int0_403, %int0_404 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1249 = torch.aten.item %1248 : !torch.vtensor<[1],si64> -> !torch.int
%1250 = torch.aten.eq.int %1249, %int0_403 : !torch.int, !torch.int -> !torch.bool
%1251 = torch.aten.Int.bool %1250 : !torch.bool -> !torch.int
%int1_405 = torch.constant.int 1
%1252 = torch.aten.mul.int %1251, %int1_405 : !torch.int, !torch.int -> !torch.int
%1253 = torch.aten.add.int %1249, %1252 : !torch.int, !torch.int -> !torch.int
%int1_406 = torch.constant.int 1
%1254 = torch.aten.select.int %1246, %int0_403, %int1_406 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1255 = torch.aten.item %1254 : !torch.vtensor<[1],si64> -> !torch.int
%1256 = torch.aten.eq.int %1255, %int0_403 : !torch.int, !torch.int -> !torch.bool
%1257 = torch.aten.Int.bool %1256 : !torch.bool -> !torch.int
%int6_407 = torch.constant.int 6
%1258 = torch.aten.mul.int %1257, %int6_407 : !torch.int, !torch.int -> !torch.int
%1259 = torch.aten.add.int %1255, %1258 : !torch.int, !torch.int -> !torch.int
%int2_408 = torch.constant.int 2
%1260 = torch.aten.select.int %1246, %int0_403, %int2_408 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1261 = torch.aten.item %1260 : !torch.vtensor<[1],si64> -> !torch.int
%1262 = torch.aten.eq.int %1261, %int0_403 : !torch.int, !torch.int -> !torch.bool
%1263 = torch.aten.Int.bool %1262 : !torch.bool -> !torch.int
%int768_409 = torch.constant.int 768
%1264 = torch.aten.mul.int %1263, %int768_409 : !torch.int, !torch.int -> !torch.int
%1265 = torch.aten.add.int %1261, %1264 : !torch.int, !torch.int -> !torch.int
%int3_410 = torch.constant.int 3
%1266 = torch.aten.select.int %1246, %int0_403, %int3_410 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1267 = torch.aten.item %1266 : !torch.vtensor<[1],si64> -> !torch.int
%1268 = torch.aten.eq.int %1267, %int0_403 : !torch.int, !torch.int -> !torch.bool
%1269 = torch.aten.Int.bool %1268 : !torch.bool -> !torch.int
%1270 = torch.aten.mul.int %1269, %int0_403 : !torch.int, !torch.int -> !torch.int
%1271 = torch.aten.add.int %1267, %1270 : !torch.int, !torch.int -> !torch.int
%1272 = torch.prim.ListConstruct %1253, %1259, %1265, %1271 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1273 = torch.aten.reshape %1245, %1272 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_411 = torch.constant.int 1
%int2_412 = torch.constant.int 2
%1274 = torch.aten.transpose.int %1273, %int1_411, %int2_412 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1275 = torch.aten.matmul %result0_398, %162 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_413 = torch.constant.int 1
%1276 = torch.aten.add.Tensor %41, %1275, %int1_413 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
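// Same head split for V.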
%int0_414 = torch.constant.int 0
%int0_415 = torch.constant.int 0
%1277 = torch.aten.select.int %1247, %int0_414, %int0_415 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1278 = torch.aten.item %1277 : !torch.vtensor<[1],si64> -> !torch.int
%1279 = torch.aten.eq.int %1278, %int0_414 : !torch.int, !torch.int -> !torch.bool
%1280 = torch.aten.Int.bool %1279 : !torch.bool -> !torch.int
%int1_416 = torch.constant.int 1
%1281 = torch.aten.mul.int %1280, %int1_416 : !torch.int, !torch.int -> !torch.int
%1282 = torch.aten.add.int %1278, %1281 : !torch.int, !torch.int -> !torch.int
%int1_417 = torch.constant.int 1
%1283 = torch.aten.select.int %1247, %int0_414, %int1_417 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1284 = torch.aten.item %1283 : !torch.vtensor<[1],si64> -> !torch.int
%1285 = torch.aten.eq.int %1284, %int0_414 : !torch.int, !torch.int -> !torch.bool
%1286 = torch.aten.Int.bool %1285 : !torch.bool -> !torch.int
%int6_418 = torch.constant.int 6
%1287 = torch.aten.mul.int %1286, %int6_418 : !torch.int, !torch.int -> !torch.int
%1288 = torch.aten.add.int %1284, %1287 : !torch.int, !torch.int -> !torch.int
%int2_419 = torch.constant.int 2
%1289 = torch.aten.select.int %1247, %int0_414, %int2_419 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1290 = torch.aten.item %1289 : !torch.vtensor<[1],si64> -> !torch.int
%1291 = torch.aten.eq.int %1290, %int0_414 : !torch.int, !torch.int -> !torch.bool
%1292 = torch.aten.Int.bool %1291 : !torch.bool -> !torch.int
%int768_420 = torch.constant.int 768
%1293 = torch.aten.mul.int %1292, %int768_420 : !torch.int, !torch.int -> !torch.int
%1294 = torch.aten.add.int %1290, %1293 : !torch.int, !torch.int -> !torch.int
%int3_421 = torch.constant.int 3
%1295 = torch.aten.select.int %1247, %int0_414, %int3_421 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1296 = torch.aten.item %1295 : !torch.vtensor<[1],si64> -> !torch.int
%1297 = torch.aten.eq.int %1296, %int0_414 : !torch.int, !torch.int -> !torch.bool
%1298 = torch.aten.Int.bool %1297 : !torch.bool -> !torch.int
%1299 = torch.aten.mul.int %1298, %int0_414 : !torch.int, !torch.int -> !torch.int
%1300 = torch.aten.add.int %1296, %1299 : !torch.int, !torch.int -> !torch.int
%1301 = torch.prim.ListConstruct %1282, %1288, %1294, %1300 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1302 = torch.aten.reshape %1276, %1301 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_422 = torch.constant.int 1
%int2_423 = torch.constant.int 2
%1303 = torch.aten.transpose.int %1302, %int1_422, %int2_423 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
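// Same head split for the scaled Q.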
%1304 = torch.vtensor.literal(dense_resource<__61> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_424 = torch.constant.int 0
%int0_425 = torch.constant.int 0
%1305 = torch.aten.select.int %1304, %int0_424, %int0_425 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1306 = torch.aten.item %1305 : !torch.vtensor<[1],si64> -> !torch.int
%1307 = torch.aten.eq.int %1306, %int0_424 : !torch.int, !torch.int -> !torch.bool
%1308 = torch.aten.Int.bool %1307 : !torch.bool -> !torch.int
%int1_426 = torch.constant.int 1
%1309 = torch.aten.mul.int %1308, %int1_426 : !torch.int, !torch.int -> !torch.int
%1310 = torch.aten.add.int %1306, %1309 : !torch.int, !torch.int -> !torch.int
%int1_427 = torch.constant.int 1
%1311 = torch.aten.select.int %1304, %int0_424, %int1_427 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1312 = torch.aten.item %1311 : !torch.vtensor<[1],si64> -> !torch.int
%1313 = torch.aten.eq.int %1312, %int0_424 : !torch.int, !torch.int -> !torch.bool
%1314 = torch.aten.Int.bool %1313 : !torch.bool -> !torch.int
%int6_428 = torch.constant.int 6
%1315 = torch.aten.mul.int %1314, %int6_428 : !torch.int, !torch.int -> !torch.int
%1316 = torch.aten.add.int %1312, %1315 : !torch.int, !torch.int -> !torch.int
%int2_429 = torch.constant.int 2
%1317 = torch.aten.select.int %1304, %int0_424, %int2_429 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1318 = torch.aten.item %1317 : !torch.vtensor<[1],si64> -> !torch.int
%1319 = torch.aten.eq.int %1318, %int0_424 : !torch.int, !torch.int -> !torch.bool
%1320 = torch.aten.Int.bool %1319 : !torch.bool -> !torch.int
%int768_430 = torch.constant.int 768
%1321 = torch.aten.mul.int %1320, %int768_430 : !torch.int, !torch.int -> !torch.int
%1322 = torch.aten.add.int %1318, %1321 : !torch.int, !torch.int -> !torch.int
%int3_431 = torch.constant.int 3
%1323 = torch.aten.select.int %1304, %int0_424, %int3_431 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1324 = torch.aten.item %1323 : !torch.vtensor<[1],si64> -> !torch.int
%1325 = torch.aten.eq.int %1324, %int0_424 : !torch.int, !torch.int -> !torch.bool
%1326 = torch.aten.Int.bool %1325 : !torch.bool -> !torch.int
%1327 = torch.aten.mul.int %1326, %int0_424 : !torch.int, !torch.int -> !torch.int
%1328 = torch.aten.add.int %1324, %1327 : !torch.int, !torch.int -> !torch.int
%1329 = torch.prim.ListConstruct %1310, %1316, %1322, %1328 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1330 = torch.aten.reshape %1243, %1329 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_432 = torch.constant.int 1
%int2_433 = torch.constant.int 2
%1331 = torch.aten.transpose.int %1330, %int1_432, %int2_433 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
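// Fold the unit batch dim: Q, K, and V each go [1,12,6,64] -> [12,6,64].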
%1332 = torch.vtensor.literal(dense_resource<__62> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1333 = torch.vtensor.literal(dense_resource<__63> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%1334 = torch.vtensor.literal(dense_resource<__64> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_434 = torch.constant.int 0
%int0_435 = torch.constant.int 0
%1335 = torch.aten.select.int %1332, %int0_434, %int0_435 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1336 = torch.aten.item %1335 : !torch.vtensor<[1],si64> -> !torch.int
%1337 = torch.aten.eq.int %1336, %int0_434 : !torch.int, !torch.int -> !torch.bool
%1338 = torch.aten.Int.bool %1337 : !torch.bool -> !torch.int
%int1_436 = torch.constant.int 1
%1339 = torch.aten.mul.int %1338, %int1_436 : !torch.int, !torch.int -> !torch.int
%1340 = torch.aten.add.int %1336, %1339 : !torch.int, !torch.int -> !torch.int
%int1_437 = torch.constant.int 1
%1341 = torch.aten.select.int %1332, %int0_434, %int1_437 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1342 = torch.aten.item %1341 : !torch.vtensor<[1],si64> -> !torch.int
%1343 = torch.aten.eq.int %1342, %int0_434 : !torch.int, !torch.int -> !torch.bool
%1344 = torch.aten.Int.bool %1343 : !torch.bool -> !torch.int
%int12_438 = torch.constant.int 12
%1345 = torch.aten.mul.int %1344, %int12_438 : !torch.int, !torch.int -> !torch.int
%1346 = torch.aten.add.int %1342, %1345 : !torch.int, !torch.int -> !torch.int
%int2_439 = torch.constant.int 2
%1347 = torch.aten.select.int %1332, %int0_434, %int2_439 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1348 = torch.aten.item %1347 : !torch.vtensor<[1],si64> -> !torch.int
%1349 = torch.aten.eq.int %1348, %int0_434 : !torch.int, !torch.int -> !torch.bool
%1350 = torch.aten.Int.bool %1349 : !torch.bool -> !torch.int
%int6_440 = torch.constant.int 6
%1351 = torch.aten.mul.int %1350, %int6_440 : !torch.int, !torch.int -> !torch.int
%1352 = torch.aten.add.int %1348, %1351 : !torch.int, !torch.int -> !torch.int
%1353 = torch.prim.ListConstruct %1340, %1346, %1352 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1354 = torch.aten.reshape %1331, %1353 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_441 = torch.constant.int 0
%int0_442 = torch.constant.int 0
%1355 = torch.aten.select.int %1333, %int0_441, %int0_442 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1356 = torch.aten.item %1355 : !torch.vtensor<[1],si64> -> !torch.int
%1357 = torch.aten.eq.int %1356, %int0_441 : !torch.int, !torch.int -> !torch.bool
%1358 = torch.aten.Int.bool %1357 : !torch.bool -> !torch.int
%int1_443 = torch.constant.int 1
%1359 = torch.aten.mul.int %1358, %int1_443 : !torch.int, !torch.int -> !torch.int
%1360 = torch.aten.add.int %1356, %1359 : !torch.int, !torch.int -> !torch.int
%int1_444 = torch.constant.int 1
%1361 = torch.aten.select.int %1333, %int0_441, %int1_444 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1362 = torch.aten.item %1361 : !torch.vtensor<[1],si64> -> !torch.int
%1363 = torch.aten.eq.int %1362, %int0_441 : !torch.int, !torch.int -> !torch.bool
%1364 = torch.aten.Int.bool %1363 : !torch.bool -> !torch.int
%int12_445 = torch.constant.int 12
%1365 = torch.aten.mul.int %1364, %int12_445 : !torch.int, !torch.int -> !torch.int
%1366 = torch.aten.add.int %1362, %1365 : !torch.int, !torch.int -> !torch.int
%int2_446 = torch.constant.int 2
%1367 = torch.aten.select.int %1333, %int0_441, %int2_446 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1368 = torch.aten.item %1367 : !torch.vtensor<[1],si64> -> !torch.int
%1369 = torch.aten.eq.int %1368, %int0_441 : !torch.int, !torch.int -> !torch.bool
%1370 = torch.aten.Int.bool %1369 : !torch.bool -> !torch.int
%int6_447 = torch.constant.int 6
%1371 = torch.aten.mul.int %1370, %int6_447 : !torch.int, !torch.int -> !torch.int
%1372 = torch.aten.add.int %1368, %1371 : !torch.int, !torch.int -> !torch.int
%1373 = torch.prim.ListConstruct %1360, %1366, %1372 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1374 = torch.aten.reshape %1274, %1373 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
%int0_448 = torch.constant.int 0
%int0_449 = torch.constant.int 0
%1375 = torch.aten.select.int %1334, %int0_448, %int0_449 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1376 = torch.aten.item %1375 : !torch.vtensor<[1],si64> -> !torch.int
%1377 = torch.aten.eq.int %1376, %int0_448 : !torch.int, !torch.int -> !torch.bool
%1378 = torch.aten.Int.bool %1377 : !torch.bool -> !torch.int
%int1_450 = torch.constant.int 1
%1379 = torch.aten.mul.int %1378, %int1_450 : !torch.int, !torch.int -> !torch.int
%1380 = torch.aten.add.int %1376, %1379 : !torch.int, !torch.int -> !torch.int
%int1_451 = torch.constant.int 1
%1381 = torch.aten.select.int %1334, %int0_448, %int1_451 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1382 = torch.aten.item %1381 : !torch.vtensor<[1],si64> -> !torch.int
%1383 = torch.aten.eq.int %1382, %int0_448 : !torch.int, !torch.int -> !torch.bool
%1384 = torch.aten.Int.bool %1383 : !torch.bool -> !torch.int
%int12_452 = torch.constant.int 12
%1385 = torch.aten.mul.int %1384, %int12_452 : !torch.int, !torch.int -> !torch.int
%1386 = torch.aten.add.int %1382, %1385 : !torch.int, !torch.int -> !torch.int
%int2_453 = torch.constant.int 2
%1387 = torch.aten.select.int %1334, %int0_448, %int2_453 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1388 = torch.aten.item %1387 : !torch.vtensor<[1],si64> -> !torch.int
%1389 = torch.aten.eq.int %1388, %int0_448 : !torch.int, !torch.int -> !torch.bool
%1390 = torch.aten.Int.bool %1389 : !torch.bool -> !torch.int
%int6_454 = torch.constant.int 6
%1391 = torch.aten.mul.int %1390, %int6_454 : !torch.int, !torch.int -> !torch.int
%1392 = torch.aten.add.int %1388, %1391 : !torch.int, !torch.int -> !torch.int
%1393 = torch.prim.ListConstruct %1380, %1386, %1392 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1394 = torch.aten.reshape %1303, %1393 : !torch.vtensor<[1,12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[12,6,64],f32>
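// Attention scores: Q @ K^T -> [12,6,6] (12 heads, 6x6 query-by-key).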
%int1_455 = torch.constant.int 1
%int2_456 = torch.constant.int 2
%1395 = torch.aten.transpose.int %1374, %int1_455, %int2_456 : !torch.vtensor<[12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,6],f32>
%1396 = torch.aten.matmul %1354, %1395 : !torch.vtensor<[12,6,64],f32>, !torch.vtensor<[12,64,6],f32> -> !torch.vtensor<[12,6,6],f32>
%1397 = torch.vtensor.literal(dense_resource<__65> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_457 = torch.constant.int 0
%int0_458 = torch.constant.int 0
%1398 = torch.aten.select.int %1397, %int0_457, %int0_458 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1399 = torch.aten.item %1398 : !torch.vtensor<[1],si64> -> !torch.int
%1400 = torch.aten.eq.int %1399, %int0_457 : !torch.int, !torch.int -> !torch.bool
%1401 = torch.aten.Int.bool %1400 : !torch.bool -> !torch.int
%int12_459 = torch.constant.int 12
%1402 = torch.aten.mul.int %1401, %int12_459 : !torch.int, !torch.int -> !torch.int
%1403 = torch.aten.add.int %1399, %1402 : !torch.int, !torch.int -> !torch.int
%int1_460 = torch.constant.int 1
%1404 = torch.aten.select.int %1397, %int0_457, %int1_460 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1405 = torch.aten.item %1404 : !torch.vtensor<[1],si64> -> !torch.int
%1406 = torch.aten.eq.int %1405, %int0_457 : !torch.int, !torch.int -> !torch.bool
%1407 = torch.aten.Int.bool %1406 : !torch.bool -> !torch.int
%int6_461 = torch.constant.int 6
%1408 = torch.aten.mul.int %1407, %int6_461 : !torch.int, !torch.int -> !torch.int
%1409 = torch.aten.add.int %1405, %1408 : !torch.int, !torch.int -> !torch.int
%int2_462 = torch.constant.int 2
%1410 = torch.aten.select.int %1397, %int0_457, %int2_462 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1411 = torch.aten.item %1410 : !torch.vtensor<[1],si64> -> !torch.int
%1412 = torch.aten.eq.int %1411, %int0_457 : !torch.int, !torch.int -> !torch.bool
%1413 = torch.aten.Int.bool %1412 : !torch.bool -> !torch.int
%int6_463 = torch.constant.int 6
%1414 = torch.aten.mul.int %1413, %int6_463 : !torch.int, !torch.int -> !torch.int
%1415 = torch.aten.add.int %1411, %1414 : !torch.int, !torch.int -> !torch.int
%int3_464 = torch.constant.int 3
%1416 = torch.aten.select.int %1397, %int0_457, %int3_464 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1417 = torch.aten.item %1416 : !torch.vtensor<[1],si64> -> !torch.int
%1418 = torch.aten.eq.int %1417, %int0_457 : !torch.int, !torch.int -> !torch.bool
%1419 = torch.aten.Int.bool %1418 : !torch.bool -> !torch.int
%1420 = torch.aten.mul.int %1419, %int0_457 : !torch.int, !torch.int -> !torch.int
%1421 = torch.aten.add.int %1417, %1420 : !torch.int, !torch.int -> !torch.int
%1422 = torch.prim.ListConstruct %1403, %1409, %1415, %1421 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1423 = torch.aten.reshape %1396, %1422 : !torch.vtensor<[12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,6],f32>
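// Add the attention mask, then clamp the scores from below with the scalar __66
// (likely the dtype's minimum value for masked positions, as in OPT's attention).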
%int1_465 = torch.constant.int 1
%1424 = torch.aten.add.Tensor %1423, %285, %int1_465 : !torch.vtensor<[1,12,6,6],f32>, !torch.vtensor<[?,?,6,6],f32>, !torch.int -> !torch.vtensor<[?,12,6,6],f32>
%1425 = torch.vtensor.literal(dense_resource<__66> : tensor<f32>) : !torch.vtensor<[],f32>
%1426 = torch.aten.maximum %1424, %1425 : !torch.vtensor<[?,12,6,6],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[?,12,6,6],f32>
%1427 = torch.vtensor.literal(dense_resource<__67> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_466 = torch.constant.int 0
%int0_467 = torch.constant.int 0
%1428 = torch.aten.select.int %1427, %int0_466, %int0_467 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1429 = torch.aten.item %1428 : !torch.vtensor<[1],si64> -> !torch.int
%1430 = torch.aten.eq.int %1429, %int0_466 : !torch.int, !torch.int -> !torch.bool
%1431 = torch.aten.Int.bool %1430 : !torch.bool -> !torch.int
%int-1_468 = torch.constant.int -1
%1432 = torch.aten.mul.int %1431, %int-1_468 : !torch.int, !torch.int -> !torch.int
%1433 = torch.aten.add.int %1429, %1432 : !torch.int, !torch.int -> !torch.int
%int1_469 = torch.constant.int 1
%1434 = torch.aten.select.int %1427, %int0_466, %int1_469 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1435 = torch.aten.item %1434 : !torch.vtensor<[1],si64> -> !torch.int
%1436 = torch.aten.eq.int %1435, %int0_466 : !torch.int, !torch.int -> !torch.bool
%1437 = torch.aten.Int.bool %1436 : !torch.bool -> !torch.int
%int12_470 = torch.constant.int 12
%1438 = torch.aten.mul.int %1437, %int12_470 : !torch.int, !torch.int -> !torch.int
%1439 = torch.aten.add.int %1435, %1438 : !torch.int, !torch.int -> !torch.int
%int2_471 = torch.constant.int 2
%1440 = torch.aten.select.int %1427, %int0_466, %int2_471 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1441 = torch.aten.item %1440 : !torch.vtensor<[1],si64> -> !torch.int
%1442 = torch.aten.eq.int %1441, %int0_466 : !torch.int, !torch.int -> !torch.bool
%1443 = torch.aten.Int.bool %1442 : !torch.bool -> !torch.int
%int6_472 = torch.constant.int 6
%1444 = torch.aten.mul.int %1443, %int6_472 : !torch.int, !torch.int -> !torch.int
%1445 = torch.aten.add.int %1441, %1444 : !torch.int, !torch.int -> !torch.int
%1446 = torch.prim.ListConstruct %1433, %1439, %1445 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1447 = torch.aten.reshape %1426, %1446 : !torch.vtensor<[?,12,6,6],f32>, !torch.list<int> -> !torch.vtensor<[12,6,6],f32>
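// Softmax over the key dim, then the weighted sum of values: softmax(scores) @ V.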
%int2_473 = torch.constant.int 2
%none_474 = torch.constant.none
%1448 = torch.aten.softmax.int %1447, %int2_473, %none_474 : !torch.vtensor<[12,6,6],f32>, !torch.int, !torch.none -> !torch.vtensor<[12,6,6],f32>
%1449 = torch.aten.matmul %1448, %1394 : !torch.vtensor<[12,6,6],f32>, !torch.vtensor<[12,6,64],f32> -> !torch.vtensor<[12,6,64],f32>
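// Restore the head layout and merge heads:
// [12,6,64] -> [1,12,6,64] -> [1,6,12,64] -> [1,6,768].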
%1450 = torch.vtensor.literal(dense_resource<__68> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_475 = torch.constant.int 0
%int0_476 = torch.constant.int 0
%1451 = torch.aten.select.int %1450, %int0_475, %int0_476 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1452 = torch.aten.item %1451 : !torch.vtensor<[1],si64> -> !torch.int
%1453 = torch.aten.eq.int %1452, %int0_475 : !torch.int, !torch.int -> !torch.bool
%1454 = torch.aten.Int.bool %1453 : !torch.bool -> !torch.int
%int12_477 = torch.constant.int 12
%1455 = torch.aten.mul.int %1454, %int12_477 : !torch.int, !torch.int -> !torch.int
%1456 = torch.aten.add.int %1452, %1455 : !torch.int, !torch.int -> !torch.int
%int1_478 = torch.constant.int 1
%1457 = torch.aten.select.int %1450, %int0_475, %int1_478 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1458 = torch.aten.item %1457 : !torch.vtensor<[1],si64> -> !torch.int
%1459 = torch.aten.eq.int %1458, %int0_475 : !torch.int, !torch.int -> !torch.bool
%1460 = torch.aten.Int.bool %1459 : !torch.bool -> !torch.int
%int6_479 = torch.constant.int 6
%1461 = torch.aten.mul.int %1460, %int6_479 : !torch.int, !torch.int -> !torch.int
%1462 = torch.aten.add.int %1458, %1461 : !torch.int, !torch.int -> !torch.int
%int2_480 = torch.constant.int 2
%1463 = torch.aten.select.int %1450, %int0_475, %int2_480 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1464 = torch.aten.item %1463 : !torch.vtensor<[1],si64> -> !torch.int
%1465 = torch.aten.eq.int %1464, %int0_475 : !torch.int, !torch.int -> !torch.bool
%1466 = torch.aten.Int.bool %1465 : !torch.bool -> !torch.int
%int64_481 = torch.constant.int 64
%1467 = torch.aten.mul.int %1466, %int64_481 : !torch.int, !torch.int -> !torch.int
%1468 = torch.aten.add.int %1464, %1467 : !torch.int, !torch.int -> !torch.int
%int3_482 = torch.constant.int 3
%1469 = torch.aten.select.int %1450, %int0_475, %int3_482 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1470 = torch.aten.item %1469 : !torch.vtensor<[1],si64> -> !torch.int
%1471 = torch.aten.eq.int %1470, %int0_475 : !torch.int, !torch.int -> !torch.bool
%1472 = torch.aten.Int.bool %1471 : !torch.bool -> !torch.int
%1473 = torch.aten.mul.int %1472, %int0_475 : !torch.int, !torch.int -> !torch.int
%1474 = torch.aten.add.int %1470, %1473 : !torch.int, !torch.int -> !torch.int
%1475 = torch.prim.ListConstruct %1456, %1462, %1468, %1474 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1476 = torch.aten.reshape %1449, %1475 : !torch.vtensor<[12,6,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,6,64],f32>
%int1_483 = torch.constant.int 1
%int2_484 = torch.constant.int 2
%1477 = torch.aten.transpose.int %1476, %int1_483, %int2_484 : !torch.vtensor<[1,12,6,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,6,12,64],f32>
%1478 = torch.vtensor.literal(dense_resource<__69> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_485 = torch.constant.int 0
%int0_486 = torch.constant.int 0
%1479 = torch.aten.select.int %1478, %int0_485, %int0_486 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1480 = torch.aten.item %1479 : !torch.vtensor<[1],si64> -> !torch.int
%1481 = torch.aten.eq.int %1480, %int0_485 : !torch.int, !torch.int -> !torch.bool
%1482 = torch.aten.Int.bool %1481 : !torch.bool -> !torch.int
%int1_487 = torch.constant.int 1
%1483 = torch.aten.mul.int %1482, %int1_487 : !torch.int, !torch.int -> !torch.int
%1484 = torch.aten.add.int %1480, %1483 : !torch.int, !torch.int -> !torch.int
%int1_488 = torch.constant.int 1
%1485 = torch.aten.select.int %1478, %int0_485, %int1_488 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1486 = torch.aten.item %1485 : !torch.vtensor<[1],si64> -> !torch.int
%1487 = torch.aten.eq.int %1486, %int0_485 : !torch.int, !torch.int -> !torch.bool
%1488 = torch.aten.Int.bool %1487 : !torch.bool -> !torch.int
%int6_489 = torch.constant.int 6
%1489 = torch.aten.mul.int %1488, %int6_489 : !torch.int, !torch.int -> !torch.int
%1490 = torch.aten.add.int %1486, %1489 : !torch.int, !torch.int -> !torch.int
%int2_490 = torch.constant.int 2
%1491 = torch.aten.select.int %1478, %int0_485, %int2_490 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1492 = torch.aten.item %1491 : !torch.vtensor<[1],si64> -> !torch.int
%1493 = torch.aten.eq.int %1492, %int0_485 : !torch.int, !torch.int -> !torch.bool
%1494 = torch.aten.Int.bool %1493 : !torch.bool -> !torch.int
%int12_491 = torch.constant.int 12
%1495 = torch.aten.mul.int %1494, %int12_491 : !torch.int, !torch.int -> !torch.int
%1496 = torch.aten.add.int %1492, %1495 : !torch.int, !torch.int -> !torch.int
%1497 = torch.prim.ListConstruct %1484, %1490, %1496 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1498 = torch.aten.reshape %1477, %1497 : !torch.vtensor<[1,6,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
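// Attention output projection, residual add, and flatten to [6,768].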
%1499 = torch.aten.matmul %1498, %163 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_492 = torch.constant.int 1
%1500 = torch.aten.add.Tensor %43, %1499, %int1_492 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int1_493 = torch.constant.int 1
%1501 = torch.aten.add.Tensor %1238, %1500, %int1_493 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1502 = torch.vtensor.literal(dense_resource<__70> : tensor<2xsi64>) : !torch.vtensor<[2],si64>
%int0_494 = torch.constant.int 0
%int0_495 = torch.constant.int 0
%1503 = torch.aten.select.int %1502, %int0_494, %int0_495 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1504 = torch.aten.item %1503 : !torch.vtensor<[1],si64> -> !torch.int
%1505 = torch.aten.eq.int %1504, %int0_494 : !torch.int, !torch.int -> !torch.bool
%1506 = torch.aten.Int.bool %1505 : !torch.bool -> !torch.int
%int1_496 = torch.constant.int 1
%1507 = torch.aten.mul.int %1506, %int1_496 : !torch.int, !torch.int -> !torch.int
%1508 = torch.aten.add.int %1504, %1507 : !torch.int, !torch.int -> !torch.int
%int1_497 = torch.constant.int 1
%1509 = torch.aten.select.int %1502, %int0_494, %int1_497 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1510 = torch.aten.item %1509 : !torch.vtensor<[1],si64> -> !torch.int
%1511 = torch.aten.eq.int %1510, %int0_494 : !torch.int, !torch.int -> !torch.bool
%1512 = torch.aten.Int.bool %1511 : !torch.bool -> !torch.int
%int6_498 = torch.constant.int 6
%1513 = torch.aten.mul.int %1512, %int6_498 : !torch.int, !torch.int -> !torch.int
%1514 = torch.aten.add.int %1510, %1513 : !torch.int, !torch.int -> !torch.int
%1515 = torch.prim.ListConstruct %1508, %1514 : (!torch.int, !torch.int) -> !torch.list<int>
%1516 = torch.aten.reshape %1501, %1515 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[6,768],f32>
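// LayerNorm + feed-forward (relu MLP) + residual, then unflatten to [1,6,768].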
%float9.999990e-06_499 = torch.constant.float 9.9999997473787516E-6
%int768_500 = torch.constant.int 768
%1517 = torch.prim.ListConstruct %int768_500 : (!torch.int) -> !torch.list<int>
%result0_501, %result1_502, %result2_503 = torch.aten.native_layer_norm %1516, %1517, %50, %51, %float9.999990e-06_499 : !torch.vtensor<[6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,1],f32>, !torch.vtensor<[6,1],f32>
%int0_504 = torch.constant.int 0
%int1_505 = torch.constant.int 1
%1518 = torch.aten.transpose.int %46, %int0_504, %int1_505 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32>
%1519 = torch.aten.mm %result0_501, %1518 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[6,3072],f32>
%1520 = torch.aten.add.Tensor %1519, %47, %int1_505 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[6,3072],f32>
%1521 = torch.aten.relu %1520 : !torch.vtensor<[6,3072],f32> -> !torch.vtensor<[6,3072],f32>
%int0_506 = torch.constant.int 0
%int1_507 = torch.constant.int 1
%1522 = torch.aten.transpose.int %48, %int0_506, %int1_507 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32>
%1523 = torch.aten.mm %1521, %1522 : !torch.vtensor<[6,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[6,768],f32>
%1524 = torch.aten.add.Tensor %1523, %49, %int1_507 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%int1_508 = torch.constant.int 1
%1525 = torch.aten.add.Tensor %1516, %1524, %int1_508 : !torch.vtensor<[6,768],f32>, !torch.vtensor<[6,768],f32>, !torch.int -> !torch.vtensor<[6,768],f32>
%1526 = torch.vtensor.literal(dense_resource<__71> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
%int0_509 = torch.constant.int 0
%int0_510 = torch.constant.int 0
%1527 = torch.aten.select.int %1526, %int0_509, %int0_510 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1528 = torch.aten.item %1527 : !torch.vtensor<[1],si64> -> !torch.int
%1529 = torch.aten.eq.int %1528, %int0_509 : !torch.int, !torch.int -> !torch.bool
%1530 = torch.aten.Int.bool %1529 : !torch.bool -> !torch.int
%int6_511 = torch.constant.int 6
%1531 = torch.aten.mul.int %1530, %int6_511 : !torch.int, !torch.int -> !torch.int
%1532 = torch.aten.add.int %1528, %1531 : !torch.int, !torch.int -> !torch.int
%int1_512 = torch.constant.int 1
%1533 = torch.aten.select.int %1526, %int0_509, %int1_512 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1534 = torch.aten.item %1533 : !torch.vtensor<[1],si64> -> !torch.int
%1535 = torch.aten.eq.int %1534, %int0_509 : !torch.int, !torch.int -> !torch.bool
%1536 = torch.aten.Int.bool %1535 : !torch.bool -> !torch.int
%int768_513 = torch.constant.int 768
%1537 = torch.aten.mul.int %1536, %int768_513 : !torch.int, !torch.int -> !torch.int
%1538 = torch.aten.add.int %1534, %1537 : !torch.int, !torch.int -> !torch.int
%int2_514 = torch.constant.int 2
%1539 = torch.aten.select.int %1526, %int0_509, %int2_514 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1540 = torch.aten.item %1539 : !torch.vtensor<[1],si64> -> !torch.int
%1541 = torch.aten.eq.int %1540, %int0_509 : !torch.int, !torch.int -> !torch.bool
%1542 = torch.aten.Int.bool %1541 : !torch.bool -> !torch.int
%1543 = torch.aten.mul.int %1542, %int0_509 : !torch.int, !torch.int -> !torch.int
%1544 = torch.aten.add.int %1540, %1543 : !torch.int, !torch.int -> !torch.int
%1545 = torch.prim.ListConstruct %1532, %1538, %1544 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1546 = torch.aten.reshape %1525, %1545 : !torch.vtensor<[6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,768],f32>
%float9.999990e-06_515 = torch.constant.float 9.9999997473787516E-6
%int768_516 = torch.constant.int 768
%1547 = torch.prim.ListConstruct %int768_516 : (!torch.int) -> !torch.list<int>
%result0_517, %result1_518, %result2_519 = torch.aten.native_layer_norm %1546, %1547, %56, %57, %float9.999990e-06_515 : !torch.vtensor<[1,6,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[1,6,1],f32>, !torch.vtensor<[1,6,1],f32>
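// The same attention pattern repeats for the next decoder layer.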
%1548 = torch.aten.matmul %result0_517, %164 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_520 = torch.constant.int 1
%1549 = torch.aten.add.Tensor %54, %1548, %int1_520 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1550 = torch.vtensor.literal(dense_resource<__72> : tensor<f32>) : !torch.vtensor<[],f32>
%1551 = torch.aten.mul.Tensor %1549, %1550 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,6,768],f32>
%1552 = torch.aten.matmul %result0_517, %165 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_521 = torch.constant.int 1
%1553 = torch.aten.add.Tensor %52, %1552, %int1_521 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%1554 = torch.vtensor.literal(dense_resource<__73> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%1555 = torch.vtensor.literal(dense_resource<__74> : tensor<4xsi64>) : !torch.vtensor<[4],si64>
%int0_522 = torch.constant.int 0
%int0_523 = torch.constant.int 0
%1556 = torch.aten.select.int %1554, %int0_522, %int0_523 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1557 = torch.aten.item %1556 : !torch.vtensor<[1],si64> -> !torch.int
%1558 = torch.aten.eq.int %1557, %int0_522 : !torch.int, !torch.int -> !torch.bool
%1559 = torch.aten.Int.bool %1558 : !torch.bool -> !torch.int
%int1_524 = torch.constant.int 1
%1560 = torch.aten.mul.int %1559, %int1_524 : !torch.int, !torch.int -> !torch.int
%1561 = torch.aten.add.int %1557, %1560 : !torch.int, !torch.int -> !torch.int
%int1_525 = torch.constant.int 1
%1562 = torch.aten.select.int %1554, %int0_522, %int1_525 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1563 = torch.aten.item %1562 : !torch.vtensor<[1],si64> -> !torch.int
%1564 = torch.aten.eq.int %1563, %int0_522 : !torch.int, !torch.int -> !torch.bool
%1565 = torch.aten.Int.bool %1564 : !torch.bool -> !torch.int
%int6_526 = torch.constant.int 6
%1566 = torch.aten.mul.int %1565, %int6_526 : !torch.int, !torch.int -> !torch.int
%1567 = torch.aten.add.int %1563, %1566 : !torch.int, !torch.int -> !torch.int
%int2_527 = torch.constant.int 2
%1568 = torch.aten.select.int %1554, %int0_522, %int2_527 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1569 = torch.aten.item %1568 : !torch.vtensor<[1],si64> -> !torch.int
%1570 = torch.aten.eq.int %1569, %int0_522 : !torch.int, !torch.int -> !torch.bool
%1571 = torch.aten.Int.bool %1570 : !torch.bool -> !torch.int
%int768_528 = torch.constant.int 768
%1572 = torch.aten.mul.int %1571, %int768_528 : !torch.int, !torch.int -> !torch.int
%1573 = torch.aten.add.int %1569, %1572 : !torch.int, !torch.int -> !torch.int
%int3_529 = torch.constant.int 3
%1574 = torch.aten.select.int %1554, %int0_522, %int3_529 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1575 = torch.aten.item %1574 : !torch.vtensor<[1],si64> -> !torch.int
%1576 = torch.aten.eq.int %1575, %int0_522 : !torch.int, !torch.int -> !torch.bool
%1577 = torch.aten.Int.bool %1576 : !torch.bool -> !torch.int
%1578 = torch.aten.mul.int %1577, %int0_522 : !torch.int, !torch.int -> !torch.int
%1579 = torch.aten.add.int %1575, %1578 : !torch.int, !torch.int -> !torch.int
%1580 = torch.prim.ListConstruct %1561, %1567, %1573, %1579 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1581 = torch.aten.reshape %1553, %1580 : !torch.vtensor<[1,6,768],f32>, !torch.list<int> -> !torch.vtensor<[1,6,12,64],f32>
%int1_530 = torch.constant.int 1
%int2_531 = torch.constant.int 2
%1582 = torch.aten.transpose.int %1581, %int1_530, %int2_531 : !torch.vtensor<[1,6,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,6,64],f32>
%1583 = torch.aten.matmul %result0_517, %166 : !torch.vtensor<[1,6,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,6,768],f32>
%int1_532 = torch.constant.int 1
%1584 = torch.aten.add.Tensor %53, %1583, %int1_532 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,6,768],f32>, !torch.int -> !torch.vtensor<[1,6,768],f32>
%int0_533 = torch.constant.int 0
%int0_534 = torch.constant.int 0
%1585 = torch.aten.select.int %1555, %int0_533, %int0_534 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1586 = torch.aten.item %1585 : !torch.vtensor<[1],si64> -> !torch.int
%1587 = torch.aten.eq.int %1586, %int0_533 : !torch.int, !torch.int -> !torch.bool
%1588 = torch.aten.Int.bool %1587 : !torch.bool -> !torch.int
%int1_535 = torch.constant.int 1
%1589 = torch.aten.mul.int %1588, %int1_535 : !torch.int, !torch.int -> !torch.int
%1590 = torch.aten.add.int %1586, %1589 : !torch.int, !torch.int -> !torch.int
%int1_536 = torch.constant.int 1
%1591 = torch.aten.select.int %1555, %int0_533, %int1_536 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1592 = torch.aten.item %1591 : !torch.vtensor<[1],si64> -> !torch.int
%1593 = torch.aten.eq.int %1592, %int0_533 : !torch.int, !torch.int -> !torch.bool
%1594 = torch.aten.Int.bool %1593 : !torch.bool -> !torch.int