Created
January 11, 2023 06:31
-
-
Save vivekkhandelwal1/1059e00b91f48bb84a4be2f14e04d41d to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#loc = loc(unknown) | |
module attributes {torch.debug_module_name = "_lambda"} { | |
func.func private @__torch__.torch.fx.graph_module._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%false = torch.constant.bool false loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%1 = torch.aten.argmax %arg1, %none_0, %false : !torch.tensor, !torch.none, !torch.bool -> !torch.tensor loc(#loc1) | |
return %1 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_0._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_0._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_0._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_0._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_0._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%true_0 = torch.constant.bool true loc(#loc2) | |
%int0 = torch.constant.int 0 loc(#loc3) | |
%1 = torch.aten.argmax %arg1, %int0, %true_0 : !torch.tensor, !torch.int, !torch.bool -> !torch.tensor loc(#loc4) | |
return %1 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_1._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_1._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_1._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_1._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_1._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%false = torch.constant.bool false loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc5) | |
%1 = torch.aten.argmax %arg1, %int1, %false : !torch.tensor, !torch.int, !torch.bool -> !torch.tensor loc(#loc6) | |
return %1 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_2._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_2._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_2._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_2._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_2._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown), %arg3: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor, tensor, tensor> { | |
%int-1 = torch.constant.int -1 loc(#loc) | |
%none_0 = torch.constant.none loc(#loc7) | |
%false = torch.constant.bool false loc(#loc8) | |
%int0 = torch.constant.int 0 loc(#loc9) | |
%1:4 = torch.operator "aten._embedding_bag_forward_only"(%arg1, %arg2, %arg3, %false, %int0, %false, %none_0, %false, %int-1) : (!torch.tensor, !torch.tensor, !torch.tensor, !torch.bool, !torch.int, !torch.bool, !torch.none, !torch.bool, !torch.int) -> (!torch.tensor, !torch.tensor, !torch.tensor, !torch.tensor) loc(#loc10) | |
%2 = torch.prim.TupleConstruct %1#0, %1#1, %1#2, %1#3 : !torch.tensor, !torch.tensor, !torch.tensor, !torch.tensor -> !torch.tuple<tensor, tensor, tensor, tensor> loc(#loc) | |
return %2 : !torch.tuple<tensor, tensor, tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_3._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_3._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_3._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_3._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_3._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown), %arg3: !torch.tensor loc(unknown), %arg4: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%none_0 = torch.constant.none loc(#loc11) | |
%1 = torch.prim.ListConstruct %none_0, %none_0, %arg2, %none_0, %arg3, %arg4 : (!torch.none, !torch.none, !torch.tensor, !torch.none, !torch.tensor, !torch.tensor) -> !torch.list<optional<tensor>> loc(#loc) | |
%2 = torch.aten.index.Tensor %arg1, %1 : !torch.tensor, !torch.list<optional<tensor>> -> !torch.tensor loc(#loc12) | |
return %2 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_4._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_4._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_4._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_4._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_4._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown), %arg3: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%1 = torch.prim.ListConstruct %arg2, %arg3 : (!torch.tensor, !torch.tensor) -> !torch.list<tensor> loc(#loc) | |
%2 = torch.aten.index.Tensor_hacked_twin %arg1, %1 : !torch.tensor, !torch.list<tensor> -> !torch.tensor loc(#loc13) | |
return %2 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_5._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_5._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_5._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_5._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_5._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%none_0 = torch.constant.none loc(#loc14) | |
%1 = torch.prim.ListConstruct %none_0, %arg2, %none_0 : (!torch.none, !torch.tensor, !torch.none) -> !torch.list<optional<tensor>> loc(#loc) | |
%2 = torch.aten.index.Tensor %arg1, %1 : !torch.tensor, !torch.list<optional<tensor>> -> !torch.tensor loc(#loc15) | |
return %2 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_6._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_6._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_6._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_6._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_6._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%none_0 = torch.constant.none loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc16) | |
%int9223372036854775807 = torch.constant.int 9223372036854775807 loc(#loc17) | |
%int1 = torch.constant.int 1 loc(#loc18) | |
%int2 = torch.constant.int 2 loc(#loc19) | |
%int3 = torch.constant.int 3 loc(#loc20) | |
%1 = torch.aten.slice.Tensor %arg1, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc21) | |
%2 = torch.aten.select.int %1, %int1, %int2 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc22) | |
%3 = torch.aten.slice.Tensor %arg1, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc23) | |
%4 = torch.aten.select.int %3, %int1, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc24) | |
%5 = torch.aten.sub.Tensor %2, %4, %int1 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc25) | |
%6 = torch.aten.slice.Tensor %arg1, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc26) | |
%7 = torch.aten.select.int %6, %int1, %int3 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc27) | |
%8 = torch.aten.slice.Tensor %arg1, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc28) | |
%9 = torch.aten.select.int %8, %int1, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc29) | |
%10 = torch.aten.sub.Tensor %7, %9, %int1 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc30) | |
%11 = torch.aten.mul.Tensor %5, %10 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc31) | |
%12 = torch.aten.slice.Tensor %arg2, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc32) | |
%13 = torch.aten.select.int %12, %int1, %int2 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc33) | |
%14 = torch.aten.slice.Tensor %arg2, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc34) | |
%15 = torch.aten.select.int %14, %int1, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc35) | |
%16 = torch.aten.sub.Tensor %13, %15, %int1 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc36) | |
%17 = torch.aten.slice.Tensor %arg2, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc37) | |
%18 = torch.aten.select.int %17, %int1, %int3 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc38) | |
%19 = torch.aten.slice.Tensor %arg2, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc39) | |
%20 = torch.aten.select.int %19, %int1, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc40) | |
%21 = torch.aten.sub.Tensor %18, %20, %int1 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc41) | |
%22 = torch.aten.mul.Tensor %16, %21 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc42) | |
%23 = torch.aten.slice.Tensor %arg1, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc43) | |
%24 = torch.aten.slice.Tensor %23, %int1, %int0, %int2, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc44) | |
%25 = torch.aten.slice.Tensor %arg2, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc45) | |
%26 = torch.aten.slice.Tensor %25, %int1, %int0, %int2, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc46) | |
%27 = torch.aten.maximum %24, %26 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc47) | |
%28 = torch.aten.slice.Tensor %arg1, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc48) | |
%29 = torch.aten.slice.Tensor %28, %int1, %int2, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc49) | |
%30 = torch.aten.slice.Tensor %arg2, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc50) | |
%31 = torch.aten.slice.Tensor %30, %int1, %int2, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc51) | |
%32 = torch.aten.minimum %29, %31 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc52) | |
%33 = torch.aten.sub.Tensor %32, %27, %int1 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc53) | |
%34 = torch.aten.clamp %33, %int0, %none_0 : !torch.tensor, !torch.int, !torch.none -> !torch.tensor loc(#loc54) | |
%35 = torch.aten.slice.Tensor %34, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc55) | |
%36 = torch.aten.select.int %35, %int1, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc56) | |
%37 = torch.aten.slice.Tensor %34, %int0, %int0, %int9223372036854775807, %int1 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor loc(#loc57) | |
%38 = torch.aten.select.int %37, %int1, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc58) | |
%39 = torch.aten.mul.Tensor %36, %38 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc59) | |
%40 = torch.aten.add.Tensor %11, %22, %int1 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc60) | |
%41 = torch.aten.sub.Tensor %40, %39, %int1 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc61) | |
%42 = torch.aten.div.Tensor %39, %41 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc62) | |
return %42 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_7._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_7._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_7._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_7._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_7._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%int3 = torch.constant.int 3 loc(#loc63) | |
%1 = torch.aten.add.Tensor %arg1, %arg2, %int3 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc64) | |
return %1 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_8._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_8._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_8._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_8._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_8._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%int3 = torch.constant.int 3 loc(#loc65) | |
%1 = torch.aten.add.Tensor %arg1, %arg2, %int3 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc66) | |
return %1 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_9._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_9._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_9._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_9._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_9._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%float2.300000e00 = torch.constant.float 2.300000e+00 loc(#loc67) | |
%1 = torch.aten.add.Tensor %arg1, %arg2, %float2.300000e00 : !torch.tensor, !torch.tensor, !torch.float -> !torch.tensor loc(#loc68) | |
return %1 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_10._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_10._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_10._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_10._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_10._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%int2 = torch.constant.int 2 loc(#loc69) | |
%1 = torch.aten.add.Tensor %arg1, %arg2, %int2 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc70) | |
return %1 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_11._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_11._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_11._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_11._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_11._lambda"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown), %arg3: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%float-1.500000e01 = torch.constant.float -1.500000e+01 loc(#loc71) | |
%float-5.000000e00 = torch.constant.float -5.000000e+00 loc(#loc72) | |
%float-2.000000e01 = torch.constant.float -2.000000e+01 loc(#loc73) | |
%none_0 = torch.constant.none loc(#loc) | |
%float1.000000e00 = torch.constant.float 1.000000e+00 loc(#loc74) | |
%float1.000000e01 = torch.constant.float 1.000000e+01 loc(#loc75) | |
%float3.000000e00 = torch.constant.float 3.000000e+00 loc(#loc76) | |
%int0 = torch.constant.int 0 loc(#loc77) | |
%int1 = torch.constant.int 1 loc(#loc78) | |
%int2 = torch.constant.int 2 loc(#loc79) | |
%1 = torch.aten.uniform_ %arg1, %float1.000000e00, %float1.000000e01, %none_0 : !torch.tensor, !torch.float, !torch.float, !torch.none -> !torch.tensor loc(#loc80) | |
%2 = torch.aten.uniform_ %arg2, %float-2.000000e01, %float-5.000000e00, %none_0 : !torch.tensor, !torch.float, !torch.float, !torch.none -> !torch.tensor loc(#loc81) | |
%3 = torch.aten.uniform_ %arg3, %float-1.500000e01, %float3.000000e00, %none_0 : !torch.tensor, !torch.float, !torch.float, !torch.none -> !torch.tensor loc(#loc82) | |
%4 = torch.prim.ListConstruct %int0, %int1, %int2 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%5 = torch.prims.var %1, %4, %int1, %none_0 : !torch.tensor, !torch.list<int>, !torch.int, !torch.none -> !torch.tensor loc(#loc83) | |
%6 = torch.prims.sqrt %5 : !torch.tensor -> !torch.tensor loc(#loc84) | |
%7 = torch.prim.ListConstruct %int1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%8 = torch.aten.view %6, %7 : !torch.tensor, !torch.list<int> -> !torch.tensor loc(#loc85) | |
%9 = torch.prim.ListConstruct %int0, %int1, %int2 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%10 = torch.prims.var %2, %9, %int1, %none_0 : !torch.tensor, !torch.list<int>, !torch.int, !torch.none -> !torch.tensor loc(#loc86) | |
%11 = torch.prims.sqrt %10 : !torch.tensor -> !torch.tensor loc(#loc87) | |
%12 = torch.prim.ListConstruct %int1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%13 = torch.aten.view %11, %12 : !torch.tensor, !torch.list<int> -> !torch.tensor loc(#loc88) | |
%14 = torch.prim.ListConstruct %int0, %int1, %int2 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%15 = torch.prims.var %3, %14, %int1, %none_0 : !torch.tensor, !torch.list<int>, !torch.int, !torch.none -> !torch.tensor loc(#loc89) | |
%16 = torch.prims.sqrt %15 : !torch.tensor -> !torch.tensor loc(#loc90) | |
%17 = torch.prim.ListConstruct %int1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%18 = torch.aten.view %16, %17 : !torch.tensor, !torch.list<int> -> !torch.tensor loc(#loc91) | |
%19 = torch.prim.ListConstruct %8, %13, %18 : (!torch.tensor, !torch.tensor, !torch.tensor) -> !torch.list<tensor> loc(#loc) | |
%20 = torch.aten.cat %19, %int0 : !torch.list<tensor>, !torch.int -> !torch.tensor loc(#loc92) | |
%21 = torch.aten.mean %1, %none_0 : !torch.tensor, !torch.none -> !torch.tensor loc(#loc93) | |
%22 = torch.prim.ListConstruct %int1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%23 = torch.aten.view %21, %22 : !torch.tensor, !torch.list<int> -> !torch.tensor loc(#loc94) | |
%24 = torch.aten.mean %2, %none_0 : !torch.tensor, !torch.none -> !torch.tensor loc(#loc95) | |
%25 = torch.prim.ListConstruct %int1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%26 = torch.aten.view %24, %25 : !torch.tensor, !torch.list<int> -> !torch.tensor loc(#loc96) | |
%27 = torch.aten.mean %3, %none_0 : !torch.tensor, !torch.none -> !torch.tensor loc(#loc97) | |
%28 = torch.prim.ListConstruct %int1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%29 = torch.aten.view %27, %28 : !torch.tensor, !torch.list<int> -> !torch.tensor loc(#loc98) | |
%30 = torch.prim.ListConstruct %23, %26, %29 : (!torch.tensor, !torch.tensor, !torch.tensor) -> !torch.list<tensor> loc(#loc) | |
%31 = torch.aten.cat %30, %int0 : !torch.list<tensor>, !torch.int -> !torch.tensor loc(#loc99) | |
%32 = torch.prim.TupleConstruct %20, %31 : !torch.tensor, !torch.tensor -> !torch.tuple<tensor, tensor> loc(#loc) | |
return %32 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_12._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_12._lambda"> loc(unknown)) -> !torch.str { | |
%1 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_12._lambda"> -> !torch.str loc(#loc) | |
return %1 : !torch.str loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.fx.graph_module.___torch_mangle_12._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_12._lambda"> loc(unknown), %arg1: !torch.tensor {torch.type_bound = !torch.vtensor<[1000],f64>} loc(unknown)) -> !torch.tuple<tensor, tensor, tensor> { | |
%int4 = torch.constant.int 4 loc(#loc100) | |
%false = torch.constant.bool false loc(#loc101) | |
%cpu = torch.constant.device "cpu" loc(#loc) | |
%int7 = torch.constant.int 7 loc(#loc102) | |
%true_0 = torch.constant.bool true loc(#loc103) | |
%none_1 = torch.constant.none loc(#loc) | |
%float1.000000e00 = torch.constant.float 1.000000e+00 loc(#loc) | |
%float0.000000e00 = torch.constant.float 0.000000e+00 loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc104) | |
%int1 = torch.constant.int 1 loc(#loc105) | |
%int999 = torch.constant.int 999 loc(#loc106) | |
%int2 = torch.constant.int 2 loc(#loc107) | |
%int1000 = torch.constant.int 1000 loc(#loc108) | |
%float1.000000e-03 = torch.constant.float 1.000000e-03 loc(#loc109) | |
%1 = torch.aten.uniform %arg1, %float0.000000e00, %float1.000000e00, %none_1 : !torch.tensor, !torch.float, !torch.float, !torch.none -> !torch.tensor loc(#loc110) | |
%2 = torch.aten.uniform %arg1, %float0.000000e00, %float1.000000e00, %none_1 : !torch.tensor, !torch.float, !torch.float, !torch.none -> !torch.tensor loc(#loc111) | |
%3 = torch.aten.unsqueeze %1, %int0 : !torch.tensor, !torch.int -> !torch.tensor loc(#loc112) | |
%4 = torch.aten.unsqueeze %2, %int0 : !torch.tensor, !torch.int -> !torch.tensor loc(#loc113) | |
%5 = torch.prim.ListConstruct %3, %4 : (!torch.tensor, !torch.tensor) -> !torch.list<tensor> loc(#loc) | |
%6 = torch.aten.cat %5, %int0 : !torch.list<tensor>, !torch.int -> !torch.tensor loc(#loc114) | |
%7 = torch.prim.ListConstruct %int1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%8 = torch.aten.mean.dim %6, %7, %true_0, %none_1 : !torch.tensor, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor loc(#loc115) | |
%9 = torch.aten.sub.Tensor %6, %8, %int1 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc116) | |
%10 = torch.aten.t %9 : !torch.tensor -> !torch.tensor loc(#loc117) | |
%11 = torch.aten.mm %9, %10 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc118) | |
%12 = torch.aten.div.Scalar %11, %int999 : !torch.tensor, !torch.int -> !torch.tensor loc(#loc119) | |
%13 = torch.aten.select.int %12, %int0, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc120) | |
%14 = torch.aten.select.int %13, %int0, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc121) | |
%15 = torch.aten.select.int %12, %int0, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc122) | |
%16 = torch.aten.select.int %15, %int0, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc123) | |
%17 = torch.aten.select.int %12, %int0, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc124) | |
%18 = torch.aten.select.int %17, %int0, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc125) | |
%19 = torch.aten.mul.Tensor %16, %18 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc126) | |
%20 = torch.aten.sqrt %19 : !torch.tensor -> !torch.tensor loc(#loc127) | |
%21 = torch.aten.div.Tensor %14, %20 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc128) | |
%22 = torch.prim.ListConstruct %int2, %int1000 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%23 = torch.aten.empty.memory_format %22, %int7, %none_1, %cpu, %false, %none_1 : !torch.list<int>, !torch.int, !torch.none, !torch.Device, !torch.bool, !torch.none -> !torch.tensor loc(#loc129) | |
%24 = torch.aten.uniform %23, %float0.000000e00, %float1.000000e00, %none_1 : !torch.tensor, !torch.float, !torch.float, !torch.none -> !torch.tensor loc(#loc130) | |
%25 = torch.prim.ListConstruct %int1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%26 = torch.aten.mean.dim %24, %25, %true_0, %none_1 : !torch.tensor, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor loc(#loc131) | |
%27 = torch.aten.sub.Tensor %24, %26, %int1 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc132) | |
%28 = torch.aten.t %27 : !torch.tensor -> !torch.tensor loc(#loc133) | |
%29 = torch.aten.mm %27, %28 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc134) | |
%30 = torch.aten.div.Scalar %29, %int999 : !torch.tensor, !torch.int -> !torch.tensor loc(#loc135) | |
%31 = torch.aten.select.int %30, %int0, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc136) | |
%32 = torch.aten.select.int %31, %int0, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc137) | |
%33 = torch.aten.select.int %30, %int0, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc138) | |
%34 = torch.aten.select.int %33, %int0, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc139) | |
%35 = torch.aten.select.int %30, %int0, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc140) | |
%36 = torch.aten.select.int %35, %int0, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc141) | |
%37 = torch.aten.mul.Tensor %34, %36 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc142) | |
%38 = torch.aten.sqrt %37 : !torch.tensor -> !torch.tensor loc(#loc143) | |
%39 = torch.aten.div.Tensor %32, %38 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc144) | |
%40 = torch.aten.t %24 : !torch.tensor -> !torch.tensor loc(#loc145) | |
%41 = torch.aten.clone %40, %int0 : !torch.tensor, !torch.int -> !torch.tensor loc(#loc146) | |
%42 = torch.prim.ListConstruct %int2, %int1000 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%43 = torch.aten._unsafe_view %41, %42 : !torch.tensor, !torch.list<int> -> !torch.tensor loc(#loc147) | |
%44 = torch.prim.ListConstruct %int1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%45 = torch.aten.mean.dim %43, %44, %true_0, %none_1 : !torch.tensor, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor loc(#loc148) | |
%46 = torch.aten.sub.Tensor %43, %45, %int1 : !torch.tensor, !torch.tensor, !torch.int -> !torch.tensor loc(#loc149) | |
%47 = torch.aten.t %46 : !torch.tensor -> !torch.tensor loc(#loc150) | |
%48 = torch.aten.mm %46, %47 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc151) | |
%49 = torch.aten.div.Scalar %48, %int999 : !torch.tensor, !torch.int -> !torch.tensor loc(#loc152) | |
%50 = torch.aten.select.int %49, %int0, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc153) | |
%51 = torch.aten.select.int %50, %int0, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc154) | |
%52 = torch.aten.select.int %49, %int0, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc155) | |
%53 = torch.aten.select.int %52, %int0, %int0 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc156) | |
%54 = torch.aten.select.int %49, %int0, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc157) | |
%55 = torch.aten.select.int %54, %int0, %int1 : !torch.tensor, !torch.int, !torch.int -> !torch.tensor loc(#loc158) | |
%56 = torch.aten.mul.Tensor %53, %55 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc159) | |
%57 = torch.aten.sqrt %56 : !torch.tensor -> !torch.tensor loc(#loc160) | |
%58 = torch.aten.div.Tensor %51, %57 : !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc161) | |
%59 = torch.aten.abs %21 : !torch.tensor -> !torch.tensor loc(#loc162) | |
%60 = torch.aten.lt.Scalar %59, %float1.000000e-03 : !torch.tensor, !torch.float -> !torch.tensor loc(#loc163) | |
%61 = torch.operator "aten.scalar_tensor"(%int2, %int4, %int0, %cpu, %none_1) : (!torch.int, !torch.int, !torch.int, !torch.Device, !torch.none) -> !torch.tensor loc(#loc164) | |
%62 = torch.operator "aten.scalar_tensor"(%int1, %int4, %int0, %cpu, %none_1) : (!torch.int, !torch.int, !torch.int, !torch.Device, !torch.none) -> !torch.tensor loc(#loc165) | |
%63 = torch.aten.where.self %60, %62, %61 : !torch.tensor, !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc166) | |
%64 = torch.aten.abs %39 : !torch.tensor -> !torch.tensor loc(#loc167) | |
%65 = torch.aten.lt.Scalar %64, %float1.000000e-03 : !torch.tensor, !torch.float -> !torch.tensor loc(#loc168) | |
%66 = torch.operator "aten.scalar_tensor"(%int2, %int4, %int0, %cpu, %none_1) : (!torch.int, !torch.int, !torch.int, !torch.Device, !torch.none) -> !torch.tensor loc(#loc169) | |
%67 = torch.operator "aten.scalar_tensor"(%int1, %int4, %int0, %cpu, %none_1) : (!torch.int, !torch.int, !torch.int, !torch.Device, !torch.none) -> !torch.tensor loc(#loc170) | |
%68 = torch.aten.where.self %65, %67, %66 : !torch.tensor, !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc171) | |
%69 = torch.aten.abs %58 : !torch.tensor -> !torch.tensor loc(#loc172) | |
%70 = torch.aten.lt.Scalar %69, %float1.000000e-03 : !torch.tensor, !torch.float -> !torch.tensor loc(#loc173) | |
%71 = torch.operator "aten.scalar_tensor"(%int2, %int4, %int0, %cpu, %none_1) : (!torch.int, !torch.int, !torch.int, !torch.Device, !torch.none) -> !torch.tensor loc(#loc174) | |
%72 = torch.operator "aten.scalar_tensor"(%int1, %int4, %int0, %cpu, %none_1) : (!torch.int, !torch.int, !torch.int, !torch.Device, !torch.none) -> !torch.tensor loc(#loc175) | |
%73 = torch.aten.where.self %70, %72, %71 : !torch.tensor, !torch.tensor, !torch.tensor -> !torch.tensor loc(#loc176) | |
%74 = torch.prim.TupleConstruct %63, %68, %73 : !torch.tensor, !torch.tensor, !torch.tensor -> !torch.tuple<tensor, tensor, tensor> loc(#loc) | |
return %74 : !torch.tuple<tensor, tensor, tensor> loc(#loc) | |
} loc(#loc) | |
// Class-type declaration for the traced torch.fx GraphModule `_lambda`
// (mangled instance 12). It mirrors the persistent state torch.fx attaches
// to a GraphModule: the `training` flag, the backward-hook marker, and the
// generated Python source text held in `_code` (retrieved via the private
// `__code_getter` method below).
torch.class_type @__torch__.torch.fx.graph_module.___torch_mangle_12._lambda { | |
// Standard nn.Module training-mode flag.
torch.attr private "training" : !torch.bool loc(#loc) | |
// Declared as optional<bool>; unset when no full backward hook is registered.
torch.attr private "_is_full_backward_hook" : !torch.optional<bool> loc(#loc) | |
// Python source of the generated forward(), as produced by FX codegen.
torch.attr private "_code" : !torch.str loc(#loc) | |
// Accessor for `_code`; bound to the mangled free function of the same name.
torch.method private "__code_getter", @__torch__.torch.fx.graph_module.___torch_mangle_12._lambda.__code_getter loc(#loc) | |
// Public entry point; bound to the mangled `forward` free function.
torch.method "forward", @__torch__.torch.fx.graph_module.___torch_mangle_12._lambda.forward loc(#loc) | |
} loc(#loc) | |
%true = torch.constant.bool true loc(#loc) | |
%none = torch.constant.none loc(#loc) | |
%str = torch.constant.str "\0A\0A\0Adef forward(self, arg0_1):\0A uniform = torch.ops.aten.uniform(arg0_1)\0A uniform_1 = torch.ops.aten.uniform(arg0_1); arg0_1 = None\0A unsqueeze = torch.ops.aten.unsqueeze(uniform, 0); uniform = None\0A unsqueeze_1 = torch.ops.aten.unsqueeze(uniform_1, 0); uniform_1 = None\0A cat = torch.ops.aten.cat([unsqueeze, unsqueeze_1]); unsqueeze = unsqueeze_1 = None\0A mean = torch.ops.aten.mean(cat, [1], True)\0A sub = torch.ops.aten.sub(cat, mean); cat = mean = None\0A t = torch.ops.aten.t(sub)\0A mm = torch.ops.aten.mm(sub, t); sub = t = None\0A div = torch.ops.aten.div(mm, 999); mm = None\0A select = torch.ops.aten.select(div, 0, 0)\0A select_1 = torch.ops.aten.select(select, 0, 1); select = None\0A select_2 = torch.ops.aten.select(div, 0, 0)\0A select_3 = torch.ops.aten.select(select_2, 0, 0); select_2 = None\0A select_4 = torch.ops.aten.select(div, 0, 1); div = None\0A select_5 = torch.ops.aten.select(select_4, 0, 1); select_4 = None\0A mul = torch.ops.aten.mul(select_3, select_5); select_3 = select_5 = None\0A sqrt = torch.ops.aten.sqrt(mul); mul = None\0A div_1 = torch.ops.aten.div(select_1, sqrt); select_1 = sqrt = None\0A empty = torch.ops.aten.empty([2, 1000], dtype = torch.float64, device = device(type='cpu'), pin_memory = False)\0A uniform_2 = torch.ops.aten.uniform(empty); empty = None\0A mean_1 = torch.ops.aten.mean(uniform_2, [1], True)\0A sub_1 = torch.ops.aten.sub(uniform_2, mean_1); mean_1 = None\0A t_1 = torch.ops.aten.t(sub_1)\0A mm_1 = torch.ops.aten.mm(sub_1, t_1); sub_1 = t_1 = None\0A div_2 = torch.ops.aten.div(mm_1, 999); mm_1 = None\0A select_6 = torch.ops.aten.select(div_2, 0, 0)\0A select_7 = torch.ops.aten.select(select_6, 0, 1); select_6 = None\0A select_8 = torch.ops.aten.select(div_2, 0, 0)\0A select_9 = torch.ops.aten.select(select_8, 0, 0); select_8 = None\0A select_10 = torch.ops.aten.select(div_2, 0, 1); div_2 = None\0A select_11 = torch.ops.aten.select(select_10, 0, 1); select_10 = None\0A mul_1 = 
torch.ops.aten.mul(select_9, select_11); select_9 = select_11 = None\0A sqrt_1 = torch.ops.aten.sqrt(mul_1); mul_1 = None\0A div_3 = torch.ops.aten.div(select_7, sqrt_1); select_7 = sqrt_1 = None\0A t_2 = torch.ops.aten.t(uniform_2); uniform_2 = None\0A clone = torch.ops.aten.clone(t_2, memory_format = torch.contiguous_format); t_2 = None\0A _unsafe_view = torch.ops.aten._unsafe_view(clone, [2, 1000]); clone = None\0A mean_2 = torch.ops.aten.mean(_unsafe_view, [1], True)\0A sub_2 = torch.ops.aten.sub(_unsafe_view, mean_2); _unsafe_view = mean_2 = None\0A t_3 = torch.ops.aten.t(sub_2)\0A mm_2 = torch.ops.aten.mm(sub_2, t_3); sub_2 = t_3 = None\0A div_4 = torch.ops.aten.div(mm_2, 999); mm_2 = None\0A select_12 = torch.ops.aten.select(div_4, 0, 0)\0A select_13 = torch.ops.aten.select(select_12, 0, 1); select_12 = None\0A select_14 = torch.ops.aten.select(div_4, 0, 0)\0A select_15 = torch.ops.aten.select(select_14, 0, 0); select_14 = None\0A select_16 = torch.ops.aten.select(div_4, 0, 1); div_4 = None\0A select_17 = torch.ops.aten.select(select_16, 0, 1); select_16 = None\0A mul_2 = torch.ops.aten.mul(select_15, select_17); select_15 = select_17 = None\0A sqrt_2 = torch.ops.aten.sqrt(mul_2); mul_2 = None\0A div_5 = torch.ops.aten.div(select_13, sqrt_2); select_13 = sqrt_2 = None\0A abs_1 = torch.ops.aten.abs(div_1); div_1 = None\0A lt = torch.ops.aten.lt(abs_1, 0.001); abs_1 = None\0A scalar_tensor = torch.ops.aten.scalar_tensor(2, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))\0A scalar_tensor_1 = torch.ops.aten.scalar_tensor(1, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))\0A where = torch.ops.aten.where(lt, scalar_tensor_1, scalar_tensor); lt = scalar_tensor_1 = scalar_tensor = None\0A abs_2 = torch.ops.aten.abs(div_3); div_3 = None\0A lt_1 = torch.ops.aten.lt(abs_2, 0.001); abs_2 = None\0A scalar_tensor_2 = torch.ops.aten.scalar_tensor(2, dtype = torch.int64, layout = torch.strided, device = 
device(type='cpu'))\0A scalar_tensor_3 = torch.ops.aten.scalar_tensor(1, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))\0A where_1 = torch.ops.aten.where(lt_1, scalar_tensor_3, scalar_tensor_2); lt_1 = scalar_tensor_3 = scalar_tensor_2 = None\0A abs_3 = torch.ops.aten.abs(div_5); div_5 = None\0A lt_2 = torch.ops.aten.lt(abs_3, 0.001); abs_3 = None\0A scalar_tensor_4 = torch.ops.aten.scalar_tensor(2, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))\0A scalar_tensor_5 = torch.ops.aten.scalar_tensor(1, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))\0A where_2 = torch.ops.aten.where(lt_2, scalar_tensor_5, scalar_tensor_4); lt_2 = scalar_tensor_5 = scalar_tensor_4 = None\0A return (where, where_1, where_2)\0A " loc(#loc) | |
// Materialized instance of the mangled `_lambda` GraphModule class,
// populating each slot declared by its torch.class_type.
%0 = torch.nn_module { | |
// training = true (constant defined earlier in the module body).
torch.slot "training", %true : !torch.bool loc(#loc) | |
// No full backward hook registered: the slot holds `none` (a valid value
// for the attr's declared !torch.optional<bool> type).
torch.slot "_is_full_backward_hook", %none : !torch.none loc(#loc) | |
// FX-generated Python source of forward(), stored verbatim as a string.
torch.slot "_code", %str : !torch.str loc(#loc) | |
} : !torch.nn.Module<"__torch__.torch.fx.graph_module.___torch_mangle_12._lambda"> loc(#loc) | |
} loc(#loc) | |
#loc1 = loc("<eval_with_key>.5":5:13) | |
#loc2 = loc("<eval_with_key>.11":5:46) | |
#loc3 = loc("<eval_with_key>.11":5:43) | |
#loc4 = loc("<eval_with_key>.11":5:13) | |
#loc5 = loc("<eval_with_key>.17":5:43) | |
#loc6 = loc("<eval_with_key>.17":5:13) | |
#loc7 = loc("<eval_with_key>.22":5:118) | |
#loc8 = loc("<eval_with_key>.22":5:101) | |
#loc9 = loc("<eval_with_key>.22":5:108) | |
#loc10 = loc("<eval_with_key>.22":5:34) | |
#loc11 = loc("<eval_with_key>.28":5:42) | |
#loc12 = loc("<eval_with_key>.28":5:12) | |
#loc13 = loc("<eval_with_key>.34":5:12) | |
#loc14 = loc("<eval_with_key>.40":5:42) | |
#loc15 = loc("<eval_with_key>.40":5:12) | |
#loc16 = loc("<eval_with_key>.46":5:43) | |
#loc17 = loc("<eval_with_key>.46":5:49) | |
#loc18 = loc("<eval_with_key>.46":6:44) | |
#loc19 = loc("<eval_with_key>.46":6:47) | |
#loc20 = loc("<eval_with_key>.46":11:49) | |
#loc21 = loc("<eval_with_key>.46":5:14) | |
#loc22 = loc("<eval_with_key>.46":6:13) | |
#loc23 = loc("<eval_with_key>.46":7:14) | |
#loc24 = loc("<eval_with_key>.46":8:15) | |
#loc25 = loc("<eval_with_key>.46":9:10) | |
#loc26 = loc("<eval_with_key>.46":10:14) | |
#loc27 = loc("<eval_with_key>.46":11:15) | |
#loc28 = loc("<eval_with_key>.46":12:14) | |
#loc29 = loc("<eval_with_key>.46":13:15) | |
#loc30 = loc("<eval_with_key>.46":14:12) | |
#loc31 = loc("<eval_with_key>.46":15:10) | |
#loc32 = loc("<eval_with_key>.46":16:14) | |
#loc33 = loc("<eval_with_key>.46":17:15) | |
#loc34 = loc("<eval_with_key>.46":18:14) | |
#loc35 = loc("<eval_with_key>.46":19:15) | |
#loc36 = loc("<eval_with_key>.46":20:12) | |
#loc37 = loc("<eval_with_key>.46":21:14) | |
#loc38 = loc("<eval_with_key>.46":22:15) | |
#loc39 = loc("<eval_with_key>.46":23:14) | |
#loc40 = loc("<eval_with_key>.46":24:15) | |
#loc41 = loc("<eval_with_key>.46":25:12) | |
#loc42 = loc("<eval_with_key>.46":26:12) | |
#loc43 = loc("<eval_with_key>.46":27:14) | |
#loc44 = loc("<eval_with_key>.46":28:15) | |
#loc45 = loc("<eval_with_key>.46":29:15) | |
#loc46 = loc("<eval_with_key>.46":30:15) | |
#loc47 = loc("<eval_with_key>.46":31:14) | |
#loc48 = loc("<eval_with_key>.46":32:15) | |
#loc49 = loc("<eval_with_key>.46":33:15) | |
#loc50 = loc("<eval_with_key>.46":34:15) | |
#loc51 = loc("<eval_with_key>.46":35:15) | |
#loc52 = loc("<eval_with_key>.46":36:14) | |
#loc53 = loc("<eval_with_key>.46":37:12) | |
#loc54 = loc("<eval_with_key>.46":38:12) | |
#loc55 = loc("<eval_with_key>.46":39:15) | |
#loc56 = loc("<eval_with_key>.46":40:15) | |
#loc57 = loc("<eval_with_key>.46":41:15) | |
#loc58 = loc("<eval_with_key>.46":42:15) | |
#loc59 = loc("<eval_with_key>.46":43:12) | |
#loc60 = loc("<eval_with_key>.46":44:10) | |
#loc61 = loc("<eval_with_key>.46":45:12) | |
#loc62 = loc("<eval_with_key>.46":46:10) | |
#loc63 = loc("<eval_with_key>.52":5:53) | |
#loc64 = loc("<eval_with_key>.52":5:10) | |
#loc65 = loc("<eval_with_key>.58":5:53) | |
#loc66 = loc("<eval_with_key>.58":5:10) | |
#loc67 = loc("<eval_with_key>.64":5:53) | |
#loc68 = loc("<eval_with_key>.64":5:10) | |
#loc69 = loc("<eval_with_key>.70":5:53) | |
#loc70 = loc("<eval_with_key>.70":5:10) | |
#loc71 = loc("<eval_with_key>.75":7:49) | |
#loc72 = loc("<eval_with_key>.75":6:56) | |
#loc73 = loc("<eval_with_key>.75":6:49) | |
#loc74 = loc("<eval_with_key>.75":5:47) | |
#loc75 = loc("<eval_with_key>.75":5:52) | |
#loc76 = loc("<eval_with_key>.75":7:56) | |
#loc77 = loc("<eval_with_key>.75":8:41) | |
#loc78 = loc("<eval_with_key>.75":8:44) | |
#loc79 = loc("<eval_with_key>.75":8:47) | |
#loc80 = loc("<eval_with_key>.75":5:15) | |
#loc81 = loc("<eval_with_key>.75":6:17) | |
#loc82 = loc("<eval_with_key>.75":7:17) | |
#loc83 = loc("<eval_with_key>.75":8:10) | |
#loc84 = loc("<eval_with_key>.75":9:11) | |
#loc85 = loc("<eval_with_key>.75":10:11) | |
#loc86 = loc("<eval_with_key>.75":11:12) | |
#loc87 = loc("<eval_with_key>.75":12:13) | |
#loc88 = loc("<eval_with_key>.75":13:13) | |
#loc89 = loc("<eval_with_key>.75":14:12) | |
#loc90 = loc("<eval_with_key>.75":15:13) | |
#loc91 = loc("<eval_with_key>.75":16:13) | |
#loc92 = loc("<eval_with_key>.75":17:10) | |
#loc93 = loc("<eval_with_key>.75":18:11) | |
#loc94 = loc("<eval_with_key>.75":19:13) | |
#loc95 = loc("<eval_with_key>.75":20:13) | |
#loc96 = loc("<eval_with_key>.75":21:13) | |
#loc97 = loc("<eval_with_key>.75":22:13) | |
#loc98 = loc("<eval_with_key>.75":23:13) | |
#loc99 = loc("<eval_with_key>.75":24:12) | |
#loc100 = loc("<eval_with_key>.80":59:60) | |
#loc101 = loc("<eval_with_key>.80":24:109) | |
#loc102 = loc("<eval_with_key>.80":24:52) | |
#loc103 = loc("<eval_with_key>.80":10:41) | |
#loc104 = loc("<eval_with_key>.80":7:50) | |
#loc105 = loc("<eval_with_key>.80":10:37) | |
#loc106 = loc("<eval_with_key>.80":14:33) | |
#loc107 = loc("<eval_with_key>.80":24:34) | |
#loc108 = loc("<eval_with_key>.80":24:37) | |
#loc109 = loc("<eval_with_key>.80":58:34) | |
#loc110 = loc("<eval_with_key>.80":5:14) | |
#loc111 = loc("<eval_with_key>.80":6:16) | |
#loc112 = loc("<eval_with_key>.80":7:16) | |
#loc113 = loc("<eval_with_key>.80":8:18) | |
#loc114 = loc("<eval_with_key>.80":9:10) | |
#loc115 = loc("<eval_with_key>.80":10:11) | |
#loc116 = loc("<eval_with_key>.80":11:10) | |
#loc117 = loc("<eval_with_key>.80":12:8) | |
#loc118 = loc("<eval_with_key>.80":13:9) | |
#loc119 = loc("<eval_with_key>.80":14:10) | |
#loc120 = loc("<eval_with_key>.80":15:13) | |
#loc121 = loc("<eval_with_key>.80":16:15) | |
#loc122 = loc("<eval_with_key>.80":17:15) | |
#loc123 = loc("<eval_with_key>.80":18:15) | |
#loc124 = loc("<eval_with_key>.80":19:15) | |
#loc125 = loc("<eval_with_key>.80":20:15) | |
#loc126 = loc("<eval_with_key>.80":21:10) | |
#loc127 = loc("<eval_with_key>.80":22:11) | |
#loc128 = loc("<eval_with_key>.80":23:12) | |
#loc129 = loc("<eval_with_key>.80":24:12) | |
#loc130 = loc("<eval_with_key>.80":25:16) | |
#loc131 = loc("<eval_with_key>.80":26:13) | |
#loc132 = loc("<eval_with_key>.80":27:12) | |
#loc133 = loc("<eval_with_key>.80":28:10) | |
#loc134 = loc("<eval_with_key>.80":29:11) | |
#loc135 = loc("<eval_with_key>.80":30:12) | |
#loc136 = loc("<eval_with_key>.80":31:15) | |
#loc137 = loc("<eval_with_key>.80":32:15) | |
#loc138 = loc("<eval_with_key>.80":33:15) | |
#loc139 = loc("<eval_with_key>.80":34:15) | |
#loc140 = loc("<eval_with_key>.80":35:16) | |
#loc141 = loc("<eval_with_key>.80":36:16) | |
#loc142 = loc("<eval_with_key>.80":37:12) | |
#loc143 = loc("<eval_with_key>.80":38:13) | |
#loc144 = loc("<eval_with_key>.80":39:12) | |
#loc145 = loc("<eval_with_key>.80":40:10) | |
#loc146 = loc("<eval_with_key>.80":41:12) | |
#loc147 = loc("<eval_with_key>.80":42:19) | |
#loc148 = loc("<eval_with_key>.80":43:13) | |
#loc149 = loc("<eval_with_key>.80":44:12) | |
#loc150 = loc("<eval_with_key>.80":45:10) | |
#loc151 = loc("<eval_with_key>.80":46:11) | |
#loc152 = loc("<eval_with_key>.80":47:12) | |
#loc153 = loc("<eval_with_key>.80":48:16) | |
#loc154 = loc("<eval_with_key>.80":49:16) | |
#loc155 = loc("<eval_with_key>.80":50:16) | |
#loc156 = loc("<eval_with_key>.80":51:16) | |
#loc157 = loc("<eval_with_key>.80":52:16) | |
#loc158 = loc("<eval_with_key>.80":53:16) | |
#loc159 = loc("<eval_with_key>.80":54:12) | |
#loc160 = loc("<eval_with_key>.80":55:13) | |
#loc161 = loc("<eval_with_key>.80":56:12) | |
#loc162 = loc("<eval_with_key>.80":57:12) | |
#loc163 = loc("<eval_with_key>.80":58:9) | |
#loc164 = loc("<eval_with_key>.80":59:20) | |
#loc165 = loc("<eval_with_key>.80":60:22) | |
#loc166 = loc("<eval_with_key>.80":61:12) | |
#loc167 = loc("<eval_with_key>.80":62:12) | |
#loc168 = loc("<eval_with_key>.80":63:11) | |
#loc169 = loc("<eval_with_key>.80":64:22) | |
#loc170 = loc("<eval_with_key>.80":65:22) | |
#loc171 = loc("<eval_with_key>.80":66:14) | |
#loc172 = loc("<eval_with_key>.80":67:12) | |
#loc173 = loc("<eval_with_key>.80":68:11) | |
#loc174 = loc("<eval_with_key>.80":69:22) | |
#loc175 = loc("<eval_with_key>.80":70:22) | |
#loc176 = loc("<eval_with_key>.80":71:14) |
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.