Gist by @ProfFan, last active October 20, 2021 17:39
swift-api-crash-sr-13263.sil
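
For orientation, here is a rough Swift-level sketch of the kind of layer whose pullback is dumped below. It is not the actual swift-apis BatchNorm source; the only details taken from the SIL are the differentiable parameters `scale` and `offset` (the fields of BatchNorm.TangentVector) and the fact that callAsFunction contains control flow, which is what gives the pullback its bb0..bb6 structure. Everything else (the normalization math, the learning-phase switch, the initializer) is illustrative and would need a Swift for TensorFlow toolchain to build.

import TensorFlow

// Hypothetical stand-in for TensorFlow.BatchNorm; not the real swift-apis type.
struct SimpleBatchNorm<Scalar: TensorFlowFloatingPoint>: Differentiable {
    var scale: Tensor<Scalar>   // corresponds to BatchNorm.TangentVector.scale in the SIL
    var offset: Tensor<Scalar>  // corresponds to BatchNorm.TangentVector.offset in the SIL
    @noDerivative let epsilon: Scalar

    init(featureCount: Int, epsilon: Scalar = 1e-5) {
        self.scale = Tensor(ones: [featureCount])
        self.offset = Tensor(zeros: [featureCount])
        self.epsilon = epsilon
    }

    @differentiable(wrt: self)  // the SIL below is the pullback with respect to `self`
    func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        // Branching on the learning phase is the kind of control flow that makes the
        // generated pullback thread predecessor enums between its basic blocks.
        switch Context.local.learningPhase {
        case .training:
            let mean = input.mean(alongAxes: 0)
            let variance = input.variance(alongAxes: 0)
            return scale * (input - mean) / sqrt(variance + epsilon) + offset
        case .inference:
            return scale * input + offset
        }
    }
}
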
// pullback of BatchNorm.callAsFunction(_:)
sil private @$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGFAA0aB13FloatingPointRzlTJpUSpSr : $@convention(thin) <τ_0_0 where τ_0_0 : TensorFlowFloatingPoint> (@guaranteed Tensor<τ_0_0>, @owned _AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb6__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>) -> @out BatchNorm<τ_0_0>.TangentVector {
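// NOTE (annotation, not compiler output): bb0 rebuilds zero tangents and dispatches on the
// predecessor enum stored in the pullback struct %2; bb1/bb2 apply the inner pullbacks
// (pullback_4 / pullback_3) to the seed %1 and accumulate; they rejoin at bb3, which
// dispatches again to bb4/bb5; bb6 applies pullback_0 and stores the final
// BatchNorm<τ_0_0>.TangentVector to the indirect result %0.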
// %0 // user: %440
// %1 // users: %211, %144
// %2 // user: %70
bb0(%0 : $*BatchNorm<τ_0_0>.TangentVector, %1 : $Tensor<τ_0_0>, %2 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb6__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>):
%3 = metatype $@thin Tensor<τ_0_0>.Type // users: %238, %236, %219, %218, %418, %417, %414, %413, %346, %344, %340, %339, %337, %334, %330, %329, %304, %303, %294, %293, %171, %169, %152, %151, %126, %125, %121, %96, %95, %91, %87, %86, %64, %61, %55, %52, %47, %45, %41, %38, %33, %31, %26, %24, %11, %5
// function_ref static Tensor<>.zero.getter
%4 = function_ref @$s10TensorFlow0A0VAASjRzrlE4zeroACyxGvgZ : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %418, %414, %340, %330, %304, %303, %294, %293, %126, %125, %121, %96, %95, %91, %87, %86, %64, %61, %55, %52, %47, %45, %41, %38, %33, %31, %26, %24, %11, %5
%5 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %6, %123, %116, %93, %84
%6 = struct_extract %5 : $Tensor<τ_0_0>, #Tensor.handle // user: %7
%7 = struct_extract %6 : $TensorHandle<τ_0_0>, #TensorHandle.handle // users: %10, %9, %8
strong_retain %7 : $_AnyTensorHandle // id: %8
strong_retain %7 : $_AnyTensorHandle // id: %9
strong_retain %7 : $_AnyTensorHandle // id: %10
%11 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %12, %119, %111, %89, %79
%12 = struct_extract %11 : $Tensor<τ_0_0>, #Tensor.handle // user: %13
%13 = struct_extract %12 : $TensorHandle<τ_0_0>, #TensorHandle.handle // users: %16, %18, %17
%14 = metatype $@thin BatchNorm<τ_0_0>.TangentVector.Type // users: %67, %58, %49, %43, %35, %29, %19
// function_ref static BatchNorm.TangentVector.zero.getter
%15 = function_ref @$s10TensorFlow9BatchNormV13TangentVectorV4zeroAEyx_GvgZ : $@convention(method) <τ_0_0 where τ_0_0 : TensorFlowFloatingPoint> (@thin BatchNorm<τ_0_0>.TangentVector.Type) -> @owned BatchNorm<τ_0_0>.TangentVector // users: %67, %58, %49, %43, %35, %29, %19
strong_retain %13 : $_AnyTensorHandle // id: %16
strong_retain %13 : $_AnyTensorHandle // id: %17
strong_retain %13 : $_AnyTensorHandle // id: %18
%19 = apply %15<τ_0_0>(%14) : $@convention(method) <τ_0_0 where τ_0_0 : TensorFlowFloatingPoint> (@thin BatchNorm<τ_0_0>.TangentVector.Type) -> @owned BatchNorm<τ_0_0>.TangentVector // users: %21, %20, %105, %73
%20 = struct_extract %19 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // user: %22
%21 = struct_extract %19 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // user: %23
retain_value %20 : $Tensor<τ_0_0> // id: %22
retain_value %21 : $Tensor<τ_0_0> // id: %23
%24 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %403, %362, %25
debug_value %24 : $Tensor<τ_0_0>, var, name "scale" // id: %25
%26 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %402, %359, %27
debug_value %26 : $Tensor<τ_0_0>, var, name "offset" // id: %27
%28 = alloc_stack $BatchNorm<τ_0_0>.TangentVector, let, name "self", argno 2, implicit, expr op_deref // users: %351, %347, %395, %391, %439, %399, %355, %30, %427, %455
%29 = apply %15<τ_0_0>(%14) : $@convention(method) <τ_0_0 where τ_0_0 : TensorFlowFloatingPoint> (@thin BatchNorm<τ_0_0>.TangentVector.Type) -> @owned BatchNorm<τ_0_0>.TangentVector // user: %30
store %29 to %28 : $*BatchNorm<τ_0_0>.TangentVector // id: %30
%31 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %315, %32
debug_value %31 : $Tensor<τ_0_0>, var, name "scale" // id: %32
%33 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %314, %34
debug_value %33 : $Tensor<τ_0_0>, var, name "offset" // id: %34
%35 = apply %15<τ_0_0>(%14) : $@convention(method) <τ_0_0 where τ_0_0 : TensorFlowFloatingPoint> (@thin BatchNorm<τ_0_0>.TangentVector.Type) -> @owned BatchNorm<τ_0_0>.TangentVector // users: %36, %310, %312
debug_value %35 : $BatchNorm<τ_0_0>.TangentVector, let, name "self", argno 2, implicit, expr op_deref // id: %36
%37 = alloc_stack $Tensor<τ_0_0>, var, name "scale" // users: %281, %361, %308, %302, %288, %331, %327, %39, %337, %454
%38 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // user: %39
store %38 to %37 : $*Tensor<τ_0_0> // id: %39
%40 = alloc_stack $Tensor<τ_0_0>, var, name "offset" // users: %273, %358, %298, %292, %280, %341, %338, %42, %346, %453
%41 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // user: %42
store %41 to %40 : $*Tensor<τ_0_0> // id: %42
%43 = apply %15<τ_0_0>(%14) : $@convention(method) <τ_0_0 where τ_0_0 : TensorFlowFloatingPoint> (@thin BatchNorm<τ_0_0>.TangentVector.Type) -> @owned BatchNorm<τ_0_0>.TangentVector // users: %44, %267, %271
debug_value %43 : $BatchNorm<τ_0_0>.TangentVector, let, name "self", argno 2, implicit, expr op_deref // id: %44
%45 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %249, %182, %46
debug_value %45 : $Tensor<τ_0_0>, var, name "scale" // id: %46
%47 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %246, %179, %48
debug_value %47 : $Tensor<τ_0_0>, var, name "offset" // id: %48
%49 = apply %15<τ_0_0>(%14) : $@convention(method) <τ_0_0 where τ_0_0 : TensorFlowFloatingPoint> (@thin BatchNorm<τ_0_0>.TangentVector.Type) -> @owned BatchNorm<τ_0_0>.TangentVector // users: %50, %174, %176, %241, %243
debug_value %49 : $BatchNorm<τ_0_0>.TangentVector, let, name "self", argno 2, implicit, expr op_deref // id: %50
%51 = alloc_stack $Tensor<τ_0_0>, var, name "scale" // users: %113, %248, %123, %116, %53, %236, %452
%52 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // user: %53
store %52 to %51 : $*Tensor<τ_0_0> // id: %53
%54 = alloc_stack $Tensor<τ_0_0>, var, name "offset" // users: %108, %245, %119, %111, %56, %238, %451
%55 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // user: %56
store %55 to %54 : $*Tensor<τ_0_0> // id: %56
%57 = alloc_stack $BatchNorm<τ_0_0>.TangentVector, let, name "self", argno 2, implicit, expr op_deref // users: %101, %97, %105, %59, %222, %450
%58 = apply %15<τ_0_0>(%14) : $@convention(method) <τ_0_0 where τ_0_0 : TensorFlowFloatingPoint> (@thin BatchNorm<τ_0_0>.TangentVector.Type) -> @owned BatchNorm<τ_0_0>.TangentVector // user: %59
store %58 to %57 : $*BatchNorm<τ_0_0>.TangentVector // id: %59
%60 = alloc_stack $Tensor<τ_0_0>, var, name "scale" // users: %81, %181, %93, %84, %62, %169, %449
%61 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // user: %62
store %61 to %60 : $*Tensor<τ_0_0> // id: %62
%63 = alloc_stack $Tensor<τ_0_0>, var, name "offset" // users: %76, %178, %89, %79, %65, %171, %448
%64 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // user: %65
store %64 to %63 : $*Tensor<τ_0_0> // id: %65
%66 = alloc_stack $BatchNorm<τ_0_0>.TangentVector, let, name "self", argno 2, implicit, expr op_deref // users: %72, %71, %73, %155, %447
%67 = apply %15<τ_0_0>(%14) : $@convention(method) <τ_0_0 where τ_0_0 : TensorFlowFloatingPoint> (@thin BatchNorm<τ_0_0>.TangentVector.Type) -> @owned BatchNorm<τ_0_0>.TangentVector // users: %68, %69
%68 = struct_extract %67 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // user: %75
%69 = struct_extract %67 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // user: %74
%70 = struct_extract %2 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb6__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb6__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.predecessor // user: %129
%71 = struct_element_addr %66 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // users: %172, %194
%72 = struct_element_addr %66 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // users: %173, %196
store %19 to %66 : $*BatchNorm<τ_0_0>.TangentVector // id: %73
release_value %69 : $Tensor<τ_0_0> // id: %74
release_value %68 : $Tensor<τ_0_0> // id: %75
%76 = struct_element_addr %63 : $*Tensor<τ_0_0>, #Tensor.handle // user: %77
%77 = struct_element_addr %76 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // users: %200, %88, %78
%78 = load %77 : $*_AnyTensorHandle // user: %80
store %11 to %63 : $*Tensor<τ_0_0> // id: %79
strong_release %78 : $_AnyTensorHandle // id: %80
%81 = struct_element_addr %60 : $*Tensor<τ_0_0>, #Tensor.handle // user: %82
%82 = struct_element_addr %81 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // users: %198, %92, %83
%83 = load %82 : $*_AnyTensorHandle // user: %85
store %5 to %60 : $*Tensor<τ_0_0> // id: %84
strong_release %83 : $_AnyTensorHandle // id: %85
%86 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %441, %417
%87 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %442, %413
%88 = load %77 : $*_AnyTensorHandle // user: %90
store %11 to %63 : $*Tensor<τ_0_0> // id: %89
strong_release %88 : $_AnyTensorHandle // id: %90
%91 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %206, %183, %151
%92 = load %82 : $*_AnyTensorHandle // user: %94
store %5 to %60 : $*Tensor<τ_0_0> // id: %93
strong_release %92 : $_AnyTensorHandle // id: %94
%95 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %207, %184, %152
%96 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // user: %127
%97 = struct_element_addr %57 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // users: %239, %98
%98 = struct_element_addr %97 : $*Tensor<τ_0_0>, #Tensor.handle // user: %99
%99 = struct_element_addr %98 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // users: %135, %100
%100 = load %99 : $*_AnyTensorHandle // user: %106
%101 = struct_element_addr %57 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // users: %240, %102
%102 = struct_element_addr %101 : $*Tensor<τ_0_0>, #Tensor.handle // user: %103
%103 = struct_element_addr %102 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // users: %136, %104
%104 = load %103 : $*_AnyTensorHandle // user: %107
store %19 to %57 : $*BatchNorm<τ_0_0>.TangentVector // id: %105
strong_release %100 : $_AnyTensorHandle // id: %106
strong_release %104 : $_AnyTensorHandle // id: %107
%108 = struct_element_addr %54 : $*Tensor<τ_0_0>, #Tensor.handle // user: %109
%109 = struct_element_addr %108 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // users: %133, %118, %110
%110 = load %109 : $*_AnyTensorHandle // user: %112
store %11 to %54 : $*Tensor<τ_0_0> // id: %111
strong_release %110 : $_AnyTensorHandle // id: %112
%113 = struct_element_addr %51 : $*Tensor<τ_0_0>, #Tensor.handle // user: %114
%114 = struct_element_addr %113 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // users: %131, %122, %115
%115 = load %114 : $*_AnyTensorHandle // user: %117
store %5 to %51 : $*Tensor<τ_0_0> // id: %116
strong_release %115 : $_AnyTensorHandle // id: %117
%118 = load %109 : $*_AnyTensorHandle // user: %120
store %11 to %54 : $*Tensor<τ_0_0> // id: %119
strong_release %118 : $_AnyTensorHandle // id: %120
%121 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %250, %140, %218
%122 = load %114 : $*_AnyTensorHandle // user: %124
store %5 to %51 : $*Tensor<τ_0_0> // id: %123
strong_release %122 : $_AnyTensorHandle // id: %124
%125 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %251, %139, %219
%126 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // user: %128
release_value %96 : $Tensor<τ_0_0> // id: %127
release_value %126 : $Tensor<τ_0_0> // id: %128
switch_enum %70 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb6__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, case #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb6__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.bb5!enumelt: bb1, case #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb6__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.bb4!enumelt: bb2 // id: %129
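// NOTE (annotation): bb1 and bb2 are the two predecessor cases; each applies its inner
// pullback (pullback_4 here, pullback_3 in bb2) to the seed %1, adds the results into the
// zero-initialized accumulators with Tensor.+ / +=, and branches to the join block bb3.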
// %130 // users: %142, %141
bb1(%130 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb5__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>): // Preds: bb0
%131 = load %114 : $*_AnyTensorHandle // user: %132
strong_release %131 : $_AnyTensorHandle // id: %132
%133 = load %109 : $*_AnyTensorHandle // user: %134
strong_release %133 : $_AnyTensorHandle // id: %134
%135 = load %99 : $*_AnyTensorHandle // user: %137
%136 = load %103 : $*_AnyTensorHandle // user: %138
strong_release %135 : $_AnyTensorHandle // id: %137
strong_release %136 : $_AnyTensorHandle // id: %138
release_value %125 : $Tensor<τ_0_0> // id: %139
release_value %121 : $Tensor<τ_0_0> // id: %140
%141 = struct_extract %130 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb5__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb5__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.predecessor // user: %190
%142 = struct_extract %130 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb5__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb5__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.pullback_4 // users: %149, %144
%143 = alloc_stack $BatchNorm<τ_0_0>.TangentVector // users: %160, %156, %148, %155, %166
%144 = apply %142(%1) : $@callee_guaranteed @substituted <τ_0_0, τ_0_1, τ_0_2, τ_0_3 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar, τ_0_2 : TensorFlowScalar, τ_0_3 : TensorFlowFloatingPoint> (@guaranteed Tensor<τ_0_0>) -> (@owned Tensor<τ_0_1>, @owned Tensor<τ_0_2>, @owned BatchNorm<τ_0_3>.TangentVector) for <τ_0_0, τ_0_0, τ_0_0, τ_0_0> // users: %147, %146, %145
%145 = tuple_extract %144 : $(Tensor<τ_0_0>, Tensor<τ_0_0>, BatchNorm<τ_0_0>.TangentVector), 0 // users: %185, %151
%146 = tuple_extract %144 : $(Tensor<τ_0_0>, Tensor<τ_0_0>, BatchNorm<τ_0_0>.TangentVector), 1 // users: %187, %152
%147 = tuple_extract %144 : $(Tensor<τ_0_0>, Tensor<τ_0_0>, BatchNorm<τ_0_0>.TangentVector), 2 // user: %148
store %147 to %143 : $*BatchNorm<τ_0_0>.TangentVector // id: %148
strong_release %142 : $@callee_guaranteed @substituted <τ_0_0, τ_0_1, τ_0_2, τ_0_3 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar, τ_0_2 : TensorFlowScalar, τ_0_3 : TensorFlowFloatingPoint> (@guaranteed Tensor<τ_0_0>) -> (@owned Tensor<τ_0_1>, @owned Tensor<τ_0_2>, @owned BatchNorm<τ_0_3>.TangentVector) for <τ_0_0, τ_0_0, τ_0_0, τ_0_0> // id: %149
// function_ref static Tensor<>.+ infix(_:_:)
%150 = function_ref @$s10TensorFlow0A0VAASjRzrlE1poiyACyxGAE_AEtFZ : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %152, %151
%151 = apply %150<τ_0_0>(%145, %91, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %186, %170, %171
%152 = apply %150<τ_0_0>(%146, %95, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %188, %167, %169
%153 = metatype $@thick BatchNorm<τ_0_0>.TangentVector.Type // user: %155
// function_ref static AdditiveArithmetic.+= infix(_:_:)
%154 = function_ref @$ss18AdditiveArithmeticPsE2peoiyyxz_xtFZ : $@convention(method) <τ_0_0 where τ_0_0 : AdditiveArithmetic> (@inout τ_0_0, @in_guaranteed τ_0_0, @thick τ_0_0.Type) -> () // user: %155
%155 = apply %154<BatchNorm<τ_0_0>.TangentVector>(%66, %143, %153) : $@convention(method) <τ_0_0 where τ_0_0 : AdditiveArithmetic> (@inout τ_0_0, @in_guaranteed τ_0_0, @thick τ_0_0.Type) -> ()
%156 = struct_element_addr %143 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // user: %157
%157 = struct_element_addr %156 : $*Tensor<τ_0_0>, #Tensor.handle // user: %158
%158 = struct_element_addr %157 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %159
%159 = load %158 : $*_AnyTensorHandle // user: %164
%160 = struct_element_addr %143 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // user: %161
%161 = struct_element_addr %160 : $*Tensor<τ_0_0>, #Tensor.handle // user: %162
%162 = struct_element_addr %161 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %163
%163 = load %162 : $*_AnyTensorHandle // user: %165
strong_release %159 : $_AnyTensorHandle // id: %164
strong_release %163 : $_AnyTensorHandle // id: %165
dealloc_stack %143 : $*BatchNorm<τ_0_0>.TangentVector // id: %166
debug_value %152 : $Tensor<τ_0_0> // id: %167
// function_ref static Tensor<>.+= infix(_:_:)
%168 = function_ref @$s10TensorFlow0A0VAASjRzrlE2peoiyyACyxGz_AEtFZ : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@inout Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> () // users: %171, %169
%169 = apply %168<τ_0_0>(%60, %152, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@inout Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> ()
debug_value %151 : $Tensor<τ_0_0> // id: %170
%171 = apply %168<τ_0_0>(%63, %151, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@inout Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> ()
%172 = load %71 : $*Tensor<τ_0_0> // user: %192
%173 = load %72 : $*Tensor<τ_0_0> // user: %192
%174 = struct_extract %49 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // user: %175
release_value %174 : $Tensor<τ_0_0> // id: %175
%176 = struct_extract %49 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // user: %177
release_value %176 : $Tensor<τ_0_0> // id: %177
%178 = load %63 : $*Tensor<τ_0_0> // users: %192, %180
release_value %47 : $Tensor<τ_0_0> // id: %179
debug_value %178 : $Tensor<τ_0_0>, var, name "offset" // id: %180
%181 = load %60 : $*Tensor<τ_0_0> // users: %192, %189
release_value %45 : $Tensor<τ_0_0> // id: %182
release_value %91 : $Tensor<τ_0_0> // id: %183
release_value %95 : $Tensor<τ_0_0> // id: %184
release_value %145 : $Tensor<τ_0_0> // id: %185
release_value %151 : $Tensor<τ_0_0> // id: %186
release_value %146 : $Tensor<τ_0_0> // id: %187
release_value %152 : $Tensor<τ_0_0> // id: %188
debug_value %181 : $Tensor<τ_0_0>, var, name "scale" // id: %189
%190 = unchecked_enum_data %141 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb5__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb5__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.bb3!enumelt // user: %191
%191 = struct_extract %190 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb3__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb3__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.predecessor // user: %192
br bb3(%191 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb3__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, %178 : $Tensor<τ_0_0>, %181 : $Tensor<τ_0_0>, %172 : $Tensor<τ_0_0>, %173 : $Tensor<τ_0_0>) // id: %192
// %193 // users: %209, %208
bb2(%193 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb4__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>): // Preds: bb0
%194 = struct_element_addr %71 : $*Tensor<τ_0_0>, #Tensor.handle // user: %195
%195 = struct_element_addr %194 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %202
%196 = struct_element_addr %72 : $*Tensor<τ_0_0>, #Tensor.handle // user: %197
%197 = struct_element_addr %196 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %203
%198 = load %82 : $*_AnyTensorHandle // user: %199
strong_release %198 : $_AnyTensorHandle // id: %199
%200 = load %77 : $*_AnyTensorHandle // user: %201
strong_release %200 : $_AnyTensorHandle // id: %201
%202 = load %195 : $*_AnyTensorHandle // user: %204
%203 = load %197 : $*_AnyTensorHandle // user: %205
strong_release %202 : $_AnyTensorHandle // id: %204
strong_release %203 : $_AnyTensorHandle // id: %205
release_value %91 : $Tensor<τ_0_0> // id: %206
release_value %95 : $Tensor<τ_0_0> // id: %207
%208 = struct_extract %193 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb4__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb4__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.predecessor // user: %257
%209 = struct_extract %193 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb4__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb4__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.pullback_3 // users: %216, %211
%210 = alloc_stack $BatchNorm<τ_0_0>.TangentVector // users: %227, %223, %215, %222, %233
%211 = apply %209(%1) : $@callee_guaranteed @substituted <τ_0_0, τ_0_1, τ_0_2, τ_0_3 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar, τ_0_2 : TensorFlowScalar, τ_0_3 : TensorFlowFloatingPoint> (@guaranteed Tensor<τ_0_0>) -> (@owned Tensor<τ_0_1>, @owned Tensor<τ_0_2>, @owned BatchNorm<τ_0_3>.TangentVector) for <τ_0_0, τ_0_0, τ_0_0, τ_0_0> // users: %214, %213, %212
%212 = tuple_extract %211 : $(Tensor<τ_0_0>, Tensor<τ_0_0>, BatchNorm<τ_0_0>.TangentVector), 0 // users: %252, %218
%213 = tuple_extract %211 : $(Tensor<τ_0_0>, Tensor<τ_0_0>, BatchNorm<τ_0_0>.TangentVector), 1 // users: %254, %219
%214 = tuple_extract %211 : $(Tensor<τ_0_0>, Tensor<τ_0_0>, BatchNorm<τ_0_0>.TangentVector), 2 // user: %215
store %214 to %210 : $*BatchNorm<τ_0_0>.TangentVector // id: %215
strong_release %209 : $@callee_guaranteed @substituted <τ_0_0, τ_0_1, τ_0_2, τ_0_3 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar, τ_0_2 : TensorFlowScalar, τ_0_3 : TensorFlowFloatingPoint> (@guaranteed Tensor<τ_0_0>) -> (@owned Tensor<τ_0_1>, @owned Tensor<τ_0_2>, @owned BatchNorm<τ_0_3>.TangentVector) for <τ_0_0, τ_0_0, τ_0_0, τ_0_0> // id: %216
// function_ref static Tensor<>.+ infix(_:_:)
%217 = function_ref @$s10TensorFlow0A0VAASjRzrlE1poiyACyxGAE_AEtFZ : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %219, %218
%218 = apply %217<τ_0_0>(%212, %121, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %253, %237, %238
%219 = apply %217<τ_0_0>(%213, %125, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %255, %234, %236
%220 = metatype $@thick BatchNorm<τ_0_0>.TangentVector.Type // user: %222
// function_ref static AdditiveArithmetic.+= infix(_:_:)
%221 = function_ref @$ss18AdditiveArithmeticPsE2peoiyyxz_xtFZ : $@convention(method) <τ_0_0 where τ_0_0 : AdditiveArithmetic> (@inout τ_0_0, @in_guaranteed τ_0_0, @thick τ_0_0.Type) -> () // user: %222
%222 = apply %221<BatchNorm<τ_0_0>.TangentVector>(%57, %210, %220) : $@convention(method) <τ_0_0 where τ_0_0 : AdditiveArithmetic> (@inout τ_0_0, @in_guaranteed τ_0_0, @thick τ_0_0.Type) -> ()
%223 = struct_element_addr %210 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // user: %224
%224 = struct_element_addr %223 : $*Tensor<τ_0_0>, #Tensor.handle // user: %225
%225 = struct_element_addr %224 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %226
%226 = load %225 : $*_AnyTensorHandle // user: %231
%227 = struct_element_addr %210 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // user: %228
%228 = struct_element_addr %227 : $*Tensor<τ_0_0>, #Tensor.handle // user: %229
%229 = struct_element_addr %228 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %230
%230 = load %229 : $*_AnyTensorHandle // user: %232
strong_release %226 : $_AnyTensorHandle // id: %231
strong_release %230 : $_AnyTensorHandle // id: %232
dealloc_stack %210 : $*BatchNorm<τ_0_0>.TangentVector // id: %233
debug_value %219 : $Tensor<τ_0_0> // id: %234
// function_ref static Tensor<>.+= infix(_:_:)
%235 = function_ref @$s10TensorFlow0A0VAASjRzrlE2peoiyyACyxGz_AEtFZ : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@inout Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> () // users: %238, %236
%236 = apply %235<τ_0_0>(%51, %219, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@inout Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> ()
debug_value %218 : $Tensor<τ_0_0> // id: %237
%238 = apply %235<τ_0_0>(%54, %218, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@inout Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> ()
%239 = load %97 : $*Tensor<τ_0_0> // user: %259
%240 = load %101 : $*Tensor<τ_0_0> // user: %259
%241 = struct_extract %49 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // user: %242
release_value %241 : $Tensor<τ_0_0> // id: %242
%243 = struct_extract %49 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // user: %244
release_value %243 : $Tensor<τ_0_0> // id: %244
%245 = load %54 : $*Tensor<τ_0_0> // users: %259, %247
release_value %47 : $Tensor<τ_0_0> // id: %246
debug_value %245 : $Tensor<τ_0_0>, var, name "offset" // id: %247
%248 = load %51 : $*Tensor<τ_0_0> // users: %259, %256
release_value %45 : $Tensor<τ_0_0> // id: %249
release_value %121 : $Tensor<τ_0_0> // id: %250
release_value %125 : $Tensor<τ_0_0> // id: %251
release_value %212 : $Tensor<τ_0_0> // id: %252
release_value %218 : $Tensor<τ_0_0> // id: %253
release_value %213 : $Tensor<τ_0_0> // id: %254
release_value %219 : $Tensor<τ_0_0> // id: %255
debug_value %248 : $Tensor<τ_0_0>, var, name "scale" // id: %256
%257 = unchecked_enum_data %208 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb4__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb4__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.bb3!enumelt // user: %258
%258 = struct_extract %257 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb3__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb3__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.predecessor // user: %259
br bb3(%258 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb3__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, %245 : $Tensor<τ_0_0>, %248 : $Tensor<τ_0_0>, %239 : $Tensor<τ_0_0>, %240 : $Tensor<τ_0_0>) // id: %259
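// NOTE (annotation): bb3 is the join point for bb1/bb2; it packs the accumulated tangent
// components into a BatchNorm.TangentVector (%265), refreshes the offset/scale stack slots,
// and dispatches on the outer predecessor %260 to bb4 or bb5.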
// %260 // user: %318
// %261 // users: %321, %276, %408, %404, %316, %280, %292, %298
// %262 // users: %320, %284, %408, %405, %317, %288, %302, %308
// %263 // users: %384, %322, %268, %265
// %264 // users: %385, %323, %269, %265
bb3(%260 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb3__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, %261 : $Tensor<τ_0_0>, %262 : $Tensor<τ_0_0>, %263 : $Tensor<τ_0_0>, %264 : $Tensor<τ_0_0>): // Preds: bb2 bb1
%265 = struct $BatchNorm<τ_0_0>.TangentVector (%263 : $Tensor<τ_0_0>, %264 : $Tensor<τ_0_0>) // users: %355, %266, %399, %309
debug_value %265 : $BatchNorm<τ_0_0>.TangentVector, let, name "self", argno 2, implicit, expr op_deref // id: %266
%267 = struct_extract %43 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // user: %270
retain_value %263 : $Tensor<τ_0_0> // id: %268
retain_value %264 : $Tensor<τ_0_0> // id: %269
release_value %267 : $Tensor<τ_0_0> // id: %270
%271 = struct_extract %43 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // user: %272
release_value %271 : $Tensor<τ_0_0> // id: %272
%273 = struct_element_addr %40 : $*Tensor<τ_0_0>, #Tensor.handle // user: %274
%274 = struct_element_addr %273 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // users: %382, %295, %289, %275
%275 = load %274 : $*_AnyTensorHandle // user: %279
%276 = struct_extract %261 : $Tensor<τ_0_0>, #Tensor.handle // user: %277
%277 = struct_extract %276 : $TensorHandle<τ_0_0>, #TensorHandle.handle // users: %296, %290, %278
strong_retain %277 : $_AnyTensorHandle // id: %278
strong_release %275 : $_AnyTensorHandle // id: %279
store %261 to %40 : $*Tensor<τ_0_0> // id: %280
%281 = struct_element_addr %37 : $*Tensor<τ_0_0>, #Tensor.handle // user: %282
%282 = struct_element_addr %281 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // users: %380, %305, %299, %283
%283 = load %282 : $*_AnyTensorHandle // user: %287
%284 = struct_extract %262 : $Tensor<τ_0_0>, #Tensor.handle // user: %285
%285 = struct_extract %284 : $TensorHandle<τ_0_0>, #TensorHandle.handle // users: %306, %300, %286
strong_retain %285 : $_AnyTensorHandle // id: %286
strong_release %283 : $_AnyTensorHandle // id: %287
store %262 to %37 : $*Tensor<τ_0_0> // id: %288
%289 = load %274 : $*_AnyTensorHandle // user: %291
strong_retain %277 : $_AnyTensorHandle // id: %290
strong_release %289 : $_AnyTensorHandle // id: %291
store %261 to %40 : $*Tensor<τ_0_0> // id: %292
%293 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %386, %363, %344
%294 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %389, %364, %339
%295 = load %274 : $*_AnyTensorHandle // user: %297
strong_retain %277 : $_AnyTensorHandle // id: %296
strong_release %295 : $_AnyTensorHandle // id: %297
store %261 to %40 : $*Tensor<τ_0_0> // id: %298
%299 = load %282 : $*_AnyTensorHandle // user: %301
strong_retain %285 : $_AnyTensorHandle // id: %300
strong_release %299 : $_AnyTensorHandle // id: %301
store %262 to %37 : $*Tensor<τ_0_0> // id: %302
%303 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %388, %365, %334
%304 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %387, %366, %329
%305 = load %282 : $*_AnyTensorHandle // user: %307
strong_retain %285 : $_AnyTensorHandle // id: %306
strong_release %305 : $_AnyTensorHandle // id: %307
store %262 to %37 : $*Tensor<τ_0_0> // id: %308
debug_value %265 : $BatchNorm<τ_0_0>.TangentVector, let, name "self", argno 2, implicit, expr op_deref // id: %309
%310 = struct_extract %35 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // user: %311
release_value %310 : $Tensor<τ_0_0> // id: %311
%312 = struct_extract %35 : $BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // user: %313
release_value %312 : $Tensor<τ_0_0> // id: %313
release_value %33 : $Tensor<τ_0_0> // id: %314
release_value %31 : $Tensor<τ_0_0> // id: %315
debug_value %261 : $Tensor<τ_0_0>, var, name "offset" // id: %316
debug_value %262 : $Tensor<τ_0_0>, var, name "scale" // id: %317
switch_enum %260 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb3__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, case #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb3__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.bb1!enumelt: bb4, case #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb3__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.bb2!enumelt: bb5 // id: %318
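// NOTE (annotation): bb4 runs the accumulated scale adjoint through pullback_2 and the
// offset adjoint through pullback_1 before rejoining; bb5 passes them through unchanged;
// both recover pullback_0 from the bb0 predecessor payload and branch to bb6.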
// %319 // users: %326, %325, %324
bb4(%319 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb1__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>): // Preds: bb3
release_value %262 : $Tensor<τ_0_0> // id: %320
release_value %261 : $Tensor<τ_0_0> // id: %321
release_value %263 : $Tensor<τ_0_0> // id: %322
release_value %264 : $Tensor<τ_0_0> // id: %323
%324 = struct_extract %319 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb1__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb1__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.predecessor // user: %376
%325 = struct_extract %319 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb1__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb1__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.pullback_1 // users: %343, %342
%326 = struct_extract %319 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb1__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb1__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.pullback_2 // users: %333, %332
%327 = load %37 : $*Tensor<τ_0_0> // users: %367, %329
// function_ref static Tensor<>.+ infix(_:_:)
%328 = function_ref @$s10TensorFlow0A0VAASjRzrlE1poiyACyxGAE_AEtFZ : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %344, %339, %334, %329
%329 = apply %328<τ_0_0>(%327, %304, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %368, %332
%330 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // user: %331
store %330 to %37 : $*Tensor<τ_0_0> // id: %331
%332 = apply %326(%329) : $@callee_guaranteed @substituted <τ_0_0, τ_0_1 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>) -> @owned Tensor<τ_0_1> for <τ_0_0, τ_0_0> // users: %369, %334
strong_release %326 : $@callee_guaranteed @substituted <τ_0_0, τ_0_1 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>) -> @owned Tensor<τ_0_1> for <τ_0_0, τ_0_0> // id: %333
%334 = apply %328<τ_0_0>(%332, %303, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %371, %335, %337
debug_value %334 : $Tensor<τ_0_0> // id: %335
// function_ref static Tensor<>.+= infix(_:_:)
%336 = function_ref @$s10TensorFlow0A0VAASjRzrlE2peoiyyACyxGz_AEtFZ : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@inout Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> () // users: %346, %337
%337 = apply %336<τ_0_0>(%37, %334, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@inout Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> ()
%338 = load %40 : $*Tensor<τ_0_0> // users: %372, %339
%339 = apply %328<τ_0_0>(%338, %294, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %373, %342
%340 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // user: %341
store %340 to %40 : $*Tensor<τ_0_0> // id: %341
%342 = apply %325(%339) : $@callee_guaranteed @substituted <τ_0_0, τ_0_1 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>) -> @owned Tensor<τ_0_1> for <τ_0_0, τ_0_0> // users: %374, %344
strong_release %325 : $@callee_guaranteed @substituted <τ_0_0, τ_0_1 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>) -> @owned Tensor<τ_0_1> for <τ_0_0, τ_0_0> // id: %343
%344 = apply %328<τ_0_0>(%342, %293, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %375, %345, %346
debug_value %344 : $Tensor<τ_0_0> // id: %345
%346 = apply %336<τ_0_0>(%40, %344, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@inout Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> ()
%347 = struct_element_addr %28 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // user: %348
%348 = struct_element_addr %347 : $*Tensor<τ_0_0>, #Tensor.handle // user: %349
%349 = struct_element_addr %348 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %350
%350 = load %349 : $*_AnyTensorHandle // user: %356
%351 = struct_element_addr %28 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // user: %352
%352 = struct_element_addr %351 : $*Tensor<τ_0_0>, #Tensor.handle // user: %353
%353 = struct_element_addr %352 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %354
%354 = load %353 : $*_AnyTensorHandle // user: %357
store %265 to %28 : $*BatchNorm<τ_0_0>.TangentVector // id: %355
strong_release %350 : $_AnyTensorHandle // id: %356
strong_release %354 : $_AnyTensorHandle // id: %357
%358 = load %40 : $*Tensor<τ_0_0> // users: %378, %360
release_value %26 : $Tensor<τ_0_0> // id: %359
debug_value %358 : $Tensor<τ_0_0>, var, name "offset" // id: %360
%361 = load %37 : $*Tensor<τ_0_0> // users: %378, %370
release_value %24 : $Tensor<τ_0_0> // id: %362
release_value %293 : $Tensor<τ_0_0> // id: %363
release_value %294 : $Tensor<τ_0_0> // id: %364
release_value %303 : $Tensor<τ_0_0> // id: %365
release_value %304 : $Tensor<τ_0_0> // id: %366
release_value %327 : $Tensor<τ_0_0> // id: %367
release_value %329 : $Tensor<τ_0_0> // id: %368
release_value %332 : $Tensor<τ_0_0> // id: %369
debug_value %361 : $Tensor<τ_0_0>, var, name "scale" // id: %370
release_value %334 : $Tensor<τ_0_0> // id: %371
release_value %338 : $Tensor<τ_0_0> // id: %372
release_value %339 : $Tensor<τ_0_0> // id: %373
release_value %342 : $Tensor<τ_0_0> // id: %374
release_value %344 : $Tensor<τ_0_0> // id: %375
%376 = unchecked_enum_data %324 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb1__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb1__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.bb0!enumelt // user: %377
%377 = struct_extract %376 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb0__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb0__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.pullback_0 // user: %378
br bb6(%377 : $@callee_guaranteed @substituted <τ_0_0, τ_0_1, τ_0_2 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar, τ_0_2 : TensorFlowFloatingPoint> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_1>) -> @owned BatchNorm<τ_0_2>.TangentVector for <τ_0_0, τ_0_0, τ_0_0>, %358 : $Tensor<τ_0_0>, %361 : $Tensor<τ_0_0>) // id: %378
// %379 // user: %390
bb5(%379 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb2__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>): // Preds: bb3
%380 = load %282 : $*_AnyTensorHandle // user: %381
strong_release %380 : $_AnyTensorHandle // id: %381
%382 = load %274 : $*_AnyTensorHandle // user: %383
strong_release %382 : $_AnyTensorHandle // id: %383
release_value %263 : $Tensor<τ_0_0> // id: %384
release_value %264 : $Tensor<τ_0_0> // id: %385
release_value %293 : $Tensor<τ_0_0> // id: %386
release_value %304 : $Tensor<τ_0_0> // id: %387
release_value %303 : $Tensor<τ_0_0> // id: %388
release_value %294 : $Tensor<τ_0_0> // id: %389
%390 = struct_extract %379 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb2__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb2__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.predecessor // user: %406
%391 = struct_element_addr %28 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // user: %392
%392 = struct_element_addr %391 : $*Tensor<τ_0_0>, #Tensor.handle // user: %393
%393 = struct_element_addr %392 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %394
%394 = load %393 : $*_AnyTensorHandle // user: %400
%395 = struct_element_addr %28 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // user: %396
%396 = struct_element_addr %395 : $*Tensor<τ_0_0>, #Tensor.handle // user: %397
%397 = struct_element_addr %396 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %398
%398 = load %397 : $*_AnyTensorHandle // user: %401
store %265 to %28 : $*BatchNorm<τ_0_0>.TangentVector // id: %399
strong_release %394 : $_AnyTensorHandle // id: %400
strong_release %398 : $_AnyTensorHandle // id: %401
release_value %26 : $Tensor<τ_0_0> // id: %402
release_value %24 : $Tensor<τ_0_0> // id: %403
debug_value %261 : $Tensor<τ_0_0>, var, name "offset" // id: %404
debug_value %262 : $Tensor<τ_0_0>, var, name "scale" // id: %405
%406 = unchecked_enum_data %390 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb2__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb2__Pred__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.bb0!enumelt // user: %407
%407 = struct_extract %406 : $_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb0__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl<τ_0_0>, #_AD__$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGF_bb0__PB__src_0_wrt_1_10TensorFlow0aB13FloatingPointRzl.pullback_0 // user: %408
br bb6(%407 : $@callee_guaranteed @substituted <τ_0_0, τ_0_1, τ_0_2 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar, τ_0_2 : TensorFlowFloatingPoint> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_1>) -> @owned BatchNorm<τ_0_2>.TangentVector for <τ_0_0, τ_0_0, τ_0_0>, %261 : $Tensor<τ_0_0>, %262 : $Tensor<τ_0_0>) // id: %408
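// NOTE (annotation): bb6 applies pullback_0 (%409) to the two accumulated adjoints, folds
// the result into the `self` accumulator %28 with AdditiveArithmetic.+=, and stores the
// final TangentVector to the indirect result %0 before cleaning up the stack allocations.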
// %409 // users: %424, %422
// %410 // users: %445, %417
// %411 // users: %443, %413
bb6(%409 : $@callee_guaranteed @substituted <τ_0_0, τ_0_1, τ_0_2 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar, τ_0_2 : TensorFlowFloatingPoint> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_1>) -> @owned BatchNorm<τ_0_2>.TangentVector for <τ_0_0, τ_0_0, τ_0_0>, %410 : $Tensor<τ_0_0>, %411 : $Tensor<τ_0_0>): // Preds: bb4 bb5
// function_ref static Tensor<>.+ infix(_:_:)
%412 = function_ref @$s10TensorFlow0A0VAASjRzrlE1poiyACyxGAE_AEtFZ : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %417, %413
%413 = apply %412<τ_0_0>(%411, %87, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %444, %422
%414 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %415, %416
release_value %414 : $Tensor<τ_0_0> // id: %415
debug_value %414 : $Tensor<τ_0_0>, var, name "scale" // id: %416
%417 = apply %412<τ_0_0>(%410, %86, %3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_0>, @thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %446, %422
%418 = apply %4<τ_0_0>(%3) : $@convention(method) <τ_0_0 where τ_0_0 : Numeric, τ_0_0 : TensorFlowScalar> (@thin Tensor<τ_0_0>.Type) -> @owned Tensor<τ_0_0> // users: %419, %420
release_value %418 : $Tensor<τ_0_0> // id: %419
debug_value %418 : $Tensor<τ_0_0>, var, name "offset" // id: %420
%421 = alloc_stack $BatchNorm<τ_0_0>.TangentVector // users: %432, %428, %423, %427, %438
%422 = apply %409(%417, %413) : $@callee_guaranteed @substituted <τ_0_0, τ_0_1, τ_0_2 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar, τ_0_2 : TensorFlowFloatingPoint> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_1>) -> @owned BatchNorm<τ_0_2>.TangentVector for <τ_0_0, τ_0_0, τ_0_0> // user: %423
store %422 to %421 : $*BatchNorm<τ_0_0>.TangentVector // id: %423
strong_release %409 : $@callee_guaranteed @substituted <τ_0_0, τ_0_1, τ_0_2 where τ_0_0 : TensorFlowScalar, τ_0_1 : TensorFlowScalar, τ_0_2 : TensorFlowFloatingPoint> (@guaranteed Tensor<τ_0_0>, @guaranteed Tensor<τ_0_1>) -> @owned BatchNorm<τ_0_2>.TangentVector for <τ_0_0, τ_0_0, τ_0_0> // id: %424
%425 = metatype $@thick BatchNorm<τ_0_0>.TangentVector.Type // user: %427
// function_ref static AdditiveArithmetic.+= infix(_:_:)
%426 = function_ref @$ss18AdditiveArithmeticPsE2peoiyyxz_xtFZ : $@convention(method) <τ_0_0 where τ_0_0 : AdditiveArithmetic> (@inout τ_0_0, @in_guaranteed τ_0_0, @thick τ_0_0.Type) -> () // user: %427
%427 = apply %426<BatchNorm<τ_0_0>.TangentVector>(%28, %421, %425) : $@convention(method) <τ_0_0 where τ_0_0 : AdditiveArithmetic> (@inout τ_0_0, @in_guaranteed τ_0_0, @thick τ_0_0.Type) -> ()
%428 = struct_element_addr %421 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.offset // user: %429
%429 = struct_element_addr %428 : $*Tensor<τ_0_0>, #Tensor.handle // user: %430
%430 = struct_element_addr %429 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %431
%431 = load %430 : $*_AnyTensorHandle // user: %436
%432 = struct_element_addr %421 : $*BatchNorm<τ_0_0>.TangentVector, #BatchNorm.TangentVector.scale // user: %433
%433 = struct_element_addr %432 : $*Tensor<τ_0_0>, #Tensor.handle // user: %434
%434 = struct_element_addr %433 : $*TensorHandle<τ_0_0>, #TensorHandle.handle // user: %435
%435 = load %434 : $*_AnyTensorHandle // user: %437
strong_release %431 : $_AnyTensorHandle // id: %436
strong_release %435 : $_AnyTensorHandle // id: %437
dealloc_stack %421 : $*BatchNorm<τ_0_0>.TangentVector // id: %438
%439 = load %28 : $*BatchNorm<τ_0_0>.TangentVector // user: %440
store %439 to %0 : $*BatchNorm<τ_0_0>.TangentVector // id: %440
release_value %86 : $Tensor<τ_0_0> // id: %441
release_value %87 : $Tensor<τ_0_0> // id: %442
release_value %411 : $Tensor<τ_0_0> // id: %443
release_value %413 : $Tensor<τ_0_0> // id: %444
release_value %410 : $Tensor<τ_0_0> // id: %445
release_value %417 : $Tensor<τ_0_0> // id: %446
dealloc_stack %66 : $*BatchNorm<τ_0_0>.TangentVector // id: %447
dealloc_stack %63 : $*Tensor<τ_0_0> // id: %448
dealloc_stack %60 : $*Tensor<τ_0_0> // id: %449
dealloc_stack %57 : $*BatchNorm<τ_0_0>.TangentVector // id: %450
dealloc_stack %54 : $*Tensor<τ_0_0> // id: %451
dealloc_stack %51 : $*Tensor<τ_0_0> // id: %452
dealloc_stack %40 : $*Tensor<τ_0_0> // id: %453
dealloc_stack %37 : $*Tensor<τ_0_0> // id: %454
dealloc_stack %28 : $*BatchNorm<τ_0_0>.TangentVector // id: %455
%456 = tuple () // user: %457
return %456 : $() // id: %457
} // end sil function '$s10TensorFlow9BatchNormV14callAsFunctionyAA0A0VyxGAGFAA0aB13FloatingPointRzlTJpUSpSr'