Richard Wei
January 2019
// NOTE: The where clause is needed because of SR-9595.
struct Fn<T : Differentiable, U : Differentiable>
  where T.TangentVector : AdditiveArithmetic, T.CotangentVector : AdditiveArithmetic,
        U.TangentVector : AdditiveArithmetic, U.CotangentVector : AdditiveArithmetic {
  let original: (T) -> U
  let jvp: (T) -> (value: U, differential: (T.TangentVector) -> U.TangentVector)
  let vjp: (T) -> (value: U, pullback: (U.CotangentVector) -> T.CotangentVector)
}
extension Fn : Equatable where U : Equatable {
  // ...
}
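As a concrete illustration, an Fn value can package a function together with its derivative in both directions. The sketch below uses the struct's memberwise initializer and assumes Float conforms to Differentiable with TangentVector == Float and CotangentVector == Float (as in this design at the time); it is not part of the original text.

// Sketch only: assumes Float : Differentiable with
// TangentVector == Float and CotangentVector == Float.
let square = Fn<Float, Float>(
  original: { x in x * x },
  jvp: { x in (value: x * x, differential: { dx in 2 * x * dx }) },
  vjp: { x in (value: x * x, pullback: { v in 2 * x * v }) }
)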
func f(x: Float) -> Float {
  return x + x + x
}
print(pullback(at: 1, in: f)(2))
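// Prints "6.0": f(x) = 3x, so the pullback at 1 scales the seed 2 by df/dx = 3.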
// AD__$s11sideeffects1f1xS2f_tF__adjoint_src_0_wrt_0
sil hidden @AD__$s11sideeffects1f1xS2f_tF__adjoint_src_0_wrt_0 : $@convention(thin) (Float, AD__$s11sideeffects1f1xS2f_tF__Type__src_0_wrt_0, Float, Float) -> Float {
// %0                                             // user: %8
// %1                                             // users: %18, %4
//============================================================================//
// Part 1. StoredPropertyIterable
// This models the purely static layout of a struct.
//============================================================================//

// This is an implementation detail that is required until existentials of
// protocols with associated types (PATs) become possible.
protocol _StoredPropertyIterableBase {
  static var _allStoredPropertiesTypeErased: [AnyKeyPath] { get }
  static var _recursivelyAllStoredPropertiesTypeErased: [AnyKeyPath] { get }
}
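For illustration, a conforming type would expose its stored properties as type-erased key paths. The manual conformance below is a sketch; in the design, such a conformance would be compiler-synthesized, and the example type is an assumption.

// Sketch of a manual conformance (the compiler would synthesize this).
struct Point : _StoredPropertyIterableBase {
  var x, y: Float
  static var _allStoredPropertiesTypeErased: [AnyKeyPath] {
    return [\Point.x, \Point.y]
  }
  static var _recursivelyAllStoredPropertiesTypeErased: [AnyKeyPath] {
    // No aggregate-typed stored properties to recurse into here.
    return [\Point.x, \Point.y]
  }
}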
diff --git a/tensorflow/core/kernels/concat_lib_gpu_impl.cu.cc b/tensorflow/core/kernels/concat_lib_gpu_impl.cu.cc
index a561d918bd..785e0ddf4e 100644
--- a/tensorflow/core/kernels/concat_lib_gpu_impl.cu.cc
+++ b/tensorflow/core/kernels/concat_lib_gpu_impl.cu.cc
@@ -69,7 +69,7 @@ __global__ void concat_variable_kernel(
   IntType num_inputs = input_ptr_data.size;
 
   // verbose declaration needed due to template
-  extern __shared__ __align__(sizeof(T)) unsigned char smem[];
+  extern __shared__ unsigned char smem[];
/// Y combinator
func fix<D, R>(
  _ f: @escaping (Rep<(D) -> R>) -> Rep<(D) -> R>) -> Rep<(D) -> R> {
  return lambda { d in f(fix(f))[d] }
}

let fac: Rep<(Int) -> Int> = fix { f in
  lambda { (n: Rep<Int>) in
    .if(n == 0, then: ^1, else: n * f[n - 1])
  }
}
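Applying the staged factorial is then just subscripting with a lifted constant. This usage line is a sketch that assumes, as in the code above, that prefix ^ lifts a literal into Rep and that subscripting applies a staged function:

let staged = fac[^5]  // a Rep<Int> representing 5! = 120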
/// A type equality guarantee is capable of safely casting one value to a
/// different type. It can only be created when `S` and `T` are statically known
/// to be equal.
struct TypeEqualityGuarantee<S, T> {
  private init() {}

  /// Safely casts a value to the other type.
  func cast(_ value: T) -> S {
    return value as! S
  }
}
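To see why the private initializer matters, here is one way a guarantee could be minted only when the two parameters are provably the same type. The sameType witness below is an assumption for illustration, not part of the original excerpt:

// Sketch: `sameType` is only available when S == T, so `cast` can
// never be reached with mismatched types.
extension TypeEqualityGuarantee where S == T {
  static var sameType: TypeEqualityGuarantee { return TypeEqualityGuarantee() }
}

let proof = TypeEqualityGuarantee<Int, Int>.sameType
let n: Int = proof.cast(42)  // succeeds; S and T are both Int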
In the literature, the term automatic differentiation (AD) is reserved for a specific technique in which the gradient of a program is calculated by constructing an adjoint program, which performs the gradient computation of the given program. Note that this adjoint program includes all control-flow statements. There are two approaches to implementing AD: source code transformation (SCT) and operator overloading (OO). With source code transformation, we generate the adjoint program in the host language, e.g., given a Python function, we manipulate its abstract syntax tree (AST) directly in order to create a new Python function that performs the gradient computation. Operator overloading, on the other hand, overloads each operator so that it appends an entry to a tape (a Wengert list). Once the function exits, the gradient is calculated by traversing the tape in reverse order and applying the gradient operators.
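To make the operator-overloading approach concrete, here is a minimal sketch in Swift (illustrative only; it is not any framework's actual API): each overloaded operator records its local pullback on a shared tape, and walking the tape in reverse accumulates the gradient.

// Minimal tape-based reverse-mode AD sketch.
final class Tape {
  var grads: [Double] = []
  var backward: [() -> Void] = []

  func variable(_ value: Double) -> Tracked {
    grads.append(0)
    return Tracked(value: value, id: grads.count - 1, tape: self)
  }
}

struct Tracked {
  let value: Double
  let id: Int
  unowned let tape: Tape

  static func * (lhs: Tracked, rhs: Tracked) -> Tracked {
    let out = lhs.tape.variable(lhs.value * rhs.value)
    // Record the local pullback: d/dlhs += rhs * dout, d/drhs += lhs * dout.
    lhs.tape.backward.append {
      lhs.tape.grads[lhs.id] += rhs.value * lhs.tape.grads[out.id]
      lhs.tape.grads[rhs.id] += lhs.value * lhs.tape.grads[out.id]
    }
    return out
  }
}

let tape = Tape()
let x = tape.variable(3), y = tape.variable(4)
let z = x * y
tape.grads[z.id] = 1  // seed the output gradient
for pullback in tape.backward.reversed() { pullback() }
print(tape.grads[x.id], tape.grads[y.id])  // 4.0 3.0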
Theano does not employ AD but "[a] highly optimized form of symbolic differentiation".
A comparison of Theano with other deep learning frameworks, highlighting a series of low-level design choices in no particular order.
Overview
Symbolic: Theano, CGT; Automatic: Torch, MXNet
Symbolic and automatic differentiation are often confused or used interchangeably, although their implementations are significantly different.