import numpy as np


def linear_backword_reg(dZ, cache, lambd=0):
    A_prev, W, b = cache
    m = A_prev.shape[1]

    # L2 regularization adds the term (lambd / m) * W to the weight gradient
    dW = (1 / m) * np.dot(dZ, A_prev.T) + (lambd / m) * W
    db = (1 / m) * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = np.dot(W.T, dZ)

    assert dA_prev.shape == A_prev.shape
    assert dW.shape == W.shape
    assert db.shape == b.shape

    return dA_prev, dW, db


def linear_activation_backward_reg(dA, cache, activation_fn="relu", lambd=0):
    linear_cache, activation_cache = cache

    # sigmoid_gradient, tanh_gradient, and relu_gradient are helpers defined
    # elsewhere; each computes dZ = dA * g'(Z) for its activation function
    if activation_fn == "sigmoid":
        dZ = sigmoid_gradient(dA, activation_cache)
        dA_prev, dW, db = linear_backword_reg(dZ, linear_cache, lambd)

    elif activation_fn == "tanh":
        dZ = tanh_gradient(dA, activation_cache)
        dA_prev, dW, db = linear_backword_reg(dZ, linear_cache, lambd)

    elif activation_fn == "relu":
        dZ = relu_gradient(dA, activation_cache)
        dA_prev, dW, db = linear_backword_reg(dZ, linear_cache, lambd)

    return dA_prev, dW, db


def L_model_backward_reg(AL, y, caches, hidden_layers_activation_fn="relu",
                         lambd=0):
    y = y.reshape(AL.shape)
    L = len(caches)
    grads = {}

    # Derivative of the cross-entropy cost w.r.t. the sigmoid output AL:
    # dAL = (AL - y) / (AL * (1 - AL))
    dAL = np.divide(AL - y, np.multiply(AL, 1 - AL))

    # Output layer L uses the sigmoid activation
    grads["dA" + str(L - 1)], grads["dW" + str(L)], grads["db" + str(L)] =\
        linear_activation_backward_reg(dAL, caches[L - 1], "sigmoid", lambd)

    # Hidden layers, from layer L-1 down to layer 1
    for l in range(L - 1, 0, -1):
        current_cache = caches[l - 1]
        grads["dA" + str(l - 1)], grads["dW" + str(l)], grads["db" + str(l)] =\
            linear_activation_backward_reg(
                grads["dA" + str(l)], current_cache,
                hidden_layers_activation_fn, lambd)

    return grads
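

# --- Usage sketch (not part of the original gist) ---------------------------
# A minimal example of how these functions might be wired into training,
# assuming a forward pass `L_model_forward` that returns (AL, caches) and a
# `parameters` dict keyed "W1", "b1", ..., "WL", "bL" (both are hypothetical
# names used only for illustration here).
#
# AL, caches = L_model_forward(X, parameters, "relu")
# grads = L_model_backward_reg(AL, y, caches, "relu", lambd=0.1)
#
# # Plain gradient-descent update using the regularized gradients
# learning_rate = 0.01
# num_layers = len(parameters) // 2
# for l in range(1, num_layers + 1):
#     parameters["W" + str(l)] -= learning_rate * grads["dW" + str(l)]
#     parameters["b" + str(l)] -= learning_rate * grads["db" + str(l)]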