@ImadDabbura
Created September 20, 2018 15:20
import numpy as np
import matplotlib.pyplot as plt


def model_with_regularization(
        X, y, layers_dims, learning_rate=0.01, num_epochs=3000,
        print_cost=False, hidden_layers_activation_fn="relu", lambd=0):
    # get number of examples
    m = X.shape[1]

    # to get consistent output
    np.random.seed(1)

    # initialize parameters
    parameters = initialize_parameters(layers_dims)

    # initialize cost list
    cost_list = []

    # implement gradient descent
    for i in range(num_epochs):
        # compute forward propagation
        AL, caches = L_model_forward(
            X, parameters, hidden_layers_activation_fn)

        # compute regularized cost
        reg_cost = compute_cost_reg(AL, y, parameters, lambd)

        # compute gradients
        grads = L_model_backward_reg(
            AL, y, caches, hidden_layers_activation_fn, lambd)

        # update parameters
        parameters = update_parameters(parameters, grads, learning_rate)

        # print cost
        if (i + 1) % 100 == 0 and print_cost:
            print("The cost after {} iterations: {}".format(i + 1, reg_cost))

        # append cost
        if i % 100 == 0:
            cost_list.append(reg_cost)

    # plot the cost curve
    plt.plot(cost_list)
    plt.xlabel("Iterations (per hundreds)")
    plt.ylabel("Cost")
    plt.title("Cost curve for the learning rate = {}".format(learning_rate))

    return parameters
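The helpers initialize_parameters, L_model_forward, L_model_backward_reg, compute_cost_reg, and update_parameters are defined elsewhere in the same series and are not part of this gist. As a point of reference only, below is a minimal sketch of what compute_cost_reg could look like, assuming the usual formulation: binary cross-entropy plus an L2 penalty of lambd / (2 * m) times the sum of squared weights over all layers. The actual implementation used with this gist may differ.

def compute_cost_reg(AL, y, parameters, lambd=0):
    """Sketch: binary cross-entropy cost with an L2 weight penalty."""
    m = y.shape[1]
    L = len(parameters) // 2  # number of layers (W/b pairs)

    # sum of squared weights over all layers
    l2_term = 0
    for l in range(1, L + 1):
        l2_term += np.sum(np.square(parameters["W" + str(l)]))

    # binary cross-entropy (small epsilon guards against log(0))
    eps = 1e-8
    cross_entropy = -np.mean(
        y * np.log(AL + eps) + (1 - y) * np.log(1 - AL + eps))

    return cross_entropy + (lambd / (2 * m)) * l2_term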
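A hypothetical usage example, assuming the helper functions above are available: X is shaped (num_features, num_examples) and y is shaped (1, num_examples) with 0/1 labels; the toy data, layer sizes, and hyperparameters here are illustrative only.

# train a small network with two hidden layers on synthetic data
np.random.seed(0)
X_train = np.random.randn(4, 500)
y_train = (X_train.sum(axis=0, keepdims=True) > 0).astype(int)

layers_dims = [4, 5, 3, 1]  # input layer, two hidden layers, sigmoid output
parameters = model_with_regularization(
    X_train, y_train, layers_dims, learning_rate=0.03,
    num_epochs=2000, print_cost=True, lambd=0.1)

Setting lambd=0 recovers the unregularized model, since the L2 penalty and its gradient contribution both vanish.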