def L_layer_model(X, Y, nn_architecture, learning_rate=0.0075, num_iterations=3000, print_cost=False):
    np.random.seed(1)
    costs = []  # keep track of cost
    # Parameters initialization
    parameters = initialize_parameters(nn_architecture)
    # Loop (gradient descent): forward pass, cost, backward pass, parameter update
    for i in range(0, num_iterations):
        AL, forward_cache = L_model_forward(X, parameters, nn_architecture)
        cost = compute_cost(AL, Y)
        grads = L_model_backward(AL, Y, parameters, forward_cache, nn_architecture)
        parameters = update_parameters(parameters, grads, learning_rate)
        # Print and record the cost every 100 iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
            costs.append(cost)
    return parameters
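# L_layer_model calls initialize_parameters, which is not shown in this snippet.
# Below is a minimal sketch of what it could look like, assuming the
# nn_architecture format defined at the bottom of this file ("layer_size" per
# layer) and small random weights scaled by 0.1; the original may initialize
# differently.
def initialize_parameters(nn_architecture, seed=3):
    np.random.seed(seed)
    parameters = {}
    number_of_layers = len(nn_architecture)
    for l in range(1, number_of_layers):
        # W has shape (current layer size, previous layer size); b is a column vector
        parameters['W' + str(l)] = np.random.randn(
            nn_architecture[l]["layer_size"],
            nn_architecture[l - 1]["layer_size"]) * 0.1
        parameters['b' + str(l)] = np.zeros((nn_architecture[l]["layer_size"], 1))
    return parameters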
def update_parameters(parameters, grads, learning_rate):
    L = len(parameters) // 2  # number of layers in the neural network
    # Gradient descent step for every W and b (the range must reach layer L itself)
    for l in range(1, L + 1):
        parameters["W" + str(l)] = parameters["W" + str(l)] - learning_rate * grads["dW" + str(l)]
        parameters["b" + str(l)] = parameters["b" + str(l)] - learning_rate * grads["db" + str(l)]
    return parameters
def L_model_backward(AL, Y, parameters, forward_cache, nn_architecture):
    grads = {}
    number_of_layers = len(nn_architecture)
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)  # after this line, Y is the same shape as AL
    # Initializing the backpropagation: derivative of the cross-entropy cost w.r.t. AL
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    dA_prev = dAL
    # Walk backward through the layers, reusing the cached Z and A values
    for l in reversed(range(1, number_of_layers)):
        backward = relu_backward if nn_architecture[l]["activation"] == "relu" else sigmoid_backward
        dZ = backward(dA_prev, forward_cache['Z' + str(l)])
        grads["dW" + str(l)] = np.dot(dZ, forward_cache['A' + str(l - 1)].T) / m
        grads["db" + str(l)] = np.sum(dZ, axis=1, keepdims=True) / m
        dA_prev = np.dot(parameters['W' + str(l)].T, dZ)
    return grads
def compute_cost(AL, Y):
    m = Y.shape[1]
    # Cross-entropy loss computed from AL and Y
    logprobs = np.multiply(np.log(AL), Y) + np.multiply(1 - Y, np.log(1 - AL))
    cost = - np.sum(logprobs) / m
    cost = np.squeeze(cost)  # make sure cost is a scalar, e.g. [[17]] -> 17
    return cost
def L_model_forward(X, parameters, nn_architecture):
    forward_cache = {}
    A = X
    forward_cache['A0'] = X  # cache the input as "A0" so backprop can reach the first layer
    number_of_layers = len(nn_architecture)
    for l in range(1, number_of_layers):
        A_prev = A
        W = parameters['W' + str(l)]
        b = parameters['b' + str(l)]
        activation = nn_architecture[l]["activation"]
        # Linear step followed by the layer's activation
        Z = np.dot(W, A_prev) + b
        A = relu(Z) if activation == "relu" else sigmoid(Z)
        # Cache Z and A for the backward pass
        forward_cache['Z' + str(l)] = Z
        forward_cache['A' + str(l)] = A
    return A, forward_cache  # A is AL, the output of the final sigmoid layer
def sigmoid(Z):
    # Element-wise logistic function
    S = 1 / (1 + np.exp(-Z))
    return S

def relu(Z):
    # Element-wise rectified linear unit
    R = np.maximum(0, Z)
    return R
def sigmoid_backward(dA, Z):
    S = sigmoid(Z)
    dZ = dA * S * (1 - S)  # chain rule with sigmoid'(Z) = S * (1 - S)
    return dZ
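# The relu layers in nn_architecture also need a backward helper. relu_backward
# is not part of this snippet; the version below is a minimal sketch that zeroes
# the gradient wherever the pre-activation Z was non-positive, matching how
# L_model_backward calls it above.
def relu_backward(dA, Z):
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0  # ReLU passes gradient only where Z > 0
    return dZ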
import numpy as np
import matplotlib.pyplot as plt
nn_architecture = [
    {"layer_size": 4, "activation": "none"},  # input layer
    {"layer_size": 5, "activation": "relu"},
    {"layer_size": 4, "activation": "relu"},
    {"layer_size": 3, "activation": "relu"},
    {"layer_size": 1, "activation": "sigmoid"}
]
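# Example driver, assuming a binary-classification dataset with 4 features per
# column (to match the input layer_size of 4). X_train and Y_train below are
# placeholders, not part of the original snippet; with real data they would
# have shapes (4, m) and (1, m) respectively.
np.random.seed(2)
X_train = np.random.randn(4, 300)                         # 300 toy examples, 4 features each
Y_train = (X_train.sum(axis=0, keepdims=True) > 0) * 1.0  # toy binary labels, shape (1, 300)
parameters = L_layer_model(X_train, Y_train, nn_architecture,
                           learning_rate=0.0075, num_iterations=3000, print_cost=True)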