def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    # Initialize parameters with zeros
    w, b = initialize_with_zeros(X_train.shape[0])
    # Gradient descent
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    # Retrieve parameters w, b from the dictionary
    w = parameters['w']
    b = parameters['b']
    # Predict on both sets (predict() is assumed defined alongside
    # initialize_with_zeros and optimize) and return everything learned
    Y_prediction_train = predict(w, b, X_train)
    Y_prediction_test = predict(w, b, X_test)
    return {'costs': costs, 'w': w, 'b': b,
            'Y_prediction_train': Y_prediction_train,
            'Y_prediction_test': Y_prediction_test}
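The helpers initialize_with_zeros, optimize, and predict are not shown in the snippet; they are assumed to live in the same file. For the first of these, here is a minimal sketch consistent with how it is called above. Zero initialization is fine for logistic regression, since there are no hidden units whose symmetry needs breaking:

import numpy as np

def initialize_with_zeros(dim):
    # Hypothetical helper, not from the original snippet:
    # a (dim, 1) zero weight vector and a scalar zero bias.
    w = np.zeros((dim, 1))
    b = 0.0
    return w, b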
def initialize_parameters_deep(layer_dims):
    parameters = {}
    L = len(layer_dims)  # number of layers in the network
    for l in range(1, L):
        # Small random weights break symmetry; biases can start at zero
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01
        parameters['b' + str(l)] = np.zeros(shape=(layer_dims[l], 1))
        assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
        assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
    return parameters
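A quick shape check, assuming the function above is in scope (the layer sizes are arbitrary):

parameters = initialize_parameters_deep([5, 4, 3])
for name, value in sorted(parameters.items()):
    print(name, value.shape)
# W1 (4, 5), W2 (3, 4), b1 (4, 1), b2 (3, 1)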
def linear_forward(A, W, b):
    # Linear part of a layer's forward propagation: Z = WA + b
    Z = np.dot(W, A) + b
    assert(Z.shape == (W.shape[0], A.shape[1]))
    cache = (A, W, b)  # stored for the backward pass
    return Z, cache
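A smoke test with made-up shapes, assuming linear_forward and numpy are in scope:

np.random.seed(1)
A = np.random.randn(4, 2)   # 4 features, batch of 2 examples
W = np.random.randn(3, 4)   # 3 units in this layer
b = np.zeros((3, 1))
Z, cache = linear_forward(A, W, b)
print(Z.shape)  # (3, 2)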
def linear_activation_forward(A_prev, W, b, activation):
    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)
    # Both caches are needed to compute gradients in the backward pass
    cache = (linear_cache, activation_cache)
    return A, cache
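The sigmoid and relu activations are not shown in these snippets. Minimal sketches, assuming each returns the activation plus a cache holding Z, which is what the *_backward helpers further down expect:

import numpy as np

def sigmoid(Z):
    # Element-wise logistic function; Z is cached for backprop.
    A = 1 / (1 + np.exp(-Z))
    return A, Z

def relu(Z):
    # Element-wise max(0, z); Z is cached for backprop.
    A = np.maximum(0, Z)
    return A, Z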
def L_model_forward(X, parameters):
    caches = []
    A = X
    L = len(parameters) // 2  # number of layers in the neural network
    # Hidden layers: [LINEAR -> RELU] repeated (L-1) times
    for l in range(1, L):
        A_prev = A
        A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)],
                                             parameters['b' + str(l)], activation="relu")
        caches.append(cache)
    # Output layer: LINEAR -> SIGMOID
    AL, cache = linear_activation_forward(A, parameters['W' + str(L)],
                                          parameters['b' + str(L)], activation="sigmoid")
    caches.append(cache)
    return AL, caches
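Initialization and forward propagation chained together on random data; the shapes are arbitrary, and both functions above are assumed in scope:

import numpy as np

np.random.seed(1)
X = np.random.randn(5, 10)                        # 5 features, 10 examples
parameters = initialize_parameters_deep([5, 4, 1])
AL, caches = L_model_forward(X, parameters)
print(AL.shape, len(caches))  # (1, 10) 2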
def compute_cost(AL, Y):
    # Cross-entropy cost, averaged over the m examples
    m = Y.shape[1]
    cost = (-1 / m) * np.sum(np.multiply(Y, np.log(AL)) + np.multiply(1 - Y, np.log(1 - AL)))
    cost = np.squeeze(cost)  # turns e.g. [[17]] into 17
    assert(cost.shape == ())
    return cost
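A small worked example: for predictions AL = [0.8, 0.9, 0.4] and labels Y = [1, 1, 0], the cost is -(ln 0.8 + ln 0.9 + ln 0.6) / 3 ≈ 0.2798.

import numpy as np

AL = np.array([[0.8, 0.9, 0.4]])
Y = np.array([[1, 1, 0]])
print(compute_cost(AL, Y))  # ~0.2798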
def linear_backward(dZ, cache):
    A_prev, W, b = cache
    m = A_prev.shape[1]
    dW = (1 / m) * np.dot(dZ, A_prev.T)
    db = (1 / m) * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = np.dot(W.T, dZ)
    assert(dA_prev.shape == A_prev.shape)
    assert(dW.shape == W.shape)
    assert(db.shape == b.shape)
    return dA_prev, dW, db
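A shape check with made-up dimensions: 3 units in the current layer, 4 in the previous one, a batch of 5 examples.

import numpy as np

np.random.seed(1)
A_prev, W, b = np.random.randn(4, 5), np.random.randn(3, 4), np.zeros((3, 1))
dZ = np.random.randn(3, 5)
dA_prev, dW, db = linear_backward(dZ, (A_prev, W, b))
print(dA_prev.shape, dW.shape, db.shape)  # (4, 5) (3, 4) (3, 1)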
def linear_activation_backward(dA, cache, activation):
    linear_cache, activation_cache = cache
    # Undo the activation first, then the linear step
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    dA_prev, dW, db = linear_backward(dZ, linear_cache)
    return dA_prev, dW, db
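relu_backward and sigmoid_backward are likewise assumed defined elsewhere. Minimal sketches consistent with the cache convention above (the activation cache holds Z):

import numpy as np

def relu_backward(dA, cache):
    # Gradient passes through where Z > 0 and is zero elsewhere.
    Z = cache
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0
    return dZ

def sigmoid_backward(dA, cache):
    # dZ = dA * s * (1 - s), where s = sigmoid(Z).
    Z = cache
    s = 1 / (1 + np.exp(-Z))
    return dA * s * (1 - s)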
def L_model_backward(AL, Y, caches):
    grads = {}
    L = len(caches)  # the number of layers
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)  # after this line, Y is the same shape as AL
    # Derivative of the cross-entropy cost with respect to AL
    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    # Output (sigmoid) layer
    current_cache = caches[-1]
    grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, "sigmoid")
    # Hidden (relu) layers, walking backwards from layer L-1 to 1
    for l in reversed(range(L - 1)):
        grads["dA" + str(l)], grads["dW" + str(l+1)], grads["db" + str(l+1)] = linear_activation_backward(grads["dA" + str(l+1)], caches[l], "relu")
    return grads
def update_parameters(parameters, grads, learning_rate):
    L = len(parameters) // 2  # number of layers in the neural network
    for l in range(L):
        parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * grads["dW" + str(l+1)]
        parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * grads["db" + str(l+1)]
    return parameters
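The pieces above chain into a complete training loop. A hypothetical driver, assuming all the functions above are in scope (the name L_layer_model and the hyperparameter defaults are illustrative, not from the snippets):

def L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=2500):
    # Sketch: initialize, then repeat forward pass -> cost -> backward pass -> update.
    parameters = initialize_parameters_deep(layers_dims)
    for i in range(num_iterations):
        AL, caches = L_model_forward(X, parameters)
        cost = compute_cost(AL, Y)
        grads = L_model_backward(AL, Y, caches)
        parameters = update_parameters(parameters, grads, learning_rate)
        if i % 100 == 0:
            print(f"Cost after iteration {i}: {cost:.4f}")
    return parameters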