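Training loop for an L-layer neural network that supports three update rules, selected by the optimizer argument: plain mini-batch gradient descent ("gd"), gradient descent with momentum ("momentum"), and Adam ("adam").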
import matplotlib.pyplot as plt

# The helper functions used below (initialize_parameters, initialize_velocity,
# initialize_adam, random_mini_batches, forward_propagation, compute_cost,
# backward_propagation, and the update_parameters_with_* routines) are assumed
# to be defined elsewhere, e.g. imported from the assignment's utility module.


def model(X, Y, layers_dims, optimizer, learning_rate=0.0007, mini_batch_size=64, beta=0.9,
          beta1=0.9, beta2=0.999, epsilon=1e-8, num_epochs=10000, print_cost=True):

    L = len(layers_dims)  # number of layers in the neural network
    costs = []            # to keep track of the cost
    t = 0                 # counter required for the Adam update
    seed = 10

    # Initialize parameters
    parameters = initialize_parameters(layers_dims)

    # Initialize the optimizer
    if optimizer == "gd":
        pass  # no initialization required for gradient descent
    elif optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)

    # Optimization loop
    for i in range(num_epochs):

        # Define the random minibatches. Incrementing the seed reshuffles the dataset differently after each epoch
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)

        for minibatch in minibatches:

            # Select a minibatch
            (minibatch_X, minibatch_Y) = minibatch

            # Forward propagation
            a3, caches = forward_propagation(minibatch_X, parameters)

            # Compute cost
            cost = compute_cost(a3, minibatch_Y)

            # Backward propagation
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)

            # Update parameters
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1  # Adam counter
                parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
                                                               t, learning_rate, beta1, beta2, epsilon)

        # Print the cost every 1000 epochs
        if print_cost and i % 1000 == 0:
            print("Cost after epoch %i: %f" % (i, cost))
        if print_cost and i % 100 == 0:
            costs.append(cost)

    # Plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
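
For context, a minimal sketch of how model could be called, assuming the helper functions are in scope and following the convention implied by the code that examples are stored as columns (X of shape (n_x, m), Y of shape (1, m)); the toy dataset and layer sizes here are hypothetical, not from the original.

import numpy as np

# Hypothetical toy problem: 2 input features, 300 examples, binary labels.
np.random.seed(3)
train_X = np.random.randn(2, 300)
train_Y = (train_X[0, :] + train_X[1, :] > 0).reshape(1, 300).astype(float)

# layers_dims[0] must match the input dimension; the hidden sizes are illustrative.
layers_dims = [train_X.shape[0], 5, 2, 1]

# Train with Adam; pass "gd" or "momentum" instead to compare optimizers.
parameters = model(train_X, train_Y, layers_dims, optimizer="adam")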
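
The helpers themselves are not included in this gist. As a hedged sketch of what random_mini_batches presumably does, given how it is called above (reshuffle the columns with the per-epoch seed, then partition into batches of mini_batch_size), one possible implementation:

import math
import numpy as np

def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    """Shuffle (X, Y) consistently and partition the columns into mini-batches."""
    np.random.seed(seed)  # the caller increments the seed each epoch
    m = X.shape[1]        # number of examples (stored as columns)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation]

    mini_batches = []
    num_complete = math.floor(m / mini_batch_size)
    for k in range(num_complete):
        mini_batches.append((shuffled_X[:, k * mini_batch_size:(k + 1) * mini_batch_size],
                             shuffled_Y[:, k * mini_batch_size:(k + 1) * mini_batch_size]))
    if m % mini_batch_size != 0:  # handle the last, smaller batch
        mini_batches.append((shuffled_X[:, num_complete * mini_batch_size:],
                             shuffled_Y[:, num_complete * mini_batch_size:]))
    return mini_batches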
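
Similarly, a sketch of what the update_parameters_with_adam call is expected to compute, following the standard Adam update (exponentially weighted first- and second-moment estimates with bias correction); the "W1"/"b1" and "dW1"/"db1" dictionary key layout is an assumption, not confirmed by the gist:

import numpy as np

def update_parameters_with_adam(parameters, grads, v, s, t,
                                learning_rate=0.0007, beta1=0.9, beta2=0.999, epsilon=1e-8):
    """One Adam step over all layers. v holds first-moment estimates, s second-moment."""
    L = len(parameters) // 2  # parameters is assumed to store one W and one b per layer
    for l in range(1, L + 1):
        for p, g in (("W", "dW"), ("b", "db")):
            # Moving averages of the gradient and of its elementwise square
            v[g + str(l)] = beta1 * v[g + str(l)] + (1 - beta1) * grads[g + str(l)]
            s[g + str(l)] = beta2 * s[g + str(l)] + (1 - beta2) * grads[g + str(l)] ** 2
            # Bias-corrected estimates (t is the step counter incremented by the caller)
            v_hat = v[g + str(l)] / (1 - beta1 ** t)
            s_hat = s[g + str(l)] / (1 - beta2 ** t)
            # Parameter update
            parameters[p + str(l)] -= learning_rate * v_hat / (np.sqrt(s_hat) + epsilon)
    return parameters, v, s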