This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Filter unwanted rows and drop unused columns from the price DataFrame,
# then preview the result.
# NOTE(review): the original lines carried " | |" table-gutter artifacts from
# the page extraction; they are removed here so the code parses.

# Remove the GEF ticker entirely and keep only non-intraday rows.
data = data[data.TICKER != 'GEF']
data = data[data.TYPE != 'Intraday']

# Adjusted-price / metadata columns that are not used downstream.
drop_cols = ['SPLIT_RATIO', 'EX_DIVIDEND', 'ADJ_FACTOR', 'ADJ_VOLUME',
             'ADJ_CLOSE', 'ADJ_LOW', 'ADJ_HIGH', 'ADJ_OPEN', 'VOLUME',
             'FREQUENCY', 'TYPE', 'FIGI']
# Reassign instead of inplace=True — same effect, clearer data flow.
data = data.drop(columns=drop_cols)

data.head()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Shared imports for the analysis.
# NOTE(review): removed " | |" extraction artifacts and a duplicate import —
# median_absolute_error appeared in both sklearn.metrics import lines.

# Scientific stack.
import numpy as np
import pandas as pd

# Plotting.
import matplotlib.pyplot as plt
import seaborn as sns

# Regression metrics and numerical optimization.
from sklearn.metrics import (
    mean_absolute_error,
    mean_squared_error,
    mean_squared_log_error,
    median_absolute_error,
    r2_score,
)
from scipy.optimize import minimize

# Apply seaborn's default plot styling globally (kept from the original order
# intent: styling is configured once at import time).
sns.set()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# NOTE(review): truncated gist fragment — each line carries " | |" table-gutter
# artifacts from the page extraction and the function body is cut off after the
# initial-state setup; not runnable as shown.
# Appears to start a Keras sequence model: Tx = number of time steps,
# n_a = LSTM state size, n_values = feature dimension per step — TODO confirm
# against the full source.
def djmodel(Tx, n_a, n_values): | |
# Define the input of your model with a shape | |
X = Input(shape=(Tx, n_values)) | |
# Define s0, initial hidden state for the decoder LSTM | |
a0 = Input(shape=(n_a,), name='a0') | |
c0 = Input(shape=(n_a,), name='c0') | |
# a/c track the LSTM hidden and cell state through the (missing) time loop.
a = a0 | |
c = c0 |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# NOTE(review): truncated gist fragment with " | |" extraction artifacts — the
# training loop is cut off below; not runnable as shown.
# Appears to train a character-level RNN language model (ix_to_char /
# char_to_ix map indices to characters); vocab_size=27 is presumably
# 26 letters + newline — TODO confirm.
def model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27): | |
# Retrieve n_x and n_y from vocab_size | |
n_x, n_y = vocab_size, vocab_size | |
# Initialize parameters | |
parameters = initialize_parameters(n_a, n_x, n_y) | |
# Initialize loss (this is required because we want to smooth our loss, don't worry about it) | |
loss = get_initial_loss(vocab_size, dino_names) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# NOTE(review): truncated TensorFlow 1.x script fragment with " | |" extraction
# artifacts — cut off right after loading the style image; not runnable as shown.
# NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — a modern port
# would use imageio.imread (or PIL) instead.
# Reset the graph | |
tf.reset_default_graph() | |
# Start interactive session | |
sess = tf.InteractiveSession() | |
# Load and normalize the content image; the style image's normalization step
# is missing from this view.
content_image = scipy.misc.imread("images/louvre_small.jpg") | |
content_image = reshape_and_normalize_image(content_image) | |
style_image = scipy.misc.imread("images/monet.jpg") |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# NOTE(review): truncated gist fragment with " | |" extraction artifacts — only
# the input tensor and zero-padding of the ResNet-50 graph are visible; the
# stages and output head are cut off below.
def ResNet50(input_shape = (64, 64, 3), classes = 6): | |
# Define the input as a tensor with shape input_shape | |
X_input = Input(input_shape) | |
# Zero-Padding | |
# 3-pixel padding on each side — presumably for the 7x7 stride-2 stem conv
# that follows in the missing Stage 1; TODO confirm.
X = ZeroPadding2D((3, 3))(X_input) | |
# Stage 1 |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# NOTE(review): truncated gist fragment with " | |" extraction artifacts —
# setup only; the TF1 conv-net graph construction and training loop are cut
# off below. Not runnable as shown.
def model(X_train, Y_train, X_test, Y_test, learning_rate=0.009, | |
num_epochs=400, minibatch_size=64, print_cost=True): | |
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables | |
tf.set_random_seed(1) # to keep results consistent (tensorflow seed) | |
seed = 3 # to keep results consistent (numpy seed) | |
# X_train is unpacked as 4-D (examples, height, width, channels); Y_train's
# second axis is the number of output classes.
(m, n_H0, n_W0, n_C0) = X_train.shape | |
n_y = Y_train.shape[1] | |
costs = [] # To keep track of the cost |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# NOTE(review): truncated gist fragment with " | |" extraction artifacts —
# setup only; the TF1 fully-connected-net graph construction and training
# loop are cut off below. Not runnable as shown.
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001, | |
num_epochs = 1500, minibatch_size = 32, print_cost = True): | |
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables | |
tf.set_random_seed(1) # to keep consistent results | |
seed = 3 # to keep consistent results | |
(n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set) | |
n_y = Y_train.shape[0] # n_y : output size | |
costs = [] # To keep track of the cost | |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# NOTE(review): truncated gist fragment with " | |" extraction artifacts — only
# initialization is visible; the epoch/minibatch training loop is cut off below.
# `optimizer` presumably selects among plain gd / momentum (uses beta) / adam
# (uses beta1, beta2, epsilon) update rules — TODO confirm against full source.
def model(X, Y, layers_dims, optimizer, learning_rate=0.0007, mini_batch_size=64, beta=0.9, | |
beta1=0.9, beta2=0.999, epsilon=1e-8, num_epochs=10000, print_cost=True): | |
L = len(layers_dims) # number of layers in the neural networks | |
costs = [] # to keep track of the cost | |
t = 0 # initializing the counter required for Adam update | |
seed = 10 | |
# Initialize parameters | |
parameters = initialize_parameters(layers_dims) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# NOTE(review): truncated gist fragment with " | |" extraction artifacts —
# cut off after computing dA2; the dropout-mask application (D2, D1) and the
# earlier-layer gradients are missing from this view. Not runnable as shown.
def backward_propagation_with_dropout(X, Y, cache, keep_prob): | |
# m = number of examples (columns of X).
m = X.shape[1] | |
# Unpack forward-pass cache: Z/A pre-/post-activations, W/b parameters,
# D1/D2 the dropout masks saved during the forward pass.
(Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache | |
# A3 - Y is the standard output-layer gradient shortcut — presumably
# sigmoid/softmax + cross-entropy loss; TODO confirm.
dZ3 = A3 - Y | |
dW3 = 1./m * np.dot(dZ3, A2.T) | |
db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True) | |
dA2 = np.dot(W3.T, dZ3) |