Suvro Banerjee (SuvroBaner)

import numpy as np
import tensorflow as tf
print(tf.__version__) # 1.15.0
w = tf.Variable(0, dtype = tf.float32) # defining the parameter "w"
#cost = tf.add(tf.add(w**2, tf.multiply(-10., w)), 25) # defining the cost function "J" with explicit TF ops
cost = w**2 - 10*w + 25 # the same cost, written with overloaded operators
train = tf.train.GradientDescentOptimizer(0.01).minimize(cost) # learning rate is 0.01 to minimize the cost
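The preview stops before the variables are initialized and the graph is run; a minimal TF 1.x sketch of the rest, assuming 1000 gradient steps:

init = tf.global_variables_initializer()
with tf.Session() as session:
    session.run(init)
    print(session.run(w))      # 0.0 before any training step
    for _ in range(1000):      # assumed number of iterations
        session.run(train)
    print(session.run(w))      # close to 5.0, the minimizer of (w - 5)**2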
def initialize_adam(parameters) :
"""
Initializes v and s as two python dictionaries with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters["W" + str(l)] = Wl
parameters["b" + str(l)] = bl
def initialize_velocity(parameters):
"""
Initializes the velocity as a python dictionary with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
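The same pattern applies here; a sketch assuming the momentum velocity starts at zero for every gradient:

import numpy as np

def initialize_velocity(parameters):
    L = len(parameters) // 2    # number of layers in the network
    v = {}
    for l in range(1, L + 1):
        v["dW" + str(l)] = np.zeros(parameters["W" + str(l)].shape)
        v["db" + str(l)] = np.zeros(parameters["b" + str(l)].shape)
    return v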
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
mini_batch_size -- size of the mini-batches, integer
Returns:
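The preview cuts off at "Returns:"; a sketch of a typical implementation that shuffles the columns of (X, Y) together and slices them into batches (returning a list of (mini_batch_X, mini_batch_Y) tuples and the handling of the last partial batch are assumptions):

import math
import numpy as np

def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    np.random.seed(seed)
    m = X.shape[1]                       # number of examples
    mini_batches = []
    # Step 1: shuffle X and Y with the same column permutation
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((1, m))
    # Step 2: partition into full batches, then one smaller batch if m is not divisible
    num_complete = math.floor(m / mini_batch_size)
    for k in range(num_complete):
        mini_batch_X = shuffled_X[:, k * mini_batch_size:(k + 1) * mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k * mini_batch_size:(k + 1) * mini_batch_size]
        mini_batches.append((mini_batch_X, mini_batch_Y))
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[:, num_complete * mini_batch_size:]
        mini_batch_Y = shuffled_Y[:, num_complete * mini_batch_size:]
        mini_batches.append((mini_batch_X, mini_batch_Y))
    return mini_batches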
def update_parameters_with_gd(parameters, grads, learning_rate):
"""
Update parameters using one step of gradient descent
Arguments:
parameters -- python dictionary containing your parameters to be updated:
parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
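A sketch of the one-step update the docstring describes, assuming grads follows the "dW1", "db1", ... key convention used in the other snippets:

def update_parameters_with_gd(parameters, grads, learning_rate):
    L = len(parameters) // 2    # number of layers in the network
    for l in range(1, L + 1):
        parameters["W" + str(l)] = parameters["W" + str(l)] - learning_rate * grads["dW" + str(l)]
        parameters["b" + str(l)] = parameters["b" + str(l)] - learning_rate * grads["db" + str(l)]
    return parameters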
import random
def quick_sort(array):
if len(array) <= 1: return array
pivot = random.randint(0, len(array)-1)
left, right = list(), list()
for ii in range(len(array)):
if array[ii] < array[pivot]:
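The preview stops inside the loop; a sketch of how the function most likely continues, splitting the remaining elements around the randomly chosen pivot and recursing (skipping the pivot's own index is an assumption, so it is placed exactly once in the result):

import random

def quick_sort(array):
    if len(array) <= 1:
        return array
    pivot = random.randint(0, len(array) - 1)   # index of a random pivot
    left, right = list(), list()
    for ii in range(len(array)):
        if ii == pivot:
            continue                            # keep the pivot out of both halves
        if array[ii] < array[pivot]:
            left.append(array[ii])
        else:
            right.append(array[ii])
    return quick_sort(left) + [array[pivot]] + quick_sort(right)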
def partition(arr, low, high):
pivot = arr[high] # pivot is the last element of the array
i = (low - 1) # index of the last element known to be <= pivot; starts at low - 1
# j scans the sub-array and compares each element with the pivot
for j in range(low, high):
if arr[j] <= pivot:
i = i + 1
# swapping logic
arr[i], arr[j] = arr[j], arr[i]
arr[i+1], arr[high] = arr[high], arr[i+1] # swapping the pivot into its final position
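The preview ends right after that final swap; to be usable the function also has to return the pivot's final index, so here is a self-contained sketch together with a hypothetical quick_sort_in_place driver showing how that index feeds the recursion:

def partition(arr, low, high):
    pivot = arr[high]                    # pivot is the last element of the sub-array
    i = low - 1                          # index of the last element known to be <= pivot
    for j in range(low, high):
        if arr[j] <= pivot:
            i = i + 1
            arr[i], arr[j] = arr[j], arr[i]
    arr[i + 1], arr[high] = arr[high], arr[i + 1]   # move the pivot into its final position
    return i + 1                         # where the pivot ended up

def quick_sort_in_place(arr, low, high):            # hypothetical driver, not in the gist
    if low < high:
        p = partition(arr, low, high)
        quick_sort_in_place(arr, low, p - 1)
        quick_sort_in_place(arr, p + 1, high)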
def backward_propagation_with_dropout(X, Y, cache, keep_prob):
"""
Implements the backward propagation of our baseline model to which we added dropout.
Arguments:
X -- input dataset, of shape (2, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
cache -- cache output from forward_propagation_with_dropout()
keep_prob -- probability of keeping a neuron active during drop-out, scalar
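The body is cut off; a sketch of the usual inverted-dropout backward pass, assuming the cache tuple layout produced by the forward_propagation_with_dropout sketch further down (ReLU hidden layers, sigmoid output, cross-entropy loss):

import numpy as np

def backward_propagation_with_dropout(X, Y, cache, keep_prob):
    m = X.shape[1]
    (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache
    dZ3 = A3 - Y
    dW3 = (1. / m) * np.dot(dZ3, A2.T)
    db3 = (1. / m) * np.sum(dZ3, axis=1, keepdims=True)
    dA2 = np.dot(W3.T, dZ3)
    dA2 = dA2 * D2               # shut down the same neurons as in the forward pass
    dA2 = dA2 / keep_prob        # rescale so the expected value is unchanged
    dZ2 = dA2 * (A2 > 0)         # ReLU derivative: pass gradient only where A2 > 0
    dW2 = (1. / m) * np.dot(dZ2, A1.T)
    db2 = (1. / m) * np.sum(dZ2, axis=1, keepdims=True)
    dA1 = np.dot(W2.T, dZ2)
    dA1 = dA1 * D1
    dA1 = dA1 / keep_prob
    dZ1 = dA1 * (A1 > 0)
    dW1 = (1. / m) * np.dot(dZ1, X.T)
    db1 = (1. / m) * np.sum(dZ1, axis=1, keepdims=True)
    return {"dW3": dW3, "db3": db3, "dA2": dA2, "dW2": dW2, "db2": db2,
            "dA1": dA1, "dW1": dW1, "db1": db1}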
def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):
"""
Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.
Arguments:
X -- input dataset, of shape (2, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape (20, 2)
b1 -- bias vector of shape (20, 1)
W2 -- weight matrix of shape (3, 20)
b2 -- bias vector of shape (3, 1)
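The docstring is cut short before the W3 and b3 entries; a sketch of the usual inverted-dropout forward pass, with relu and sigmoid defined inline so the snippet stands on its own:

import numpy as np

def forward_propagation_with_dropout(X, parameters, keep_prob=0.5):
    relu = lambda z: np.maximum(0, z)
    sigmoid = lambda z: 1. / (1. + np.exp(-z))
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    D1 = np.random.rand(A1.shape[0], A1.shape[1]) < keep_prob   # dropout mask for layer 1
    A1 = A1 * D1 / keep_prob        # inverted dropout: rescale the surviving activations
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    D2 = np.random.rand(A2.shape[0], A2.shape[1]) < keep_prob   # dropout mask for layer 2
    A2 = A2 * D2 / keep_prob
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)                # no dropout on the output layer
    cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
    return A3, cache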
def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):
"""
Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
learning_rate -- learning rate of the optimization
num_iterations -- number of iterations of the optimization loop
print_cost -- If True, print the cost every 10000 iterations
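The docstring ends mid-way; a sketch of how the training loop plausibly fits together, where initialize_parameters, forward_propagation, compute_cost, compute_cost_with_regularization, backward_propagation, backward_propagation_with_regularization, and update_parameters are assumed helpers from the same set of snippets:

def model(X, Y, learning_rate=0.3, num_iterations=30000, print_cost=True, lambd=0, keep_prob=1):
    costs = []
    layers_dims = [X.shape[0], 20, 3, 1]                 # matches the layer shapes in the dropout snippet
    parameters = initialize_parameters(layers_dims)      # assumed helper
    for i in range(num_iterations):
        # Forward pass: plain, or with dropout when keep_prob < 1
        if keep_prob == 1:
            a3, cache = forward_propagation(X, parameters)           # assumed helper
        else:
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)
        # Cost: plain cross-entropy, or with L2 regularization when lambd > 0
        if lambd == 0:
            cost = compute_cost(a3, Y)                               # assumed helper
        else:
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)   # assumed helper
        # Backward pass (regularization and dropout are assumed not to be combined)
        if lambd == 0 and keep_prob == 1:
            grads = backward_propagation(X, Y, cache)                # assumed helper
        elif lambd != 0:
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)  # assumed helper
        else:
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)
        # One gradient descent step
        parameters = update_parameters(parameters, grads, learning_rate)          # assumed helper
        if print_cost and i % 10000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
        if i % 1000 == 0:
            costs.append(cost)
    return parameters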