- Name : Aditya Kaushik
- Github : aditya98ak
- Twitter : adity98ak
- Facebook : AdityaKaushik001
I have worked on the following projects in this contest:
def backprop_this_layer(self, da_curr, z_curr, W_curr, b_curr, A_prev, activation_function):
    """Backward pass through one dense layer.

    Args:
        da_curr: gradient of the loss w.r.t. this layer's activations, shape (n_curr, m).
        z_curr: pre-activation values of this layer, shape (n_curr, m).
        W_curr: weight matrix of this layer, shape (n_curr, n_prev).
        b_curr: bias vector of this layer (unused here; kept for a uniform signature).
        A_prev: activations of the previous layer, shape (n_prev, m).
        activation_function: 'sigmoid' or 'relu'.

    Returns:
        (da_prev, dW_curr, db_curr): gradients w.r.t. the previous layer's
        activations, this layer's weights, and this layer's biases.

    Raises:
        Exception: if activation_function is not 'sigmoid' or 'relu'.
    """
    # NOTE: compare strings with ==, never `is` (identity is not guaranteed
    # for equal string literals).
    if activation_function == 'sigmoid':
        activation_back = self.sigmoid_backward
    elif activation_function == 'relu':
        activation_back = self.relu_backward
    else:
        # Fail loudly instead of silently returning None (matches the
        # error handling of the forward pass).
        raise Exception(f"{activation_function} is currently not supported, Only sigmoid, relu are supported")
    # m = number of examples in the batch; used to average the gradients.
    m = A_prev.shape[1]
    # Gradient w.r.t. the pre-activation z of this layer.
    dz_curr = activation_back(da_curr, z_curr)
    # Average gradients over the batch.
    dW_curr = np.dot(dz_curr, A_prev.T) / m
    db_curr = np.sum(dz_curr, axis=1, keepdims=True) / m
    # Gradient propagated to the previous layer's activations.
    da_prev = np.dot(W_curr.T, dz_curr)
    return da_prev, dW_curr, db_curr
def forward_prop_this_layer(self, A_prev, W_curr, b_curr, activation_function):
    """Forward pass through one dense layer.

    Args:
        A_prev: activations of the previous layer, shape (n_prev, m).
        W_curr: weight matrix of this layer, shape (n_curr, n_prev).
        b_curr: bias vector of this layer, shape (n_curr, 1).
        activation_function: 'relu' or 'sigmoid'.

    Returns:
        (A_curr, z_curr): this layer's activations and its pre-activation
        values (z_curr is kept for use in backpropagation).

    Raises:
        Exception: if activation_function is not 'relu' or 'sigmoid'.
    """
    # Affine transform: z = W.A_prev + b (b broadcasts over the batch).
    z_curr = np.dot(W_curr, A_prev) + b_curr
    # NOTE: compare strings with ==, never `is` (identity is not guaranteed
    # for equal string literals).
    if activation_function == 'relu':
        activation = relu
    elif activation_function == 'sigmoid':
        activation = sigmoid
    else:
        raise Exception(f"{activation_function} is currently not supported, Only sigmoid, relu are supported")
    # Return both the activated output and the raw pre-activation.
    return activation(z_curr), z_curr
def initialize_params(self, architecture):
    """Create randomly initialized weights and biases for every layer.

    Args:
        architecture: list of dicts, each with 'input_dim' and 'output_dim'
            describing one layer (see NN_ARCHITECTURE for the expected shape).

    Returns:
        dict mapping 'W1', 'b1', 'W2', 'b2', ... to numpy arrays, where
        W{i} has shape (output_dim, input_dim) and b{i} has shape
        (output_dim, 1).
    """
    # We'll save parameters in a dictionary so that we can access them later on.
    params = {}
    for id_, layer in enumerate(architecture):
        # Layer numbering starts at 1 so that keys line up with the usual
        # W1/b1 ... Wn/bn convention (layer 0 is the input, which has no params).
        layer_id = id_ + 1
        # With help of the architecture provided, get dimensions for each layer.
        input_dim = layer['input_dim']
        output_dim = layer['output_dim']
        # Small random values break symmetry; the 0.1 factor keeps the
        # initial activations from saturating sigmoid units.
        params['W' + str(layer_id)] = np.random.randn(output_dim, input_dim) * 0.1
        params['b' + str(layer_id)] = np.random.randn(output_dim, 1) * 0.1
    return params
# Layer-by-layer description of the network: each entry gives the weight
# matrix dimensions for one layer and the activation applied to its output.
NN_ARCHITECTURE = [
    {"input_dim": 3, "output_dim": 4, "activation": "relu"},     # hidden layer 1
    {"input_dim": 4, "output_dim": 4, "activation": "relu"},     # hidden layer 2
    {"input_dim": 4, "output_dim": 1, "activation": "sigmoid"},  # output layer
]
# Quick clustering demo: generate synthetic blob data and fit KMeans to it.
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_blobs

# make_blobs returns a 2-tuple: (samples, labels), where labels mark the
# centre each sample was drawn from.
data = make_blobs(n_samples=200, n_features=2, centers=3, cluster_std=1.5)

from sklearn.cluster import KMeans

# Cluster only the raw samples (data[0]); the true labels are not used.
samples = data[0]
kmeans = KMeans(n_clusters=3).fit(samples)
I have worked on the following projects in this contest: