Skip to content

Instantly share code, notes, and snippets.

@aishwarya-singh25
Last active April 15, 2022 23:01
Show Gist options
  • Save aishwarya-singh25/d6902d4135a6a03e666897dcefb61ce2 to your computer and use it in GitHub Desktop.
CNN from scratch using numpy
# Backward pass for the convolution filters: one gradient entry per
# (filter, filter-row, filter-col) weight, averaged over all input images.
filter_update = []
for filt in range(f.shape[2]):
    for row in range(f.shape[0]):
        for col in range(f.shape[1]):
            # Accumulate, over every image, the correlation between the
            # input patch this weight touched in the forward pass and the
            # error at the conv output (both are s_row x s_col).
            grad_sum = 0
            for img in range(X.shape[2]):
                patch = X[row:row + s_row, col:col + s_col, img]
                grad_sum = grad_sum + (patch * error_wrt_filter_output[:, :, filt]).sum()
            # average the accumulated gradient over the number of images
            filter_update.append(grad_sum / X.shape[2])
filter_update_array = np.array(filter_update)
# reshape the flat gradient list to (num_filters, filter_rows, filter_cols)
filter_update_array = np.resize(filter_update_array, (f.shape[2], f.shape[0], f.shape[1]))
# Converting the 0-9 digit labels into a binary classification target:
# digits greater than 4 become class 1, the rest class 0.
# Vectorized NumPy comparison replaces the original per-element Python loop
# (same result, same dtype, one pass in C instead of 200 Python iterations).
y[0] = (y[0] > 4).astype(y.dtype)
# checking value counts of the two resulting classes
pd.Series(y[0]).value_counts()
# Forward pass of the convolution layer: apply every filter to every
# patch of every image by element-wise multiply followed by a sum.
filter_output = [
    (new_image[img][patch] * f[:, :, filt]).sum()
    for img in range(len(new_image))        # for each image
    for filt in range(f.shape[2])           # apply each filter
    for patch in range(new_image.shape[1])  # over every patch
]
# shape the flat list to (num_images, num_filters, patches_per_image)
filter_output = np.resize(np.array(filter_output),
                          (len(new_image), f.shape[2], new_image.shape[1]))
# apply the sigmoid activation over the convolution output
filter_output_sigmoid = sigmoid(filter_output)
filter_output.shape, filter_output_sigmoid.shape
# Generating filter-sized sliding-window patches from every image
# (stride 1, "valid" positions only).
new_image = []
for img in range(X.shape[2]):                       # for each image
    for top in range(X.shape[0] - f.shape[0] + 1):  # slide vertically
        for left in range(X.shape[1] - f.shape[1] + 1):  # slide horizontally
            window = X[:, :, img][top:top + f.shape[0], left:left + f.shape[1]]
            new_image.append(window)
new_image = np.array(new_image)
# group the flat patch list per image:
# (num_images, patches_per_image, filter_rows, filter_cols)
new_image.resize((X.shape[2], int(new_image.shape[0] / X.shape[2]),
                  new_image.shape[1], new_image.shape[2]))
new_image.shape
# Derivative of the FC-layer input w.r.t. the flattened sigmoid output
output_layer_input_wrt_filter_output_sigmoid = wo.T
# Derivative of the sigmoid activation: sig * (1 - sig)
filter_output_sigmoid_wrt_filter_output = filter_output_sigmoid * (1 - filter_output_sigmoid)
# Chain rule: propagate the output error back through the FC weights and
# the conv-layer sigmoid to get the error at the convolution output.
error_wrt_filter_output = (
    np.dot(output_layer_input_wrt_filter_output_sigmoid.T,
           error_wrt_output * output_wrt_output_layer_input)
    * filter_output_sigmoid_wrt_filter_output
)
# average over the image axis, then shape to the conv-output grid per filter
error_wrt_filter_output = np.average(error_wrt_filter_output, axis=1)
error_wrt_filter_output = np.resize(
    error_wrt_filter_output,
    (X.shape[0] - f.shape[0] + 1, X.shape[1] - f.shape[1] + 1, f.shape[2]),
)
# Squared-error loss (halved so its gradient is simply -(y - output))
error = np.square(y - output) / 2
# dError / dOutput
error_wrt_output = -(y - output)
# dOutput / d(output_layer_input): derivative of the output sigmoid
output_wrt_output_layer_input = output * (1 - output)
# d(output_layer_input) / dW for the fully connected layer is its input
output_wrt_w = filter_output_sigmoid
# Initializing three 5x5 convolution filters with uniform random values;
# transpose so individual filters are indexed along the LAST axis: f[:, :, k].
f = np.random.uniform(size=(3, 5, 5))
f = f.T
for idx in range(3):
    print(f'Filter {idx + 1}', '\n', f[:, :, idx], '\n')
# Gradient-descent step on every convolution filter
for idx in range(f.shape[2]):
    f[:, :, idx] -= lr * filter_update_array[idx]
# Flatten each image's conv output into a single feature vector, then
# transpose to (features, images) so it lines up with wo for the dot product.
flat_features = filter_output_sigmoid.shape[1] * filter_output_sigmoid.shape[2]
filter_output_sigmoid = filter_output_sigmoid.reshape(
    (filter_output_sigmoid.shape[0], flat_features)
)
filter_output_sigmoid = filter_output_sigmoid.T
# Linear transformation for the fully connected layer
output_layer_input = np.dot(wo.T, filter_output_sigmoid)
# Standardize (zero mean, unit std) before the activation
output_layer_input = (output_layer_input - np.average(output_layer_input)) / np.std(output_layer_input)
# Sigmoid activation produces the final network output
output = sigmoid(output_layer_input)
# importing required libraries
import numpy as np
import pandas as pd
from tqdm import tqdm
from keras.datasets import mnist
# loading dataset (MNIST digit images and labels via keras)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# selecting a subset of data (200 images)
x_train = x_train[:200]
y = y_train[:200]
# transpose so images lie along the last axis — presumably (28, 28, 200); TODO confirm
X = x_train.T
# scale pixel values from 0-255 down to [0, 1]
X = X/255
# reshape labels to a column, then transpose to a (1, 200) row vector
y.resize((200,1))
y = y.T
#checking value counts of the label distribution
pd.Series(y[0]).value_counts()
# defining the Sigmoid Function
def sigmoid(x):
    """Logistic sigmoid: map x (scalar or ndarray) into (0, 1)."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
# derivative of Sigmoid Function
def derivatives_sigmoid(x):
    """Sigmoid derivative expressed in terms of the sigmoid's OUTPUT x:
    d(sig)/dz = sig * (1 - sig)."""
    complement = 1 - x
    return x * complement
# Dimensions of the convolution output for a stride-1 "valid" convolution
s_row = X.shape[0] - f.shape[0] + 1
s_col = X.shape[1] - f.shape[1] + 1
num_filter = f.shape[2]
# every image flattens into this many features for the fully connected layer
inputlayer_neurons = s_row * s_col * num_filter
output_neurons = 1
# initializing the fully connected weights with uniform random values
wo = np.random.uniform(size=(inputlayer_neurons, output_neurons))
# Gradient of the loss w.r.t. the fully connected weights (chain rule:
# FC input  x  (output error * sigmoid derivative))
delta_error_fcp = np.dot(output_wrt_w,
                         (error_wrt_output * output_wrt_output_layer_input).T)
# gradient-descent step on the fully connected weights
wo = wo - lr * delta_error_fcp
@peppernaut
Copy link

how do we print out the accuracy and predicted and expected output?

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment