Mehdi Mehdi-Amine
💭 Having fun with Neural Networks
@Mehdi-Amine
Mehdi-Amine / perceptron-class.py
Last active June 26, 2023 08:46
perceptron class
import numpy as np

class Perceptron:
    def __init__(self, input_size):
        np.random.seed(42)
        self.sizes = [input_size, 1]  # input layer size and a single output neuron
        self.bias = np.random.randn(1, 1)
        self.weights = np.random.randn(1, input_size)
        # Used for plotting convergence
        self.parameters_as_they_change = [np.concatenate((self.bias[0], self.weights.squeeze()), axis=0)]
        print("Generated Perceptron:")
        print(f"Sizes: {self.sizes}")
        print("With random parameters:")
        print(f"Bias: {self.bias}")
        print(f"Weights: {self.weights}")

perceptron = Perceptron(2)  # two layers: input and output
perceptron.sgd(training_data=training_data, mini_batch_size=80, epochs=800, eta=0.01)
'''
Out:
Generated Perceptron:
Sizes: [2, 1]
With random parameters:
Bias: [[0.49671415]]
Weights: [[-0.1382643 0.64768854]]
-------------------------------------------------------------
'''
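The sgd method called above isn't shown in this preview. A minimal sketch of a mini-batch SGD update consistent with the class above, assuming a linear output, mean-squared-error loss, and the parameters_as_they_change log (the method body, loss, and update rule are assumptions, not the gist's confirmed code):

def sgd(self, training_data, mini_batch_size, epochs, eta):
    # Plain mini-batch stochastic gradient descent (a sketch, not the original).
    for epoch in range(epochs):
        np.random.shuffle(training_data)
        mini_batches = [training_data[k:k + mini_batch_size]
                        for k in range(0, len(training_data), mini_batch_size)]
        for mini_batch in mini_batches:
            nabla_b = np.zeros(self.bias.shape)
            nabla_w = np.zeros(self.weights.shape)
            for x, y in mini_batch:
                # Linear output z = w.x + b; the MSE gradient w.r.t. z is (z - y).
                z = np.dot(self.weights, x) + self.bias
                delta = z - y
                nabla_b += delta
                nabla_w += np.dot(delta, x.reshape(1, -1))
            self.bias -= (eta / len(mini_batch)) * nabla_b
            self.weights -= (eta / len(mini_batch)) * nabla_w
            # Log parameters after each update, for the convergence plots below.
            self.parameters_as_they_change.append(
                np.concatenate((self.bias[0], self.weights.squeeze()), axis=0))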
@Mehdi-Amine
Mehdi-Amine / sgdreg-sklearn.py
Created April 21, 2020 13:23
Scikit-Learn SGDRegressor
from sklearn.linear_model import SGDRegressor
sgd_reg = SGDRegressor(max_iter=100, tol=None, penalty=None, eta0=0.01, learning_rate='constant')
sgd_reg.fit(dx_train, dy_train)
print(f"Optimized Parameters: \nBias: {round(sgd_reg.intercept_[0], 2)}, Weights: {sgd_reg.coef_}")
'''
Out:
Optimized Parameters:
Bias: 0.0, Weights: [1. 0.5]
'''
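The gist doesn't show how dx_train and dy_train were built. Given the recovered bias of 0.0 and weights of [1. 0.5], and the way the scaled targets are constructed below, a plausible reconstruction (the sample count and value range are assumptions):

import numpy as np

np.random.seed(42)
dx_train = np.random.rand(1000, 2) * 10           # two features; size and range assumed
dy_train = dx_train[:, 0] + dx_train[:, 1] * 0.5  # y = x1 + 0.5*x2, matching the fitted weights
training_data = list(zip(dx_train, dy_train))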
dx1_standardized = (dx_train[:,0] - dx_train[:,0].mean()) / dx_train[:,0].std()
dx2_standardized = (dx_train[:,1] - dx_train[:,1].mean()) / dx_train[:,1].std()
dx_train_standardized = np.concatenate((dx1_standardized.reshape(-1,1), dx2_standardized.reshape(-1,1)), axis=1)
dy_train_with_standardized_x = (dx1_standardized + dx2_standardized * 0.5)
training_data_std = list(zip(dx_train_standardized, dy_train_with_standardized_x))
# Printing the first 5 rows:
print(f"Standardized X: \n{dx_train_standardized[:5]} \nStandardized y: \n{dy_train_with_standardized_x[:5]}")
from sklearn.preprocessing import StandardScaler
std_scaler = StandardScaler()
dx_train_standardized_sklearn = std_scaler.fit_transform(dx_train)
# Printing the first 5 rows
print(f"Training data standardized using Scikit-Learn: \n{dx_train_standardized_sklearn[:5]}")
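To confirm that the manual standardization matches Scikit-Learn's, a quick check (assumes the arrays from the two snippets above are in scope):

# Both approaches subtract the per-column mean and divide by the per-column std,
# so the results should agree to floating-point precision.
print(np.allclose(dx_train_standardized, dx_train_standardized_sklearn))  # expected: True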
dx1_normalized = (dx_train[:,0] - dx_train[:,0].min()) / (dx_train[:,0].max() - dx_train[:,0].min())
dx2_normalized = (dx_train[:,1] - dx_train[:,1].min()) / (dx_train[:,1].max() - dx_train[:,1].min())
dx_train_normalized = np.concatenate((dx1_normalized.reshape(-1,1), dx2_normalized.reshape(-1,1)), axis=1)
dy_train_with_normalized_x = (dx1_normalized + dx2_normalized * 0.5)
training_data_normd = list(zip(dx_train_normalized, dy_train_with_normalized_x))
# Printing the first 5 rows:
print(f"Normalized X: \n{dx_train_normalized[:5]} \ny: \n{dy_train_with_normalized_x[:5]}")
from sklearn.preprocessing import MinMaxScaler
norm_scaler = MinMaxScaler()
dx_train_normalized_sklearn = norm_scaler.fit_transform(dx_train)
# Printing the first 5 rows
print(f"Training data normalized using Scikit-Learn: \n{dx_train_normalized_sklearn[:5]}")
'''
Out:
Training data normalized using Scikit-Learn:
'''
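The plotting gist below reads parameter trajectories named params and params_std. A sketch of how they could be collected from the parameters_as_they_change log of the Perceptron class above, one perceptron per dataset (the variable name for the normalized run is an assumption):

# Each row is [bias, w1, w2] recorded after one mini-batch update.
perceptron = Perceptron(2)
perceptron.sgd(training_data=training_data, mini_batch_size=80, epochs=800, eta=0.01)
params = np.array(perceptron.parameters_as_they_change)

perceptron_std = Perceptron(2)
perceptron_std.sgd(training_data=training_data_std, mini_batch_size=80, epochs=800, eta=0.01)
params_std = np.array(perceptron_std.parameters_as_they_change)

perceptron_normd = Perceptron(2)
perceptron_normd.sgd(training_data=training_data_normd, mini_batch_size=80, epochs=800, eta=0.01)
params_normd = np.array(perceptron_normd.parameters_as_they_change)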
@Mehdi-Amine
Mehdi-Amine / plot-sgds.py
Created April 21, 2020 16:35
investigating the effect of feature scaling on SGD
import plotly.graph_objects as go

fig = go.Figure()
fig.add_trace(go.Scatter3d(x=params[:, 0], y=params[:, 1], z=params[:, 2],
                           mode='lines',
                           name='No Scaling',
                           line=dict(color='green', width=2)))
fig.add_trace(go.Scatter3d(x=params_std[:, 0], y=params_std[:, 1], z=params_std[:, 2],
                           mode='lines',
                           name='Standardization',
                           line=dict(color='blue', width=2)))  # closing call reconstructed; color assumed
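Since training_data_normd is built above, a third trace for the normalized run plausibly completes the comparison. A sketch, with the trace name, color, and axis titles as assumptions:

fig.add_trace(go.Scatter3d(x=params_normd[:, 0], y=params_normd[:, 1], z=params_normd[:, 2],
                           mode='lines',
                           name='Normalization',
                           line=dict(color='red', width=2)))
# Label the axes with the parameters being tracked: the bias and the two weights.
fig.update_layout(scene=dict(xaxis_title='bias', yaxis_title='w1', zaxis_title='w2'))
fig.show()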
def sigmoid_prime(z):
    # Derivative of the sigmoid: sigmoid(z) * (1 - sigmoid(z))
    sig = sigmoid(z)
    return sig * (1 - sig)

# Output-layer error (delta), then the gradients:
delta = cost_derivative(activation, y) * sigmoid_prime(z)
# Equation 6
nabla_b = delta
# Equation 7
nabla_w = np.dot(delta, instance.reshape(1, -1))
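This last snippet assumes sigmoid and cost_derivative are defined elsewhere in the gist. Minimal sketches, assuming the standard logistic function and a quadratic (MSE) cost whose derivative with respect to the activation is (activation - y):

def sigmoid(z):
    # Standard logistic function.
    return 1.0 / (1.0 + np.exp(-z))

def cost_derivative(activation, y):
    # Gradient of the quadratic cost 0.5 * (activation - y)**2 w.r.t. the activation.
    return activation - y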