@giuseppebonaccorso
Last active February 13, 2024 19:34
Sanger's rule (Hebbian Learning)
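Sanger's rule (also known as the Generalized Hebbian Algorithm) extracts the principal components of a zero-mean dataset using only local Hebbian updates. With linear outputs $y = Wx$, the matrix form of the rule, which is what the snippet below implements, is

$$\Delta W = \eta \left( y x^{T} - \mathrm{LT}\left( y y^{T} \right) W \right)$$

where $\mathrm{LT}(\cdot)$ keeps the lower-triangular part of a matrix (diagonal included). The second term decorrelates each output from the ones above it, so the rows of $W$ converge to the unit eigenvectors of the input covariance matrix, ordered by descending eigenvalue.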
import numpy as np

from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler

# Set random seed for reproducibility
np.random.seed(1000)

# Create and center the dataset (with_std=False means only the mean is removed)
X, _ = make_blobs(n_samples=500, centers=2, cluster_std=5.0, random_state=1000)
scaler = StandardScaler(with_std=False)
Xs = scaler.fit_transform(X)

# Compute eigenvalues and eigenvectors of the input covariance matrix (for comparison)
Q = np.cov(Xs.T)
eigu, eigv = np.linalg.eig(Q)

# Initialize the weights and the training parameters
W_sanger = np.random.normal(scale=0.1, size=(2, 2))
prev_W_sanger = np.ones((2, 2))
learning_rate = 0.1
nb_iterations = 2000
t = 0.0

for i in range(nb_iterations):
    prev_W_sanger = W_sanger.copy()
    dw = np.zeros((2, 2))
    t += 1.0

    for j in range(Xs.shape[0]):
        # Output of the two linear neurons: y = W x
        Ysj = np.dot(W_sanger, Xs[j]).reshape((2, 1))

        # Lower-triangular part of y y^T (the decorrelation term LT(y y^T))
        QYd = np.tril(np.dot(Ysj, Ysj.T))

        # Sanger's rule: dw += y x^T - LT(y y^T) W
        dw += np.dot(Ysj, Xs[j].reshape((1, 2))) - np.dot(QYd, W_sanger)

    # Apply the accumulated update with a decaying learning rate
    W_sanger += (learning_rate / t) * dw

    # Renormalize each row to unit length
    W_sanger /= np.linalg.norm(W_sanger, axis=1).reshape((2, 1))
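The script tracks prev_W_sanger but never uses it; a minimal convergence check (my addition, reusing the variables above, not part of the original gist) could compare the last two epochs:

# Hypothetical convergence check: the Frobenius norm of the
# last-epoch update should be close to zero at convergence
delta = np.linalg.norm(W_sanger - prev_W_sanger)
print('Weight change during the last epoch: {:.6f}'.format(delta))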
# Eigenvalues (np.linalg.eig does not sort them)
print(eigu)
# [0.67152209 1.33248593]

# Eigenvectors (one per column)
print(eigv)
# [[-0.70710678 -0.70710678]
#  [ 0.70710678 -0.70710678]]

# W_sanger at the end of the training process
print(W_sanger)
# [[-0.72730535 -0.69957863]
#  [-0.67330094  0.72730532]]