Last active
May 16, 2024 07:47
-
-
Save derbydefi/616b1d5b986610f7cb5120ccaac85915 to your computer and use it in GitHub Desktop.
Extreme Learning Machine Python Implementation
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import time

import torch
import torchvision
import torchvision.transforms as transforms
class ELM(torch.nn.Module):
    """Extreme Learning Machine: one hidden layer with a fixed random
    projection; only the linear readout is ever learned (analytically).

    ``forward`` returns hidden-layer activations only. The readout matrix
    lives in ``output_weights`` and is filled in by the training routine.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.hidden_size = hidden_size
        # The random projection and biases are frozen (requires_grad=False):
        # an ELM never backpropagates through its hidden layer.
        self.input_weights = torch.nn.Parameter(
            torch.randn(input_size, hidden_size) * 0.1, requires_grad=False
        )
        self.biases = torch.nn.Parameter(
            torch.randn(hidden_size) * 0.1, requires_grad=False
        )
        # NOTE(review): output_size is accepted for API symmetry but unused;
        # output_weights is set externally by the closed-form solve.
        self.output_weights = None

    def forward(self, x):
        """Return hidden activations sigmoid(x @ W + b), shape (n, hidden_size)."""
        pre_activation = x @ self.input_weights + self.biases
        return torch.sigmoid(pre_activation)
def train_elm(model, X, T):
    """Solve for the ELM readout weights in closed form.

    Computes hidden activations H = model(X), then sets
    ``model.output_weights = pinv(H) @ T`` — the least-squares solution of
    H @ W = T. No gradient-based training is involved.

    Args:
        model: callable mapping X to hidden activations; must allow an
            ``output_weights`` attribute to be set on it.
        X: input matrix, shape (n_samples, input_size).
        T: target matrix (e.g. one-hot labels), shape (n_samples, n_classes).
    """
    start_time = time.time()
    with torch.no_grad():
        H = model(X)
        # torch.linalg.pinv supersedes the deprecated torch.pinverse.
        H_pseudo_inverse = torch.linalg.pinv(H)
        model.output_weights = torch.matmul(H_pseudo_inverse, T)
    end_time = time.time()
    print(f"Training Time: {end_time - start_time:.4f} seconds")
def predict(model, X):
    """Return predicted class indices (argmax over readout scores) for X.

    Projects X through the fixed hidden layer, applies the learned
    ``output_weights``, and reduces each row to its highest-scoring class.
    """
    t0 = time.time()
    with torch.no_grad():
        hidden = model(X)
        scores = torch.matmul(hidden, model.output_weights)
    elapsed = time.time() - t0
    print(f"Prediction Time: {elapsed:.4f} seconds")
    return torch.argmax(scores, dim=1)
def accuracy(y_true, y_pred):
    """Fraction of positions where y_true matches y_pred, as a 0-dim tensor."""
    correct = torch.eq(y_true, y_pred)
    return torch.mean(correct.float())
# --- Load MNIST data ---
# ToTensor scales pixels to [0, 1]; Normalize((0.5,), (0.5,)) maps them to [-1, 1].
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
# batch_size=len(dataset) yields a single batch with the whole split:
# ELM training is one closed-form solve over all samples at once.
trainloader = torch.utils.data.DataLoader(trainset, batch_size=len(trainset), shuffle=True)
testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=len(testset), shuffle=False)

# --- Prepare the data ---
# Grab the single full-dataset batch directly instead of a for/break loop.
inputs, targets = next(iter(trainloader))
X_train = inputs.view(-1, 28 * 28)  # flatten each 28x28 image to a 784-vector
T_train = torch.nn.functional.one_hot(targets, num_classes=10).float()

inputs, targets = next(iter(testloader))
X_test = inputs.view(-1, 28 * 28)
y_test = targets
print("data preparing done")

# --- Model configuration ---
input_size = 784    # 28x28 pixels
hidden_size = 1000  # number of hidden neurons
output_size = 10    # number of output classes
model = ELM(input_size, hidden_size, output_size)

# --- Training (single closed-form solve, no epochs) ---
train_elm(model, X_train, T_train)

# --- Testing ---
predictions = predict(model, X_test)
test_accuracy = accuracy(y_test, predictions)
print(f"Test Accuracy: {test_accuracy.item() * 100:.2f}%")
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
1000 Hidden Neurons:
Training Time: 2.3480 seconds
Prediction Time: 0.0872 seconds
Test Accuracy: 94.12%
5000 Hidden Neurons:
Training Time: 39.7465 seconds
Prediction Time: 0.3457 seconds
Test Accuracy: 97.28%
10000 Hidden Neurons:
Training Time: 213.8608 seconds
Prediction Time: 0.6887 seconds
Test Accuracy: 97.66%