Neural Networks
# autoencoder.py: Autoencoder in PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd

# Load the data
final_df = pd.read_csv('table.csv')
final_df.head(5)

# Create the autoencoder: compress 8 features down to 1 and reconstruct them
class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(8, 1)
        )
        self.decoder = nn.Sequential(
            nn.Linear(1, 8)
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x

# Hyperparameters
learning_rate = 0.1  # learning rate for the optimizer

# Create the model, loss, and optimizer
model = Autoencoder()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model, one row at a time
losses = []
for i in range(final_df.shape[0]):
    # Get one row of the data as a float tensor
    data = torch.tensor(final_df.iloc[i].values).float()
    # Forward pass
    output = model(data)
    # Calculate the reconstruction loss
    loss = criterion(output, data)
    # Backward pass and parameter update
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    losses.append(loss.item())

# Plot the loss
import matplotlib.pyplot as plt
plt.plot(losses)
plt.savefig('grafico-perdida.png')  # save the loss plot

# Show all parameters of the model
print(model.state_dict())
# Show weights and bias of the encoder
print(model.encoder[0].weight)
print(model.encoder[0].bias)

# Export the state_dict and the full model
torch.save(model.state_dict(), 'modelo_dict.pt')
torch.save(model, 'modelo.pt')
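
To reuse the exported weights later, they can be loaded back into a fresh instance. A minimal sketch using the filenames above:

# Reload the exported state_dict into a new Autoencoder instance
model = Autoencoder()
model.load_state_dict(torch.load('modelo_dict.pt'))
model.eval()  # switch to evaluation mode before inference
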
# fnn.py: Feedforward neural network in PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd

# Load the data
final_df = pd.read_csv('table.csv')
final_df.head(5)

# Define the model: 4 inputs -> 8 hidden units -> 4 outputs
class FeedForwardNN(nn.Module):
    def __init__(self):
        super(FeedForwardNN, self).__init__()
        self.fc1 = nn.Linear(4, 8)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(8, 4)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

# Hyperparameters
learning_rate = 0.1  # learning rate for the optimizer
num_epochs = 100     # full passes over the data (an illustrative choice)

# Create the model, loss, and optimizer
model = FeedForwardNN()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Inputs are the first 4 columns of the dataframe,
# labels are the last 4 columns
inputs = torch.from_numpy(final_df.iloc[:, :4].values).float()
labels = torch.from_numpy(final_df.iloc[:, 4:].values).float()

# Train the model
losses = []
for epoch in range(num_epochs):
    # Forward pass
    output = model(inputs)
    # Calculate the loss
    loss = criterion(output, labels)
    # Backward pass and parameter update
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    losses.append(loss.item())

# Plot the loss
import matplotlib.pyplot as plt
plt.plot(losses)
plt.savefig('grafico-perdida.png')  # save the loss plot

# Show all parameters of the model
print(model.state_dict())

# Export the state_dict and the full model
torch.save(model.state_dict(), 'modelo_dict.pt')
torch.save(model, 'modelo.pt')
# mlp.py: Multilayer perceptron in PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd

# Load the data
final_df = pd.read_csv('table.csv')
final_df.head(5)

# Define the model: 4 inputs -> 8 hidden neurons -> 1 output
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(4, 8)   # first hidden layer with 8 neurons
        self.fc2 = nn.Linear(8, 1)   # output layer with 1 neuron
        self.activation = nn.ReLU()  # activation function for the hidden layer

    def forward(self, x):
        out = self.fc1(x)
        out = self.activation(out)
        out = self.fc2(out)
        return out

# Hyperparameters
learning_rate = 0.1  # learning rate for the optimizer
num_epochs = 100     # full passes over the data (an illustrative choice)

# Create the model, loss, and optimizer
model = MLP()
criterion = nn.BCEWithLogitsLoss()  # binary cross-entropy on raw logits
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Inputs are the first 4 columns of the dataframe,
# the label is the 5th column (it should hold 0/1 values for BCE)
inputs = torch.from_numpy(final_df.iloc[:, :4].values).float()
labels = torch.from_numpy(final_df.iloc[:, 4:5].values).float()

# Train the model
losses = []
for epoch in range(num_epochs):
    # Forward pass
    output = model(inputs)
    # Calculate the loss
    loss = criterion(output, labels)
    # Backward pass and parameter update
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    losses.append(loss.item())

# Plot the loss
import matplotlib.pyplot as plt
plt.plot(losses)
plt.savefig('grafico-perdida.png')  # save the loss plot

# Show all parameters of the model
print(model.state_dict())

# Export the state_dict and the full model
torch.save(model.state_dict(), 'modelo_dict.pt')
torch.save(model, 'modelo.pt')

Popular and simple neural network architectures

Here are a few popular and simple neural network architectures:

Perceptron:

perceptron.py

It is a simple linear classifier that can be used for binary classification problems.

Multilayer Perceptron (MLP):

mlp.py

It is a feedforward neural network with one or more layers of perceptrons. MLP can be used for a variety of tasks such as image classification, natural language processing, and time series prediction.

Autoencoder:

autoencoder.py

It is a neural network that is trained to copy its input to its output. Autoencoders are commonly used for dimensionality reduction and feature extraction.

Feedforward Neural Network (FNN):

fnn.py

It is a type of artificial neural network that is composed of layers of interconnected nodes or neurons. FNNs are commonly used for classification and regression tasks.

Convolutional Neural Network (CNN):

cnn.py

It is a neural network designed to process data with a grid-like topology, such as an image. CNNs are commonly used in computer vision tasks such as image classification, object detection, and semantic segmentation.
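
The cnn.py file itself is not included above, so the following is only an illustrative sketch; the layer sizes are assumptions for 1-channel 28x28 inputs and 10 classes, not the gist's actual code.

# cnn.py sketch: minimal convolutional network in PyTorch
import torch
import torch.nn as nn

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv = nn.Conv2d(1, 8, kernel_size=3, padding=1)  # 1 input channel -> 8 feature maps
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(2)           # halve resolution: 28x28 -> 14x14
        self.fc = nn.Linear(8 * 14 * 14, 10)  # 10 output classes (assumed)

    def forward(self, x):
        out = self.pool(self.relu(self.conv(x)))
        out = out.view(out.size(0), -1)  # flatten for the linear layer
        return self.fc(out)

model = CNN()
print(model(torch.randn(1, 1, 28, 28)).shape)  # torch.Size([1, 10])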

Recurrent Neural Network (RNN):

rnn.py

It is a neural network that can process sequential data, such as time series or natural language. RNNs are commonly used in tasks such as language modeling, speech recognition, and machine translation.
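
As with cnn.py, the rnn.py source is not shown above; a minimal sketch, assuming sequences of 4 features and a single regression output:

# rnn.py sketch: minimal recurrent network in PyTorch
import torch
import torch.nn as nn

class SimpleRNN(nn.Module):
    def __init__(self):
        super(SimpleRNN, self).__init__()
        self.rnn = nn.RNN(input_size=4, hidden_size=8, batch_first=True)  # assumed sizes
        self.fc = nn.Linear(8, 1)  # map the hidden state to one output

    def forward(self, x):
        out, _ = self.rnn(x)           # out: (batch, seq_len, hidden_size)
        return self.fc(out[:, -1, :])  # predict from the last time step

model = SimpleRNN()
print(model(torch.randn(2, 5, 4)).shape)  # torch.Size([2, 1])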

Long Short-Term Memory (LSTM):

lstm.py

It is a variant of RNN that is designed to handle the problem of vanishing gradients. LSTMs are commonly used in tasks such as language modeling, speech recognition, and machine translation.
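
Again, lstm.py is not shown above; a minimal sketch with the same assumed sizes as the RNN example, swapping in nn.LSTM, which adds the cell state that mitigates vanishing gradients:

# lstm.py sketch: minimal LSTM network in PyTorch
import torch
import torch.nn as nn

class SimpleLSTM(nn.Module):
    def __init__(self):
        super(SimpleLSTM, self).__init__()
        self.lstm = nn.LSTM(input_size=4, hidden_size=8, batch_first=True)  # assumed sizes
        self.fc = nn.Linear(8, 1)

    def forward(self, x):
        out, (h_n, c_n) = self.lstm(x)  # h_n: final hidden state, c_n: final cell state
        return self.fc(out[:, -1, :])   # predict from the last time step

model = SimpleLSTM()
print(model(torch.randn(2, 5, 4)).shape)  # torch.Size([2, 1])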

Note that this list covers simple, widely used architectures; it is not exhaustive.

# perceptron.py: Perceptron in PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd

# Load the data
final_df = pd.read_csv('table.csv')
final_df.head(5)

# Define the model: a single linear layer with one output neuron
class Perceptron(nn.Module):
    def __init__(self):
        super(Perceptron, self).__init__()
        self.fc = nn.Linear(4, 1)  # only one output neuron

    def forward(self, x):
        out = self.fc(x)
        return out

# Hyperparameters
learning_rate = 0.1  # learning rate for the optimizer
num_epochs = 100     # full passes over the data (an illustrative choice)

# Create the model, loss, and optimizer
model = Perceptron()
criterion = nn.BCEWithLogitsLoss()  # binary cross-entropy on raw logits
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Inputs are the first 4 columns of the dataframe,
# the label is the 5th column (it should hold 0/1 values for BCE)
inputs = torch.from_numpy(final_df.iloc[:, :4].values).float()
labels = torch.from_numpy(final_df.iloc[:, 4:5].values).float()

# Train the model
losses = []
for epoch in range(num_epochs):
    # Forward pass
    output = model(inputs)
    # Calculate the loss
    loss = criterion(output, labels)
    # Backward pass and parameter update
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    losses.append(loss.item())

# Plot the loss
import matplotlib.pyplot as plt
plt.plot(losses)
plt.savefig('grafico-perdida.png')  # save the loss plot

# Show all parameters of the model
print(model.state_dict())

# Export the state_dict and the full model
torch.save(model.state_dict(), 'modelo_dict.pt')
torch.save(model, 'modelo.pt')
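
Because BCEWithLogitsLoss applies the sigmoid internally, the perceptron and MLP above output raw logits. A minimal sketch of turning them into probabilities and class labels at inference time:

# Convert raw logits to probabilities and 0/1 predictions
with torch.no_grad():
    logits = model(inputs)
    probs = torch.sigmoid(logits)  # probabilities in [0, 1]
    preds = (probs > 0.5).float()  # threshold at 0.5
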
table.csv (sample data read by the scripts above):

A B C D E F G H
49.1 48.7 49.02 49.03 49.01 49.0 49.02 49.01
49.18 48.94 49.6 49.01 49.01 49.01 49.01 49.02
49.18 48.85 49.02 49.1 49.03 49.0 49.04 49.04
49.48 49.27 49.33 49.0 49.03 49.01 49.01 49.04
49.36 48.96 49.82 49.0 49.01 49.02 49.01 49.04
49.21 49.58 49.02 49.0 49.02 49.0 49.01 49.03
49.08 48.97 49.98 49.0 49.02 49.01 49.02 49.02
48.88 48.76 49.13 49.77 49.01 49.0 49.01 49.04