Created
October 3, 2020 03:09
-
-
Save truongthanhdat/ff95fb514064e4768bc13b0184a89182 to your computer and use it in GitHub Desktop.
Multi-Layer Perceptron (MLP)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import torch | |
import random | |
import numpy as np | |
import time | |
from tqdm import tqdm | |
import matplotlib.pyplot as plt | |
class Dataset(torch.utils.data.Dataset):
    """Synthetic 2-D point dataset labeled by membership in a circle.

    Points are drawn uniformly from [-2, 2] x [-2, 2]; the label is 1 when
    the point lies inside (or on) the circle of radius ``r`` centered at
    ``o``, else 0.
    """

    def __init__(self, N = 1000, o = (0, 0), r = 1, balance = True):
        super(Dataset, self).__init__()
        self.inputs, self.labels = Dataset.generate_data(N, o, r, balance)

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, index):
        # Return (point, label) as float tensors of shape (2,) and (1,).
        x = np.array(self.inputs[index]).reshape(-1)
        y = np.array(self.labels[index]).reshape(-1)
        x = torch.from_numpy(x).float()
        y = torch.from_numpy(y).float()
        return x, y

    @staticmethod
    def is_inside(p, o, r):
        """Return 1 if point ``p`` lies inside or on the circle (center ``o``, radius ``r``), else 0."""
        v = (p[0] - o[0]) ** 2 + (p[1] - o[1]) ** 2
        return 1 if v <= (r * r) else 0

    @staticmethod
    def generate_data(N, o = (0, 0), r = 1, balance = True):
        """Generate ``N`` labeled points.

        With ``balance=True`` labels strictly alternate 0/1 and each point is
        rejection-sampled until its geometric class matches its label, giving
        an exactly balanced set. With ``balance=False`` points are sampled
        freely and labeled afterwards (class ratio follows the circle's area).
        """
        inputs = []
        labels = []
        for i in range(N):
            if not balance:
                x = random.uniform(-2, 2)
                y = random.uniform(-2, 2)
                inputs.append([x, y])
                labels.append(Dataset.is_inside((x, y), o, r))
            else:
                target = i % 2
                labels.append(target)
                # BUG FIX: the original broke out of this loop when
                # is_inside(...) != target, i.e. it kept the first point whose
                # class DIFFERED from the label, inverting every balanced
                # label. Sample until the point matches its label.
                while True:
                    x = random.uniform(-2, 2)
                    y = random.uniform(-2, 2)
                    if Dataset.is_inside((x, y), o, r) == target:
                        break
                inputs.append([x, y])
        return inputs, labels
class MLP(torch.nn.Module):
    """Feed-forward binary classifier: Tanh hidden layers + sigmoid output.

    ``hidden_dims`` lists the width of each hidden layer; the final linear
    layer maps to a single logit passed through a sigmoid.
    """

    def __init__(self, input_dim = 2, hidden_dims = [10]):
        super(MLP, self).__init__()
        layers = []
        width = input_dim
        for next_width in hidden_dims:
            layers.append(torch.nn.Linear(width, next_width))
            layers.append(torch.nn.Tanh())
            width = next_width
        self.hidden = torch.nn.Sequential(*layers)
        self.output = torch.nn.Linear(width, 1)

    def forward(self, x):
        # Returns a probability in (0, 1) for the positive class.
        return torch.sigmoid(self.output(self.hidden(x)))
def calculate_accuracy(outputs, targets):
    """Return the fraction of thresholded predictions that match ``targets``.

    ``outputs`` holds probabilities; a value >= 0.5 counts as class 1.
    The result is a 0-dim float tensor.
    """
    predictions = (outputs >= 0.5).int()
    return (predictions == targets.int()).float().mean()
def evaluate(net, dataloader, loss_fn):
    """Compute loss and accuracy of ``net`` on one batch from ``dataloader``.

    The dataloaders in this script use batch_size == len(dataset), so the
    single batch fetched here covers the whole split.

    Returns (loss, acc) as 0-dim tensors.
    """
    # BUG FIX: the original rebuilt a fresh torch.nn.BCELoss() here, silently
    # ignoring the loss_fn argument; the parameter is now actually used.
    inputs, targets = next(iter(dataloader))
    if torch.cuda.is_available():
        inputs = inputs.cuda()
        targets = targets.cuda()
    # Evaluation needs no autograd graph; no_grad saves memory and time.
    with torch.no_grad():
        outputs = net(inputs)
        loss = loss_fn(outputs, targets)
        acc = calculate_accuracy(outputs, targets)
    return loss, acc
def train(net, train_dataloader, val_dataloader, test_dataloader, num_iter, lr):
    """Train ``net`` with full-batch manual SGD for ``num_iter`` steps.

    Each step fetches one batch from ``train_dataloader`` (configured in this
    script to hold the entire split), backpropagates a BCE loss, and applies
    the update p <- p - lr * grad by hand. After every step the model is
    evaluated on the train/val/test splits.

    Returns three lists of per-step [loss, accuracy] pairs:
    (train history, val history, test history).
    """
    loss_fn = torch.nn.BCELoss()
    history_train = []
    history_val = []
    history_test = []
    for step in range(num_iter):
        inputs, targets = next(train_dataloader.__iter__())
        if torch.cuda.is_available():
            inputs = inputs.cuda()
            targets = targets.cuda()
        outputs = net(inputs)
        loss = loss_fn(outputs, targets)
        net.zero_grad()
        loss.backward()
        # Manual SGD update on each parameter tensor.
        for param in net.parameters():
            param.data.sub_(param.grad.data * lr)
        acc = calculate_accuracy(outputs, targets)
        print("Iteration [%04d/%04d]. Cross Entropy Loss: %0.4f. Accuracy: %0.4f" % (step + 1, num_iter, loss, acc))
        train_loss, train_acc = evaluate(net, train_dataloader, loss_fn)
        val_loss, val_acc = evaluate(net, val_dataloader, loss_fn)
        test_loss, test_acc = evaluate(net, test_dataloader, loss_fn)
        history_train.append([train_loss.item(), train_acc.item()])
        history_val.append([val_loss.item(), val_acc.item()])
        history_test.append([test_loss.item(), test_acc.item()])
    print("Accuracy on Test Set: %0.4f" % test_acc)
    return history_train, history_val, history_test
if __name__ == "__main__":
    # Each dataloader yields the whole split as a single batch
    # (batch_size == len(dataset)), i.e. full-batch gradient descent.
    train_dataset = Dataset(10000)
    val_dataset = Dataset(2000)
    test_dataset = Dataset(2000)
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size = len(train_dataset),
                                                   drop_last = False)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size = len(val_dataset),
                                                 drop_last = False)
    test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size = len(test_dataset),
                                                  drop_last = False)
    # Train a fresh model for several budgets and plot loss/accuracy curves.
    for num_iter in [10, 100, 1000]:
        lr = 0.01
        net = MLP(input_dim = 2, hidden_dims = [1000])
        if torch.cuda.is_available():
            net.cuda()
        trains, vals, tests = train(net, train_dataloader, val_dataloader, test_dataloader, num_iter, lr)
        iterations = list(range(1, num_iter + 1))
        plt.figure()
        plt.title("Losses")
        plt.xlabel("Iteration")
        plt.ylabel("Loss")
        plt.plot(iterations, [v[0] for v in trains])
        plt.plot(iterations, [v[0] for v in vals])
        plt.legend(["Train", "Val"])
        plt.savefig("train_val_iter_%d.png" % num_iter)
        plt.figure()
        plt.title("Accuracy")
        plt.xlabel("Iteration")
        plt.ylabel("Accuracy")
        plt.plot(iterations, [v[1] for v in tests])
        # BUG FIX: this figure plots only the test-accuracy curve; the
        # original legend wrongly labeled it ["Train", "Val"].
        plt.legend(["Test"])
        plt.savefig("test_iter_%d.png" % num_iter)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment