@demacdolincoln
Created August 3, 2018 17:25
A simple example of using PyTorch for handwritten digit recognition.
################################################################################
#
# recommended reading (sources I also used while writing this script):
# * https://matheusfacure.github.io/2017/05/15/deep-ff-ann-pytorch/
# * http://deeplearningbook.com.br/funcao-de-ativacao/
#
################################################################################
import torch
from torch import nn
from torch.nn import functional as F
from sklearn import datasets
import matplotlib.pyplot as plt
################################################################################
# neural network definition
################################################################################
class Model(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super().__init__()
        # two fully connected hidden layers of the same size
        self.in_to_h1 = nn.Linear(input_size, hidden_size)
        self.h1_to_h2 = nn.Linear(hidden_size, hidden_size)
        self.h2_to_out = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # ReLU activations on the hidden layers; the output layer returns
        # raw logits (CrossEntropyLoss applies softmax internally)
        x = F.relu(self.in_to_h1(x))
        x = F.relu(self.h1_to_h2(x))
        x = self.h2_to_out(x)
        return x
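
# Quick shape sanity check (an illustrative sketch; 64 features and 10 classes
# match the 8x8 sklearn digits dataset loaded below):
#   _probe = Model(64, 128, 10)
#   _probe(torch.zeros(1, 64)).shape  # -> torch.Size([1, 10])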
################################################################################
# dataset preparation
################################################################################
ds = datasets.load_digits()  # 1797 8x8 grayscale digits, flattened to 64 features

# build the tensors straight from the numpy arrays; autograd.Variable is
# deprecated, plain tensors work directly with autograd in modern PyTorch
x_batch = torch.tensor(ds.data, dtype=torch.float32)
y_batch = torch.tensor(ds.target, dtype=torch.long)
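
# Note: the script trains and evaluates on the same data. A held-out split
# would be a small change (sketch, using sklearn's train_test_split):
#   from sklearn.model_selection import train_test_split
#   x_train, x_test, y_train, y_test = train_test_split(ds.data, ds.target)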
################################################################################
# configuration
################################################################################
batch_size = x_batch.shape[0]   # the whole dataset is used as a single batch
input_size = x_batch.shape[1]   # 64 pixels per image
hidden_size = 128
num_classes = len(ds.target_names)
learning_rate = 1e-2

print('input: ', x_batch)
print('target: ', y_batch.view(1, -1))

model = Model(input_size, hidden_size, num_classes)
print(model)
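
# Optional: total trainable parameters (an illustrative one-liner):
#   print(sum(p.numel() for p in model.parameters() if p.requires_grad))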
################################################################################
# training
################################################################################
criterion = nn.CrossEntropyLoss()  # cross-entropy loss over the raw logits
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for epoch in range(100):
    optimizer.zero_grad()             # clear gradients from the previous step
    logit = model(x_batch)            # forward pass over the full batch
    loss = criterion(logit, y_batch)
    loss.backward()                   # backpropagate
    optimizer.step()                  # update the weights
    if epoch % 10 == 0:
        print(f'epoch: {epoch} | loss: {loss.item()}')
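
# Optional: training-set accuracy after the loop (a sketch; note it is
# measured on the same data the model was trained on):
#   with torch.no_grad():
#       preds = model(x_batch).argmax(dim=1)
#       print('accuracy:', (preds == y_batch).float().mean().item())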
################################################################################
# results
################################################################################
with torch.no_grad():  # inference only, no gradients needed
    out = model(x_batch)

while True:
    try:
        index = int(input(f"enter a number up to {batch_size - 1}: "))
        fig, (ax1, ax2) = plt.subplots(1, 2)
        # left: the digit image; right: the model's logit for each class
        ax1.matshow(ds.images[index], cmap=plt.cm.gray_r)
        ax1.set_title(f"expected value: {y_batch[index].item()}\n")
        ax2.plot(out[index].numpy())
        ax2.grid(True)
        ax2.set_title(f"predicted value: {out[index].argmax().item()}\n")
        plt.setp(ax2, xticks=list(range(num_classes)))
        plt.show()
    except (ValueError, IndexError, EOFError):
        break