Skip to content

Instantly share code, notes, and snippets.

@theeluwin
Created January 24, 2018 15:22
Show Gist options
  • Save theeluwin/e62989c56f97be8f50bf276ff34c3919 to your computer and use it in GitHub Desktop.
# -*- coding: utf-8 -*-
import os
import json
import torch
import pickle
import requests
import numpy as np
import torch as t
import torch.nn as nn
import torch.nn.functional as F
from torch import FloatTensor as FT
from torch import LongTensor as LT
from torch.optim import Adam
from torch.autograd import Variable as V
from torch.utils.data import Dataset, DataLoader
class Lotto(Dataset):
    """Sliding-window dataset over a sequence of lottery draw logs.

    Item ``i`` is the slice of ``window + 1`` consecutive logs starting
    at ``i``: the first ``window`` entries serve as the input sequence
    and the final entry is the prediction target.
    """

    def __init__(self, logs, window=10):
        self.logs = logs
        # Precompute every window of length ``window + 1`` up front.
        n_windows = len(logs) - window
        self.data = [self.logs[start: start + window + 1]
                     for start in range(n_windows)]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
class Lottery(nn.Module):
    """LSTM-based next-draw predictor.

    Consumes a ``(seq_len, batch, x_dim)`` sequence of multi-hot draw
    vectors and returns per-number logits for the following draw,
    shaped ``(batch, x_dim)`` — or ``(x_dim,)`` when ``batch == 1``
    (see the squeeze note in ``forward``).
    """

    def __init__(self, x_dim=45, h_dim=128):
        super(Lottery, self).__init__()
        self.x_dim = x_dim
        self.h_dim = h_dim
        # Single-layer, unidirectional LSTM over the draw sequence.
        self.lstm = nn.LSTM(self.x_dim, self.h_dim)
        # Project the final hidden state back onto the number pool.
        self.fc = nn.Linear(self.h_dim, self.x_dim)

    def init_hidden(self, batch_size):
        # Zero initial state shaped (num_layers=1, batch, h_dim).
        return V(t.zeros(1, batch_size, self.h_dim), requires_grad=False)

    def forward(self, x):
        n = x.size(1)  # batch dimension (default LSTM layout)
        state = (self.init_hidden(n), self.init_hidden(n))
        _, (hn, _) = self.lstm(x, state)
        # NOTE(review): the bare .squeeze() also drops the batch axis
        # when batch == 1 (output becomes 1-D). The calling script's
        # prediction step relies on that collapse, so it is preserved
        # here deliberately; squeeze(0) would change caller behavior.
        logits = self.fc(F.leaky_relu(hn.squeeze()))
        return logits
# --- Hyperparameters / constants ---
N = 644            # number of past draws to fetch from the lottery API
M = 45             # size of the number pool (1..45); multi-hot vector width
K = 6              # winning numbers drawn each round
W = 10             # sliding-window length fed to the LSTM
every = 10         # print the loss every `every` epochs
num_epochs = 100

# --- Load draw history, fetching from the lottery API on the first run ---
if not os.path.isfile('logs.dat'):
    urlt = "http://www.nlotto.co.kr/common.do?method=getLottoNumber&drwNo={}"
    logs = []
    for i in range(1, N + 1):
        # Each JSON response carries the drawn numbers as drwtNo1..drwtNo6.
        data = json.loads(requests.get(urlt.format(i)).content)
        # NOTE(review): 'indicies' is a typo for 'indices' (kept as-is).
        # Numbers are 1-based in the API; shift to 0-based indices.
        indicies = [data['drwtNo{}'.format(j)] - 1 for j in range(1, K + 1)]
        # Encode the draw as a 45-dim multi-hot vector.
        log = np.zeros(M)
        log[indicies] = 1
        logs.append(log)
    pickle.dump(logs, open('logs.dat', 'wb'))
else:
    # Cached copy; pickle is acceptable here since the file is produced
    # locally by this very script, not untrusted input.
    logs = pickle.load(open('logs.dat', 'rb'))

# --- Model / optimizer / loss setup ---
model = Lottery()
model.train()
modelpath = 'pts/lottery.pt'
optim = Adam(model.parameters())
optimpath = 'pts/lottery.optim.pt'
# Multi-label target (6 of 45 numbers set), hence BCE with logits.
losser = nn.BCEWithLogitsLoss()
dataset = Lotto(logs, window=W)
# batch_size=len(dataset): full-batch training, one batch per epoch.
dataloader = DataLoader(dataset, batch_size=len(dataset), shuffle=True)

# --- Training loop (one full batch per epoch) ---
for epoch in range(num_epochs):
    seq = next(iter(dataloader))
    # seq is a list of W+1 batched tensors (default collate over the
    # window lists); the first W are the input sequence, the last is
    # the target draw. X is stacked to (W, batch, 45).
    X, y = torch.stack(seq[:-1]), seq[-1]
    X = V(X.float(), requires_grad=False)
    y = V(y.float(), requires_grad=False)
    z = model(X)
    loss = losser(z, y)
    optim.zero_grad()
    loss.backward()
    optim.step()
    if not epoch % every:
        # NOTE(review): loss.data[0] only works on PyTorch < 0.4;
        # newer versions require loss.item() here.
        print("Epoch %03d: %.06f" % (epoch, loss.data[0]))

# --- Predict the next draw from the last W draws ---
# NOTE(review): FT(logs[-10:]) is already a 2-D (10, 45) tensor, so
# torch.stack over it relies on old-PyTorch iteration behavior —
# confirm on the target version. After unsqueeze: (W, 1, 45), i.e.
# a single-element batch, so the model output is 1-D (45,).
X = torch.stack(FT(logs[-10:])).unsqueeze(1)
X = V(X, requires_grad=False)
z = model(X)
# Top 6 logits; +1 shifts back from 0-based indices to lottery numbers.
_, indicies = z.topk(6)
pred = sorted([index + 1 for index in indicies.data])
print(pred)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment