from collections import deque
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader

class RNN(nn.Module):
    def __init__(self, input_size, output_size, hidden_size, n_layers):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.rnn = nn.RNN(input_size, hidden_size, n_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        # x      (batch_size, seq_length, input_size)
        # hidden (n_layers, batch_size, hidden_size)
        # out    (batch_size, seq_length, hidden_size)
        out, hidden = self.rnn(x, hidden)
        out = out[:, -1, :]  # only the last time step feeds the linear layer
        out = self.fc(out)
        return out, hidden

    def init_hidden(self, batch_size):
        return torch.zeros(self.n_layers, batch_size, self.hidden_size)
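
# Quick shape sanity check (illustrative only; the small sizes below are
# arbitrary and not part of the original gist):
_check = RNN(input_size=1, output_size=1, hidden_size=8, n_layers=1)
_x = torch.zeros(4, 10, 1)  # (batch, seq_len, input_size)
_out, _hidden = _check(_x, _check.init_hidden(4))
assert _out.shape == (4, 1) and _hidden.shape == (1, 4, 8)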

seq_length = 20
input_size = 1
output_size = 1
hidden_dim = 64
n_layers = 1
batch_size = 32
n_epochs = 10

time = np.arange(0, 100, 0.01)
data = np.sin(time)

rnn = RNN(input_size, output_size, hidden_dim, n_layers)
print(rnn)

criterion = nn.MSELoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=0.001)

def batch_data(data, sequence_length, batch_size):
    # Slide a window of sequence_length over the series; the value right
    # after each window is the prediction target.
    sequences = len(data) - sequence_length
    inputs = np.zeros((sequences, sequence_length), dtype=np.float32)
    targets = np.zeros((sequences,), dtype=np.float32)

    for start in range(sequences):
        end = start + sequence_length
        inputs[start] = data[start:end]
        targets[start] = data[end]

    dataset = TensorDataset(torch.from_numpy(inputs), torch.from_numpy(targets))
    dataloader = DataLoader(dataset, shuffle=False, batch_size=batch_size, drop_last=True)
    return dataloader
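
# Windowing illustration (assumption: toy values chosen only for this demo):
# for data = [0, 1, 2, 3, 4, 5] and sequence_length = 3 the loader yields
# ([0,1,2] -> 3), ([1,2,3] -> 4) and ([2,3,4] -> 5).
_demo = batch_data(np.arange(6, dtype=np.float32), 3, 1)
for _x, _y in _demo:
    print(_x.numpy(), "->", _y.numpy())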
train_loader = batch_data(data, seq_length, batch_size)

def train(rnn, n_epochs):
    rnn.train()
    print("Training for %d epoch(s)..." % n_epochs)

    for epoch_i in range(1, n_epochs + 1):
        epoch_losses = []
        gen_out = deque(maxlen=len(train_loader) * batch_size)

        for batch_i, (inputs, targets) in enumerate(train_loader):
            hidden = rnn.init_hidden(batch_size)  # fresh hidden state per batch
            inputs = inputs.reshape((batch_size, seq_length, 1))
            targets = targets.reshape((batch_size, 1))

            prediction, hidden = rnn(inputs, hidden)
            loss = criterion(prediction, targets)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            epoch_losses.append(loss.item())
            gen_out.extend(prediction.detach().numpy().flatten())

        plt.plot(gen_out)
        plt.show()
        print('Epoch: {:>4}/{:<4} Loss: {}'.format(epoch_i, n_epochs, np.average(epoch_losses)))

    return rnn
trained_rnn = train(rnn, n_epochs)
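
# Optionally persist the trained weights (the file name is illustrative):
# torch.save(trained_rnn.state_dict(), "rnn_sine.pt")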

def generate(rnn, current_seq, predict_len=10000):
    rnn.eval()
    gen_seq = deque(current_seq, maxlen=seq_length)  # rolling input window
    gen_out = deque(maxlen=predict_len)

    with torch.no_grad():
        for i in range(predict_len):
            hidden = rnn.init_hidden(1)
            inputs = torch.from_numpy(np.array(gen_seq, dtype=np.float32)).reshape((1, seq_length, 1))
            output, hidden = rnn(inputs, hidden)

            # Feed each prediction back in as the next input step
            np_out = output.numpy()[0][0].astype(float)
            gen_out.append(np_out)
            gen_seq.append(np_out)

    return gen_out
generated = generate(trained_rnn, data[0:20])
plt.plot(generated)
plt.show()
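
# Optional visual check (a sketch; assumes the generated sequence continues
# the series from index seq_length onwards):
n_cmp = min(len(generated), len(data) - seq_length)
plt.plot(list(generated)[:n_cmp], label="generated")
plt.plot(data[seq_length:seq_length + n_cmp], label="ground truth")
plt.legend()
plt.show()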