import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np

sequence_length = 10
print(sequence_length)
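
# Toy experiment: train a single-layer GRU binary classifier on a fixed random
# batch while tracking gradients on the input data and the initial hidden
# state, so both can be inspected alongside the weight updates.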
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.gru = nn.GRU(input_size=5, hidden_size=5, num_layers=1, batch_first=True)
        self.linear = nn.Linear(5, 1)

    def forward(self, x, h0):
        # h0 arrives as (batch, num_layers, hidden); nn.GRU expects
        # (num_layers, batch, hidden), so swap the first two dims.
        h0 = h0.permute(1, 0, 2)
        out, h = self.gru(x, h0)
        # Keep only the output at the last time step.
        x = out[:, -1, :]
        x = self.linear(x)
        x = torch.sigmoid(x)
        return x
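
# With batch_first=True, x is (batch, seq_len, input_size) = (4, 10, 5) here,
# and the sigmoid output is (batch, 1), matching BCELoss targets of shape (4, 1).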
model = Net()
model = model.cuda()
# Earlier variant: optimize the initial hidden state directly.
# h0 = torch.randn(4, 1, 5, requires_grad=True, device="cuda")
# optimizer = optim.Adam([h0], lr=0.001)
criterion = torch.nn.BCELoss()
print("model")
#print("model params: {}".format(list(model.parameters())))
# Fixed random input batch: (batch=4, seq_len=10, features=5).
data = torch.normal(
    torch.zeros(4, sequence_length, 5),
    torch.ones(4, sequence_length, 5),
)
# Track gradients w.r.t. the input itself (Variable is deprecated since PyTorch 0.4).
data = data.cuda().requires_grad_(True)
# Targets must match the output shape (4, 1) for BCELoss.
label = torch.from_numpy(np.array([1, 0, 1, 0]).astype('float32')).unsqueeze(1)
label = label.cuda()
# optimizer = optim.Adam([data], lr=0.001)
optimizer = optim.Adam(model.parameters(), lr=0.001)
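# The commented-out optimizers above are alternatives from the same experiment:
# passing [data] (or [h0]) to Adam would update the inputs / initial hidden
# state instead of the network weights.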
print("data")
for epoch in range(1000):
    # Fresh random initial hidden state each epoch; requires_grad=True so
    # h0.grad is populated by backward() (otherwise it would print None).
    h0 = torch.randn(4, 1, 5, requires_grad=True, device="cuda")
    output = model(data, h0)
    # print(output.shape)
    # print(output)
    loss = criterion(output, label)
    print("loss ", loss.item())
    optimizer.zero_grad()
    loss.backward()
    # Gradients w.r.t. the input batch and the initial hidden state.
    print(data.grad, h0.grad)
    optimizer.step()
    event = 'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
        epoch, 1 * len(data), 1, 100. * 1 / 1, loss.item() / 1)
    # print(event)