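# One-step linear dynamics model in PyTorch, with per-epoch
# torch.cuda.memory_allocated() readings to track GPU memory growth
# during training.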
from __future__ import print_function

import gc

import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
class MyData(Dataset):
    def __init__(self, u, p, x, n_future_steps=1):
        # Stored as non-trainable Parameters; requires_grad=False keeps
        # the data out of the autograd graph.
        self.x = nn.Parameter(x, requires_grad=False)
        self.p = nn.Parameter(p, requires_grad=False)
        self.u = nn.Parameter(u, requires_grad=False)
        self.nfeatures = x.shape[1]
        self.n_future_steps = n_future_steps

    def __len__(self):
        return len(self.x) - self.n_future_steps

    def __getitem__(self, idx):
        indices = slice(idx, idx + self.n_future_steps)
        x_true_indices = slice(idx + 1, idx + self.n_future_steps + 1)
        return (self.u[indices], self.p[indices],
                self.x[indices], self.x[x_true_indices])
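# Indexing sketch (my illustration, not part of the original gist): with
# n_future_steps=1, data[t] returns
#     (u[t:t+1], p[t:t+1], x[t:t+1], x[t+1:t+2])
# i.e. the inputs and state at time t, plus the state one step ahead as
# the regression target.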
class Dynamics(nn.Module):
    def __init__(self, nfeatures):
        super(Dynamics, self).__init__()
        # A, B: (nfeatures, nfeatures) transition matrices; C: (nfeatures,)
        # input weights. All initialized from N(0, 0.5).
        self.A = nn.Parameter(T.normal(T.zeros(nfeatures, nfeatures), 0.5), requires_grad=True)
        self.B = nn.Parameter(T.normal(T.zeros(nfeatures, nfeatures), 0.5), requires_grad=True)
        self.C = nn.Parameter(T.normal(T.zeros(nfeatures), 0.5), requires_grad=True)

    def forward(self, u, p, x):
        # One-step prediction, batched over dim 0. squeeze(-1) (rather than
        # a bare squeeze()) keeps the batch dim when batch_size == 1.
        transition = T.matmul(self.A + p[:, 0, None, None] * self.B,
                              x[:, 0, :, None]).squeeze(-1)
        return (x[:, 0] + transition + u[:, 0, None] * self.C)[:, None]
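# For reference, my reading of the forward pass above as an update rule:
#     x_{t+1} = x_t + (A + p_t * B) @ x_t + u_t * C
# a linear dynamical system where p_t scales a second transition matrix
# and u_t scales an additive input vector.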
def train(model, data, nepochs=10, lambdaA=1e-8, lambdaB=1e-6, lr=0.001,
          batch_size=1):
    dataloader = DataLoader(data, batch_size=batch_size, shuffle=True)
    optimizer = T.optim.Adam(model.parameters(), lr=lr)
    og_mem = T.cuda.memory_allocated() / 1024**2
    print("Allocated Memory: {} MB".format(og_mem))
    for e in range(nepochs):
        for batch_data in dataloader:
            U, P, X, X_true = batch_data
            X_pred = model(U, P, X)
            mse_loss = F.mse_loss(X_pred, X_true)
            # L1 penalties push A and B toward sparsity.
            l1_B = model.B.norm(1)
            l1_A = model.A.norm(1)
            loss = mse_loss + lambdaA * l1_A + lambdaB * l1_B
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Drop all references to graph tensors before measuring memory.
            del X_pred, U, P, X, X_true, mse_loss, l1_A, l1_B, loss
        gc.collect()
        T.cuda.empty_cache()
        mem = T.cuda.memory_allocated() / 1024**2
        print("New allocations: {} MB".format(mem - og_mem))
        og_mem = mem
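# Note on the pattern above (my commentary): del removes the Python
# references to the batch and loss tensors, gc.collect() reclaims any
# lingering cycles, and T.cuda.empty_cache() returns cached blocks to the
# GPU. T.cuda.memory_allocated() then reflects only tensors still
# referenced, so a "New allocations" figure that grows every epoch points
# to something retaining the autograd graph.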
ntime = 2826
nstim = 30
nfeatures = 15888

# Random stand-in data on the GPU.
u_train = T.rand(ntime).cuda()
p_train = T.rand(ntime).cuda()
time_train = T.from_numpy(np.arange(ntime)).cuda()
x_train = T.rand(ntime, nfeatures).cuda()

n_future_steps = 1
batch_size = 1
data = MyData(u_train, p_train, x_train, n_future_steps)
model = Dynamics(data.nfeatures)
model.to("cuda")

# Baseline memory check: forward passes under no_grad build no graph.
og_mem = T.cuda.memory_allocated() / 1024**2
print("Allocated Memory: {} MB".format(og_mem))
with T.no_grad():
    for t in range(10):
        x_pred = T.squeeze(model(u_train[[t], None], p_train[[t], None],
                                 x_train[[t], None]))
mem = T.cuda.memory_allocated() / 1024**2
print("New allocations: {} MB".format(mem - og_mem))

train(model, data, 1, 1e-4, 1e-6, lr=0.001, batch_size=batch_size)
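# Optional extra check (my addition; assumes torch >= 0.4, where
# max_memory_allocated() is available): the high-water mark over the run.
print("Peak allocated: {} MB".format(T.cuda.max_memory_allocated() / 1024**2))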