

@kongqi404
Created March 21, 2023 13:09
probabilistic forecasting
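A PyTorch Lightning wrapper for probabilistic load forecasting. The wrapped model maps an input window to the mean and standard deviation (mu, sigma) of a Gaussian over the prediction horizon; training minimizes the Gaussian negative log-likelihood with mu detached (so the NLL term trains sigma) plus an MSE penalty on mu, i.e. loss = -mean(log N(y | mu.detach(), sigma)) + MSE(mu, y). At test time it draws 100 Monte Carlo samples per step to form the plotted 75% and 90% prediction intervals and saves sample figures to a PDF. ElectricDataModule and the metric helpers (accuracy, rmse, mae, mape, mse) are project-local and not included in the gist.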
import time

import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from matplotlib.backends.backend_pdf import PdfPages

# ElectricDataModule and the metric helpers (accuracy, rmse, mae, mape, mse)
# are project-local modules that this gist assumes are importable.


class GaussianDistribution(pl.LightningModule):
    def __init__(self, model: torch.nn.Module, seq_len: int, pred_len: int,
                 data_path: str,
                 quantile: list[float] = [0.1, 0.25, 0.5, 0.75, 0.9],
                 lr: float = 0.00001,
                 batch_size: int = 32,
                 pdf_path: str = "./") -> None:
        super().__init__()
        self.save_hyperparameters(ignore=["model"])
        self.model = model
        self.data_pl = ElectricDataModule(
            batch_size=batch_size, data_path=data_path)
        self.lr = lr
        # One PDF of forecast plots per test run, named after the model class
        # plus a timestamp (time.strftime already returns a str).
        self.pdf_str = (pdf_path + self.model.__class__.__name__
                        + time.strftime("%Y-%m-%d-%H%M%S", time.localtime())
                        + ".pdf")
        self.quantile = torch.as_tensor(quantile)
        self.seq_len = seq_len
        self.pred_len = pred_len
    def forward(self, x):
        # The wrapped model returns the Gaussian parameters (mu, sigma).
        return self.model(x)
    def configure_optimizers(self) -> torch.optim.Optimizer:
        return torch.optim.Adam(self.model.parameters(), lr=self.lr)
    def training_step(self, train_batch, batch_idx):
        batch_x, batch_y, batch_x_mark, batch_y_mark = train_batch
        mu, sigma = self.model(batch_x)
        f_dim = 0  # train on the first (target) feature only
        # Detach mu inside the likelihood: the NLL term then only trains
        # sigma, while the MSE term trains mu.
        mu_new = mu.detach()
        distribution = torch.distributions.Normal(
            mu_new[:, -self.pred_len:, f_dim],
            sigma[:, -self.pred_len:, f_dim])
        mse_loss = F.mse_loss(mu[:, -self.pred_len:, f_dim],
                              batch_y[:, -self.pred_len:, f_dim])
        loss = -distribution.log_prob(
            batch_y[:, -self.pred_len:, f_dim]).mean() + mse_loss
        self.log("train_loss", loss)
        return loss
    def validation_step(self, valid_batch, batch_idx):
        batch_x, batch_y, batch_x_mark, batch_y_mark = valid_batch
        mu, sigma = self.model(batch_x)
        f_dim = 0
        distribution = torch.distributions.Normal(
            mu[:, -self.pred_len:, f_dim], sigma[:, -self.pred_len:, f_dim])
        mse_loss = F.mse_loss(mu[:, -self.pred_len:, f_dim],
                              batch_y[:, -self.pred_len:, f_dim])
        loss = -distribution.log_prob(
            batch_y[:, -self.pred_len:, f_dim]).mean() + mse_loss
        self.log("val_loss", loss)
    def on_test_epoch_start(self):
        self.pdf = PdfPages(self.pdf_str)
    def test_step(self, test_batch, batch_idx):
        batch_x, batch_y, batch_x_mark, batch_y_mark = test_batch
        mu, sigma = self.model(batch_x)
        distribution = torch.distributions.Normal(
            mu[:, -self.pred_len:, :], sigma[:, -self.pred_len:, :])
        mse_loss = F.mse_loss(mu[:, -self.pred_len:, :],
                              batch_y[:, -self.pred_len:, :])
        loss = -distribution.log_prob(
            batch_y[:, -self.pred_len:, :]).mean() + mse_loss
        self.log("test_loss", loss)
        # Monte Carlo prediction intervals: draw 100 samples and take the
        # empirical quantiles along the sample dimension.
        sample = distribution.sample((100,))
        self.quantile = self.quantile.to(batch_x.device)
        sample_quantile = torch.quantile(sample, self.quantile, dim=0)
        lower_ninety = sample_quantile[0]   # 0.10 quantile
        lower_quarter = sample_quantile[1]  # 0.25 quantile
        # The analytic mean serves as the point forecast; the 0.5 sample
        # quantile at index 2 is left unused.
        mean = distribution.mean
        upper_quarter = sample_quantile[3]  # 0.75 quantile
        upper_ninety = sample_quantile[4]   # 0.90 quantile
        def inverse(t: torch.Tensor) -> np.ndarray:
            # Undo the dataset scaling, preserving the tensor's shape.
            return self.data_pl.inverse_transform(
                data=t.reshape(-1, t.size(-1)).detach().cpu().numpy()
            ).reshape(t.size())

        outputs_true = inverse(mean)
        lower_ninety = inverse(lower_ninety)
        lower_quarter = inverse(lower_quarter)
        upper_quarter = inverse(upper_quarter)
        upper_ninety = inverse(upper_ninety)
        batch_y_true = inverse(batch_y)
        batch_x_true = inverse(batch_x)
        # Clamp the point forecast to be non-negative and keep only the
        # prediction window.
        outputs_true = np.maximum(outputs_true, 0)
        outputs_true = outputs_true[:, -self.pred_len:, :]
        batch_y_true = batch_y_true[:, -self.pred_len:, :]
        lower_ninety = lower_ninety[:, -self.pred_len:, :]
        lower_quarter = lower_quarter[:, -self.pred_len:, :]
        upper_quarter = upper_quarter[:, -self.pred_len:, :]
        upper_ninety = upper_ninety[:, -self.pred_len:, :]
        # Point-forecast metrics on the target feature.
        self.log("test_acc", accuracy(
            outputs_true[:, :, 0], batch_y_true[:, :, 0]))
        self.log("test_rmse", rmse(
            outputs_true[:, :, 0], batch_y_true[:, :, 0]))
        self.log("test_mae", mae(outputs_true[:, :, 0], batch_y_true[:, :, 0]))
        self.log("test_mape", mape(
            outputs_true[:, :, 0], batch_y_true[:, :, 0]))
        self.log("test_mse", mse(outputs_true[:, :, 0], batch_y_true[:, :, 0]))
        # Plot a random 10% of the batch: history plus ground truth, the
        # point forecast, and the shaded 75% / 90% prediction intervals.
        x_axis = list(range(self.seq_len + self.pred_len))
        y_all = np.concatenate((batch_x_true, batch_y_true), axis=1)
        for idx in np.random.choice(outputs_true.shape[0],
                                    int(outputs_true.shape[0] * 0.1),
                                    replace=False):
            plt.figure()
            plt.plot(x_axis, y_all[idx, :, 0], label="true",
                     linewidth=0.75, alpha=0.75)
            plt.plot(x_axis[-self.pred_len:], outputs_true[idx, :, 0],
                     label="pred", linewidth=0.75)
            plt.fill_between(x=x_axis[-self.pred_len:],
                             y1=lower_ninety[idx, :, 0],
                             y2=upper_ninety[idx, :, 0],
                             alpha=0.25, label="90%",
                             color="green", linewidth=0.1)
            plt.fill_between(x=x_axis[-self.pred_len:],
                             y1=lower_quarter[idx, :, 0],
                             y2=upper_quarter[idx, :, 0],
                             alpha=0.5, label="75%",
                             color="green", linewidth=0.1)
            plt.legend(["true", "pred", "90%", "75%"])
            self.pdf.savefig()
            plt.close()
    def test_epoch_end(self, outputs):
        # Lightning < 2.0 hook; in Lightning >= 2.0 use on_test_epoch_end(self).
        self.pdf.close()
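A minimal usage sketch, assuming a hypothetical network MuSigmaNet that returns a (mu, sigma) pair covering the prediction horizon, and a placeholder data path; neither name appears in the gist:

model = MuSigmaNet()  # hypothetical: any module returning (mu, sigma)
module = GaussianDistribution(model, seq_len=96, pred_len=24,
                              data_path="./electricity.csv")  # placeholder path
trainer = pl.Trainer(max_epochs=10)
trainer.fit(module, datamodule=module.data_pl)
trainer.test(module, datamodule=module.data_pl)

Since the module builds its own ElectricDataModule as self.data_pl, passing that same object to the Trainer keeps the scaler used by inverse_transform consistent with the data the model saw.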