crowsonkb/ptf.py
Created December 26, 2019
"""https://discuss.pytorch.org/t/applying-gradient-descent-to-a-function-using-pytorch/64912"""
import torch
from torch import nn, optim
from torch.utils import data
class NNTest(nn.Module):
def __init__(self):
super().__init__()
self.a = torch.nn.Parameter(torch.tensor(0.7))
self.b = torch.nn.Parameter(torch.tensor(0.02))
def forward(self, x):
return torch.cos(self.a * x[:, 0]) + torch.exp(self.b * x[:, 1])
x1 = torch.rand(10000) * 10
x2 = torch.rand(10000) * 2 - 1
x = torch.stack([x1, x2], dim=-1)
y = torch.cos(0.583 * x1) + torch.exp(0.112 * x2)
dataset = data.TensorDataset(x, y)
loader = data.DataLoader(dataset, batch_size=512, shuffle=True)
model = NNTest()
opt = optim.SGD(model.parameters(), lr=1e-4)
loss_fn = nn.MSELoss()
for epoch in range(50):
model.train()
for inp, out in loader:
opt.zero_grad()
loss = loss_fn(model(inp), out)
loss.backward()
opt.step()
print(f'Epoch: {epoch}, Loss: {loss}')
print(f'Final a: {model.a}, b: {model.b}')
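
# A possible sanity check (not in the original gist): the synthetic targets above
# were generated with a = 0.583 and b = 0.112, so after training the learned
# parameters and the full-dataset MSE should be close to those values.
model.eval()
with torch.no_grad():
    print(f'Target a: 0.583, learned a: {model.a.item():.4f}')
    print(f'Target b: 0.112, learned b: {model.b.item():.4f}')
    print(f'Full-dataset MSE: {loss_fn(model(x), y).item():.6f}')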