@qdwang
Created March 8, 2023 14:07
pytorch learn sin — a minimal example that trains a torch.nn.Transformer to approximate sin(x) on random inputs.
import torch
import random

device = "cuda"  # assumes a CUDA-capable GPU is available


class MyModel(torch.nn.Module):
    def __init__(self, d_model, **args) -> None:
        super(MyModel, self).__init__()
        self.transformer = torch.nn.Transformer(d_model=d_model, **args)

    def forward(self, input):
        # Feed the same sequence as both encoder (src) and decoder (tgt) input.
        output = self.transformer(src=input, tgt=input)
        return output


model = MyModel(d_model=1, nhead=1).to(device)
loss_fn = torch.nn.HuberLoss().to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

model.train()
for i in range(10001):
    # 1000 values drawn uniformly from roughly [-2*pi, 2*pi] (3.2 approximates pi), shape (1000, 1).
    inputs = torch.tensor([[(random.random() * 2 - 1) * 2 * 3.2] for _ in range(1000)]).to(device)
    # The target for each input x is sin(x).
    targets = torch.sin(inputs)

    optimizer.zero_grad()
    outputs = model(inputs)
    loss = loss_fn(outputs, targets)
    loss.backward()
    optimizer.step()

    if i % 100 == 0:
        print(f'step {i} loss {loss.item()}')
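
A quick way to sanity-check the fit after training (not part of the original gist) is to evaluate the model on an evenly spaced grid over the same input range and compare its predictions against torch.sin. The sketch below reuses the model and device defined above; the grid size and range are arbitrary choices.

# Evaluation sketch (assumption: run after the training loop above has finished).
model.eval()
with torch.no_grad():
    # Grid over roughly [-2*pi, 2*pi], shaped (50, 1) to match the training inputs.
    xs = torch.linspace(-2 * 3.2, 2 * 3.2, steps=50).unsqueeze(1).to(device)
    preds = model(xs)
    err = (preds - torch.sin(xs)).abs().mean()
    print(f'mean absolute error on the grid: {err.item()}')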