Skip to content

Instantly share code, notes, and snippets.

@khirotaka
Last active August 11, 2019 07:41
Show Gist options
  • Save khirotaka/b5d029d57d284b65aacdeceda30a0a4f to your computer and use it in GitHub Desktop.
Save khirotaka/b5d029d57d284b65aacdeceda30a0a4f to your computer and use it in GitHub Desktop.
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
import matplotlib.pyplot as plt
# Toy 1-D regression dataset: 31 inputs in [1.0, 4.0] (step 0.1) with
# hand-measured targets.  Built explicitly as float32: np.arange produces
# float64, and torch.tensor would otherwise infer a float64 X, which crashes
# against the model's float32 nn.Linear weights at training time.
X = torch.tensor(
    [[i] for i in np.arange(1, 4.1, 0.1)],
    dtype=torch.float32,
)
y = torch.tensor(
    [[j] for j in
     [0.045, 0.04, 0.06, 0.05, 0.055, 0.06, 0.095, 0.09,
      0.1, 0.1, 0.13, 0.145, 0.15, 0.18, 0.115, 0.09, 0.21,
      0.31, 0.33, 0.37, 0.38, 0.395, 0.41, 0.46, 0.47, 0.495, 0.49, 0.495, 0.42, 0.34, 0.19]],
    dtype=torch.float32,
)
train_ds = TensorDataset(X, y)
# batch_size=1, shuffled: plain stochastic gradient descent over single samples.
train_loader = DataLoader(train_ds, shuffle=True, batch_size=1)
class Network(nn.Module):
    """Tiny MLP for 1-D curve fitting: 1 -> 4 (sigmoid) -> 1."""

    def __init__(self):
        super(Network, self).__init__()
        self.fc1 = nn.Linear(1, 4)
        self.fc2 = nn.Linear(4, 1)

    def forward(self, x):
        hidden = torch.sigmoid(self.fc1(x))
        return self.fc2(hidden)

    def init_params(self, W, b):
        """Overwrite both layers' parameters from dicts keyed "1" and "2".

        W["1"]/b["1"] load fc1, W["2"]/b["2"] load fc2; each tensor is
        wrapped in nn.Parameter so the optimizer can see it.
        """
        for layer, key in ((self.fc1, "1"), (self.fc2, "2")):
            layer.weight = nn.Parameter(W[key])
            layer.bias = nn.Parameter(b[key])
def create_params(weights, biases):
    """Convert numpy parameter dicts into float32 leaf tensors with grad.

    Args:
        weights: dict with keys "1" (4 values, fc1 weight) and "2"
            (4 values, fc2 weight) as numpy arrays.
        biases: dict with keys "1" (4 values, fc1 bias) and "2"
            (1 value, fc2 bias) as numpy arrays.

    Returns:
        (W, b): dicts of torch tensors shaped for a 1 -> 4 -> 1 MLP:
        W["1"] is (4, 1), W["2"] is (1, 4), b["1"] is (4,), b["2"] is (1,).
    """
    # Build each tensor directly at float32.  The original pattern
    # `torch.tensor(arr, requires_grad=True).float()` returned the *non-leaf*
    # output of the `.float()` op, so gradients would never accumulate on the
    # returned tensors themselves.
    w1 = torch.tensor(weights["1"].reshape(4, 1), dtype=torch.float32, requires_grad=True)
    b1 = torch.tensor(biases["1"].reshape(4), dtype=torch.float32, requires_grad=True)
    w2 = torch.tensor(weights["2"].reshape(1, 4), dtype=torch.float32, requires_grad=True)
    b2 = torch.tensor(biases["2"].reshape(1), dtype=torch.float32, requires_grad=True)
    W = {"1": w1, "2": w2}
    b = {"1": b1, "2": b2}
    return W, b
# Hand-picked initial parameters (numpy, float32) for the 1 -> 4 -> 1 net.
initial_weights = {
    "1": np.array([1, -10, -50, 50], dtype=np.float32),
    "2": np.array([0, 0, 0, 0], dtype=np.float32),
}
initial_biases = {
    "1": np.array([-5, 30, 100, -100], dtype=np.float32),
    "2": np.array([0], dtype=np.float32),
}

# Convert to torch tensors and load them into a fresh model.
W, b = create_params(initial_weights, initial_biases)
model = Network()
model.init_params(W, b)

# Mean-squared-error loss with plain SGD at a small fixed learning rate.
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
def train(epochs):
    """Run `epochs` full passes of SGD over train_loader, updating `model` in place."""
    model.train()
    for _ in range(epochs):
        for batch_x, batch_y in train_loader:
            optimizer.zero_grad()
            prediction = model(batch_x)
            loss = criterion(prediction, batch_y)
            loss.backward()
            optimizer.step()
def test():
    """Collect the model's predictions over train_loader without gradients.

    Returns a list of numpy arrays, one per batch.
    NOTE(review): train_loader shuffles, so predictions come back in shuffled
    order rather than input order — confirm this is intended for plotting.
    """
    model.eval()
    predictions = []
    with torch.no_grad():
        for batch_x, _ in train_loader:
            predictions.append(model(batch_x).numpy())
    return predictions
# Fit the model for 2 epochs over the toy dataset.
train(2)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment