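# Toy linear regression with PyTorch autograd: fit y = w*X + b element-wise
# to random data by gradient descent on a sum-of-squares loss.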
import torch
from torch.autograd import Variable
dtype = torch.FloatTensor # Datatype of our Tensors
# Equation: y = wX + b
y = Variable(torch.rand(10).type(dtype), requires_grad=False)
X = Variable(torch.rand(10).type(dtype), requires_grad=False)
w = Variable(torch.randn(10).type(dtype), requires_grad=True)
b = Variable(torch.randn(10).type(dtype), requires_grad=True)
learning_rate = 1e-2 # Learning rate
for step in range(500):
    pred_y = X.mul(w).add(b)          # y = wX + b (element-wise)
    loss = (pred_y - y).pow(2).sum()  # Sum of squared errors
    print(step, loss.item())          # Iteration and loss (loss.data[0] is deprecated)
    loss.backward()                   # Compute gradients
    # Gradient descent: w <- w - lr * dL/dw, b <- b - lr * dL/db
    w.data -= learning_rate * w.grad.data
    b.data -= learning_rate * b.grad.data
    # Manually zero the gradient buffers, since backward() accumulates
    w.grad.data.zero_()
    b.grad.data.zero_()
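
# A minimal sketch of the same loop in post-0.4 PyTorch (an assumption about
# the reader's version, not part of the original gist): Variable is deprecated,
# plain tensors carry requires_grad directly, the parameter update runs under
# torch.no_grad(), and the gradients are zeroed in place via .grad.zero_().
y = torch.rand(10)
X = torch.rand(10)
w = torch.randn(10, requires_grad=True)
b = torch.randn(10, requires_grad=True)
for step in range(500):
    pred_y = X * w + b                # Same model, plain tensor ops
    loss = (pred_y - y).pow(2).sum()  # Sum of squared errors
    loss.backward()                   # Compute gradients
    with torch.no_grad():             # Suspend autograd for the in-place update
        w -= learning_rate * w.grad
        b -= learning_rate * b.grad
    w.grad.zero_()                    # Reset accumulated gradients
    b.grad.zero_()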