PyTorch gradients example
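
The script below demonstrates two ways to read parameter gradients after a backward pass: reading param.grad off model.named_parameters() once loss.backward() has run, and querying torch.autograd.grad directly for the gradient of the loss with respect to each parameter.
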
from sklearn import datasets
import torch
import torch.nn as nn
import torch.autograd as auto
from torch.autograd import Variable
import torch.optim as optim

torch.manual_seed(123)
N = 100


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(4, 10)
        self.fc2 = nn.Linear(10, 20)
        self.fc3 = nn.Linear(20, 5)
        self.fc4 = nn.Linear(5, 1)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Store intermediate activations and retain their gradients, so that
        # .grad is populated on these non-leaf tensors after backward().
        self.x1 = self.fc1(x)
        self.x1.retain_grad()
        x = self.relu(self.x1)
        self.x2 = self.fc2(x)
        self.x3 = self.fc3(self.x2)
        x = self.relu(self.x3)
        self.x4 = self.fc4(x)
        self.x4.retain_grad()
        return self.x4


def fit(model, optimizer, data, target):
    model.train()
    criterion = nn.MSELoss()
    for e in range(200):
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()


def get_gradients(model, optimizer, data, target):
    model.eval()
    criterion = nn.MSELoss()
    optimizer.zero_grad()
    output = model(data)
    loss = criterion(output, target)
    loss.backward(retain_graph=True)
    # Method 1: read .grad off the parameters after backward().
    for name, param in model.named_parameters():
        if 'weight' in name:
            print(name)
            print(param.data.cpu().numpy().shape)
            print('gradient is \t', param.grad, '\trequires grad: ', param.requires_grad)
            print()
    optimizer.zero_grad()
    # Method 2: ask autograd directly for the gradient of the loss
    # with respect to each parameter.
    for name, param in model.named_parameters():
        if 'weight' in name:
            print(name)
            grad = auto.grad(loss, param, retain_graph=True, only_inputs=False)
            print('another gradient\t', grad)
            print()


def iris():
    iris = datasets.load_iris()
    X_train = iris.data[:N]
    y_train = iris.target[:N]
    data = Variable(torch.FloatTensor(X_train).cuda(), requires_grad=True)
    # Reshape the target to (N, 1) so it matches the model output for MSELoss.
    target = Variable(torch.FloatTensor(y_train).view(-1, 1).cuda())
    model = Net()
    model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    fit(model, optimizer, data, target)
    get_gradients(model, optimizer, data[0], target[0])


if __name__ == '__main__':
    iris()
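
The forward pass calls retain_grad() on the intermediate activations x1 and x4, but the script never reads them back. A minimal follow-up sketch of how those activation gradients could be inspected after a backward pass; the helper name get_activation_gradients is hypothetical, not part of the original gist:

def get_activation_gradients(model, data, target):
    # Hypothetical helper: after backward(), the .grad fields of the
    # retained non-leaf tensors (see retain_grad() in forward) are populated;
    # without retain_grad() they would be None.
    criterion = nn.MSELoss()
    loss = criterion(model(data), target)
    loss.backward()
    print('grad of fc1 pre-activation:\t', model.x1.grad)
    print('grad of fc4 output:\t\t', model.x4.grad)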
This corresponds to the PyTorch issue
