Created July 15, 2020 07:50
check gradient clipping using model.parameters() vs apex.amp.master_params(optimizer)
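For context, Apex's documentation recommends clipping the gradients of `amp.master_params(optimizer)` rather than `model.parameters()`, since at some opt levels the optimizer steps on FP32 master copies instead of the model's own parameters. The sketch below is a minimal, self-contained illustration of that documented pattern; the model, data, and `max_norm` value are placeholders chosen for the example, not taken from this gist.

```python
import torch
from apex import amp

# tiny illustrative model/optimizer pair (names and sizes are arbitrary)
model = torch.nn.Linear(10, 4).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

inputs = torch.rand(8, 10).cuda()
targets = torch.randint(0, 4, (8,)).cuda()

optimizer.zero_grad()
loss = torch.nn.CrossEntropyLoss()(model(inputs), targets)
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()

# clip the gradients of the parameters the optimizer actually steps on
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), 1.0)
optimizer.step()
```

Under `opt_level="O1"` the model's parameters stay in FP32 and `amp.master_params(optimizer)` yields those same tensors, so clipping either collection should produce the same update; the script below checks that empirically, with `model_3` kept as an unclipped reference.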
import torch
import apex

print("initialize model")
model_1 = torch.nn.Linear(1000, 2000).cuda()
model_2 = torch.nn.Linear(1000, 2000).cuda()
model_3 = torch.nn.Linear(1000, 2000).cuda()

# copy weights and biases from model_3 so all three models start identical
model_1.weight = torch.nn.Parameter(model_3.weight.clone())
model_2.weight = torch.nn.Parameter(model_3.weight.clone())
model_1.bias = torch.nn.Parameter(model_3.bias.clone())
model_2.bias = torch.nn.Parameter(model_3.bias.clone())

optimizer_1 = torch.optim.SGD(model_1.parameters(), lr=1e2)
optimizer_2 = torch.optim.SGD(model_2.parameters(), lr=1e2)
optimizer_3 = torch.optim.SGD(model_3.parameters(), lr=1e2)
optimizer_1.load_state_dict(optimizer_3.state_dict())
optimizer_2.load_state_dict(optimizer_3.state_dict())

criterion = torch.nn.CrossEntropyLoss()
random_input = torch.rand(500, 1000).cuda()
target_input = torch.empty(500, dtype=torch.long).random_(2000).cuda()

# wrap each model/optimizer pair with apex amp at opt_level O1
amp_model1, amp_optimizer1 = apex.amp.initialize(model_1, optimizer_1, opt_level="O1")
amp_model2, amp_optimizer2 = apex.amp.initialize(model_2, optimizer_2, opt_level="O1")
amp_model3, amp_optimizer3 = apex.amp.initialize(model_3, optimizer_3, opt_level="O1")

# weights should be identical before the update
print(amp_model1.weight - amp_model2.weight, torch.mean(torch.abs(amp_model1.weight - amp_model2.weight)))
print(amp_model3.weight - amp_model2.weight, torch.mean(torch.abs(amp_model3.weight - amp_model2.weight)))

for model, optimizer in [(amp_model1, amp_optimizer1), (amp_model2, amp_optimizer2), (amp_model3, amp_optimizer3)]:
    optimizer.zero_grad()
    output = model(random_input)
    loss = criterion(output, target_input)
    with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
        print("Scaled_Loss:", scaled_loss)
        scaled_loss.backward()
    print("Loss: ", loss)

    if model is amp_model1:
        print("Clip using torch")
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.02)
    elif model is amp_model2:
        print("Clip using apex master params")
        torch.nn.utils.clip_grad_norm_(apex.amp.master_params(optimizer), 0.02)
    else:
        print("Dont clip")

    optimizer.step()
    print("Step Optimizer")

# compare weights and gradients after one update:
# model_1 (clipped via model.parameters()) vs model_2 (clipped via amp.master_params),
# with the unclipped model_3 as a reference
print(amp_model1.weight - amp_model2.weight, torch.mean(torch.abs(amp_model1.weight - amp_model2.weight)))
print(amp_model3.weight - amp_model2.weight, torch.mean(torch.abs(amp_model3.weight - amp_model2.weight)))
print(amp_model1.weight.grad - amp_model2.weight.grad, torch.mean(torch.abs(amp_model1.weight.grad - amp_model2.weight.grad)))
print(amp_model3.weight.grad - amp_model2.weight.grad, torch.mean(torch.abs(amp_model3.weight.grad - amp_model2.weight.grad)))
output: