Torch Guided Backprop -- ResNet Compatible version
""" | |
Created on Thu Oct 26 11:23:47 2017 | |
Original Author: | |
@author: Utku Ozbulak - github.com/utkuozbulak | |
Changes for ResNet Compatibility: | |
Moritz Freidank - github.com/MFreidank | |
""" | |
import torch | |
from torch.nn import ReLU | |
from misc_functions import (get_params, | |
convert_to_grayscale, | |
save_gradient_images, | |
get_positive_negative_saliency) | |


class GuidedBackprop():
    """
    Produces gradients generated with guided backpropagation from the given image.
    """
    def __init__(self, model):
        self.model = model
        self.gradients = None
        # Put the model in evaluation mode
        self.model.eval()
        self.update_relus()
        self.hook_layers()

    def hook_layers(self):
        def hook_function(module, grad_in, grad_out):
            # grad_in[0] is the gradient w.r.t. the input of the hooked layer
            self.gradients = grad_in[0]

        # Register the hook on the first layer (conv1 for ResNets), so the
        # captured gradient is taken w.r.t. the preprocessed input image
        first_layer = list(self.model.children())[0]
        first_layer.register_backward_hook(hook_function)
    def update_relus(self):
        """
        Updates ReLU activation functions so that they only return positive gradients.
        """
        def relu_hook_function(module, grad_in, grad_out):
            """
            Clips negative gradients to zero.
            """
            if isinstance(module, ReLU):
                return (torch.clamp(grad_in[0], min=0.0),)

        # Loop through the layers and hook up every ReLU with relu_hook_function
        for module in self.model.modules():
            if isinstance(module, ReLU):
                module.register_backward_hook(relu_hook_function)
    def generate_gradients(self, input_image, target_class):
        # Forward pass
        model_output = self.model(input_image)
        # Zero gradients
        self.model.zero_grad()
        # Target for backprop: a one-hot vector selecting the target class
        one_hot_output = torch.FloatTensor(1, model_output.size()[-1]).zero_()
        one_hot_output[0][target_class] = 1
        # Backward pass
        model_output.backward(gradient=one_hot_output)
        # Convert the PyTorch tensor to a numpy array;
        # [0] drops the batch dimension: (1, 3, 224, 224) -> (3, 224, 224)
        gradients_as_arr = self.gradients.data.numpy()[0]
        return gradients_as_arr


if __name__ == '__main__':
    target_example = 0  # Snake
    (original_image, prep_img, target_class, file_name_to_export, pretrained_model) = \
        get_params(target_example)
    # Guided backprop
    GBP = GuidedBackprop(pretrained_model)
    # Get gradients
    guided_grads = GBP.generate_gradients(prep_img, target_class)
    # Save colored gradients
    save_gradient_images(guided_grads, file_name_to_export + '_Guided_BP_color')
    # Convert to grayscale
    grayscale_guided_grads = convert_to_grayscale(guided_grads)
    # Save grayscale gradients
    save_gradient_images(grayscale_guided_grads, file_name_to_export + '_Guided_BP_gray')
    # Positive and negative saliency maps
    pos_sal, neg_sal = get_positive_negative_saliency(guided_grads)
    save_gradient_images(pos_sal, file_name_to_export + '_pos_sal')
    save_gradient_images(neg_sal, file_name_to_export + '_neg_sal')
    print('Guided backprop completed')
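
For a quick standalone test that does not depend on misc_functions, here is a minimal usage sketch with a torchvision ResNet; the image path and target class are placeholders, not part of the gist:

import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image

model = models.resnet18(pretrained=True)
gbp = GuidedBackprop(model)

preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
prep_img = preprocess(Image.open('snake.jpg').convert('RGB')).unsqueeze(0)
prep_img.requires_grad_()  # gradients only reach the image if it requires grad

guided_grads = gbp.generate_gradients(prep_img, target_class=56)  # placeholder class index
print(guided_grads.shape)  # expected: (3, 224, 224)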
@hamedbehzadi Yes, I have the same error. The output has the same size as the output of the first layer; it seems the gradients are not calculated w.r.t. the input image.
@MFreidank any hints for this problem? Thanks!
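
A plausible cause: PyTorch's (since-deprecated) register_backward_hook makes no guarantee that grad_in corresponds to the gradient w.r.t. a module's input, which can produce output-shaped gradients exactly as described above. A sketch of a possible workaround, not verified against this gist: drop the first-layer hook and read the gradient directly off the input tensor, e.g. by replacing generate_gradients with:

def generate_gradients(self, input_image, target_class):
    input_image.requires_grad_()  # autograd must track the input image
    model_output = self.model(input_image)
    self.model.zero_grad()
    # One-hot target vector selecting the target class
    one_hot_output = torch.zeros_like(model_output)
    one_hot_output[0][target_class] = 1
    model_output.backward(gradient=one_hot_output)
    # Gradient w.r.t. the input image itself, batch dimension dropped
    return input_image.grad.data.numpy()[0]

The ReLU hooks from update_relus still apply during the backward pass, so this remains guided backprop; input_image just has to be a leaf tensor (which it is when it comes straight out of the preprocessing transforms) for .grad to be populated.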