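# LossModules2.py -- loss modules for neural style transfer
# (imported below by the main script as LossModules2).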
import torch
import torch.nn as nn
#import torch.legacy.nn as lnn
import torchvision
from torch.autograd import Variable
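
# ContentLoss works in two phases, in the style of the Lua neural-style
# loss modules: in 'capture' mode forward() records the target activations,
# and in 'loss' mode it computes the MSE against that target, scaled by
# strength.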
class ContentLoss(nn.Module):

    def __init__(self, strength, normalize):
        super(ContentLoss, self).__init__()
        self.target = Variable(torch.Tensor(), requires_grad=False)
        self.strength = strength
        self.crit = nn.MSELoss()
        self.mode = None
        self.normalize = normalize  # was hardcoded to 'False', which ignored the argument

    def forward(self, input):
        if self.mode == 'loss':
            self.loss = self.crit(input.cpu(), self.target.cpu()) * self.strength
        elif self.mode == 'capture':
            self.target.data.resize_as_(input.cpu().data).copy_(input.cpu().data)
        self.output = input
        return self.output

    def backward(self, input, gradOutput, retain_graph=True):
        if self.mode == 'loss':
            if input.nelement() == self.target.nelement():
                self.loss.backward(retain_graph=retain_graph)
                #G = Variable(self.target.data, requires_grad=True) #This works?
                #G.backward(input.data) #This works?
            if self.normalize == 'True':
                self.loss = self.loss.div(torch.norm(self.loss, 1) + 1e-8) # Normalize Gradients
            # strength is already applied in forward(); gradOutput (the all-zero
            # dummy dy) is only added when the shapes actually match
            if self.loss.size() == gradOutput.size():
                self.loss = self.loss.add(gradOutput)
        else:
            self.target.resize_as_(gradOutput).copy_(gradOutput)
        return self.loss
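
# GramMatrix flattens the (B, C, H, W) activations to (B*C, H*W) and returns
# the feature correlation matrix G = F * F^T, scaled by 1/(H*W).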
class GramMatrix(nn.Module):

    def forward(self, input):
        B, C, H, W = input.size()
        x_flat = input.view(B * C, H * W)
        self.output = torch.mm(x_flat, x_flat.t())
        self.output.div_(H * W)
        return self.output

    def backward(self, input, gradOutput):
        #gradOutput = gradOutput.grad.div(input.nelement())
        B, C, H, W = input.size()
        x_flat = input.view(B * C, H * W)
        # dL/dF = (dL/dG + dL/dG^T) * F, computed on the flattened activations
        self.gradInput = torch.mm(gradOutput, x_flat)
        self.gradInput.addmm_(gradOutput.t(), x_flat)
        self.gradInput = self.gradInput.view(B, C, H, W)  # was view(C, H, W), which drops the batch dim
        return self.gradInput
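
# A minimal sketch (not part of the original gist) of getting the same Gram
# gradient through autograd instead of the hand-written backward above; the
# helper name `gram_grad_autograd` is hypothetical.
def gram_grad_autograd(input, gradOutput):
    x = Variable(input.data, requires_grad=True)  # fresh leaf so x.grad is populated
    B, C, H, W = x.size()
    x_flat = x.view(B * C, H * W)
    G = torch.mm(x_flat, x_flat.t()).div(H * W)   # same Gram computation as forward()
    G.backward(gradOutput.data)                   # seed dL/dG; autograd produces dL/dx
    return x.grad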
#def BackwardsStyleHook(self, input, gradOutput)
class StyleLoss(nn.Module):

    def __init__(self, strength, normalize):
        super(StyleLoss, self).__init__()
        self.target = Variable(torch.Tensor(), requires_grad=False)
        self.strength = strength
        self.gram = GramMatrix()
        self.crit = nn.MSELoss()
        self.mode = None
        self.blend_weight = None
        self.G = None
        self.normalize = normalize  # was hardcoded to 'False', which ignored the argument

    def forward(self, input):
        self.output = input.clone()
        self.G = self.gram.forward(input)
        self.G = self.G.div(input.nelement())
        if self.mode == 'capture':
            if self.blend_weight is None:
                self.target.data.resize_as_(self.G.cpu().data).copy_(self.G.cpu().data)
            elif self.target.nelement() == 0:
                self.target.data.resize_as_(self.G.cpu().data).copy_(self.G.cpu().data).mul_(self.blend_weight)
            else:
                self.target.data.add_(self.blend_weight, self.G.data)
        elif self.mode == 'loss':
            self.loss = self.strength * self.crit(self.G.cpu(), self.target.cpu())
        return self.output

    def backward(self, input, gradOutput):
        if self.mode == 'loss':
            #dG = Variable(self.G.data, requires_grad=True) #This works?
            #dG.backward(self.target.data) #This works?
            dG = Variable(self.target.data, requires_grad=True) #This works?
            dG.backward(self.G.data) #This works?
            self.gradInput = self.gram.backward(input, dG.grad)
            if self.normalize == 'True':
                self.gradInput = self.gradInput.div(torch.norm(self.gradInput, 1) + 1e-8) # Normalize Gradients
            self.gradInput = self.gradInput.mul(self.strength)
            # gradOutput (the all-zero dummy dy) is only added when the shapes actually match
            if self.gradInput.size() == gradOutput.size():
                self.gradInput = self.gradInput.add(gradOutput)
        else:
            self.gradInput = gradOutput
        return self.gradInput
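
# ----------------------------------------------------------------------
# Main script (a separate file in the gist; it imports the loss modules
# above as LossModules2).
# ----------------------------------------------------------------------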
import torch
import torch.nn as nn
import torchvision
import torchvision.models as models
import torchvision.transforms as transforms
from torch.autograd import Variable, Function
import torch.optim as optim
from PIL import Image
import os
import sys
from LossModules2 import ContentLoss
from LossModules2 import StyleLoss
from LossModules2 import GramMatrix
import argparse
parser = argparse.ArgumentParser()
# Basic options
parser.add_argument("-style_image", help="Style target image", default='examples/inputs/seated-nude.jpg')
parser.add_argument("-content_image", help="Content target image", default='examples/inputs/tubingen.jpg')
parser.add_argument("-image_size", help="Maximum height / width of generated image", type=int, default=512)
# Optimization options
parser.add_argument("-num_iterations", help="iterations", type=int, default=1000)
parser.add_argument("-optimizer", help="optimiser", default="lbfgs", choices=["lbfgs", "adam"])
parser.add_argument("-learning_rate", default=1)
# Output options
parser.add_argument("-print_iter", type=int, default=50)
parser.add_argument("-save_iter", type=int, default=100)
parser.add_argument("-output_image", default='out.png')
# Other options
parser.add_argument("-model_file", help="VGG 19 model file location", type=str, default='vgg19-d01eb7cb.pth')
parser.add_argument("-seed", help="random number seed", type=int, default=-1)
params = parser.parse_args()
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
# Seed the random number generators
if params.seed >= 0:
    torch.manual_seed(params.seed)
    torch.cuda.manual_seed(params.seed)
def ImageSetup2(image_name, image_size):
    image = Image.open(image_name)
    image = image.convert('RGB')
    Loader = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor()]) # resize and convert to tensor
    #Normalize = transforms.Compose([transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229])]) # BGR mean & std
    #image = Variable(Normalize(Loader(image)))
    image = Variable(Loader(image))
    image = image.unsqueeze(0)
    print(image.size())
    return image
def ImageSetup(image_name, image_size):
    image = Image.open(image_name)
    image = image.convert('RGB')
    Loader = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor()]) # resize and convert to tensor
    Normalize = transforms.Compose([transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229])]) # BGR mean & std
    image = Variable(Normalize(Loader(image)).clamp_(-1, 1))
    image = image.unsqueeze(0)
    return image
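
# Note: only ImageSetup2 (no mean/std normalization) is used below;
# ImageSetup is the normalized variant, kept here for experimentation.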
def SaveImage(output_img, output_name):
    torchvision.utils.save_image(output_img, output_name, nrow=8, padding=2, normalize=False, range=None, scale_each=False, pad_value=0)
#from ImageStuff import preprocess, deprocess
#content_image = preprocess(params.content_image).cpu()
#style_image = preprocess(params.style_image).cpu()
content_image = ImageSetup2(params.content_image, params.image_size).cpu()
style_image = ImageSetup2(params.style_image, params.image_size).cpu()
# Separate names for layers
VGG19_Layer_List = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4', 'pool5', 'torch_view', 'fc6', 'relu6', 'drop6', 'fc7', 'relu7', 'drop7', 'fc8', 'prob']
VGG16_Layer_List = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5', 'torch_view', 'fc6', 'relu6', 'drop6', 'fc7', 'relu7', 'drop7', 'fc8', 'prob']
NIN_Layer_List = ['conv1', 'relu0', 'cccp1', 'relu1', 'cccp2', 'relu2', 'pool0', 'conv2', 'relu3', 'cccp3', 'relu5', 'cccp4', 'relu6', 'pool2', 'conv3', 'relu7', 'cccp5', 'relu8', 'cccp6', 'relu9', 'pool3', 'drop', 'conv4-1024', 'relu10', 'cccp7-1024', 'relu11', 'cccp8-1024', 'relu12', 'pool4', 'loss']
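
# Each list gives Caffe-style names for the layers of the corresponding
# torchvision `features` sequence, in order; the trailing fully-connected
# names (fc6 ... prob) are never reached because ModelSetup only walks the
# convolutional part of the network.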
def ModelSetup(cnn, style_weight, content_weight, Layer_List, content_layers, style_layers, normalize_gradients):
    content_losses = []
    style_losses = []
    next_content_idx = 1
    next_style_idx = 1
    net = nn.Sequential()
    net = net.cpu()
    for i, layer in enumerate(list(cnn)):
        layer_name = Layer_List[i]
        if "conv" in layer_name:
            net.add_module(layer_name, layer)
            if layer_name in content_layers:
                print("Setting up content layer " + str(next_content_idx) + ": " + str(layer_name))
                loss_module = ContentLoss(content_weight, normalize_gradients)
                # use a distinct name so the loss module does not replace the conv layer
                net.add_module(layer_name + "_content_loss", loss_module)
                content_losses.append(loss_module)
                next_content_idx = next_content_idx + 1
            if layer_name in style_layers:
                print("Setting up style layer " + str(next_style_idx) + ": " + str(layer_name))
                loss_module = StyleLoss(style_weight, normalize_gradients)
                net.add_module(layer_name + "_style_loss", loss_module)
                style_losses.append(loss_module)
                next_style_idx = next_style_idx + 1
        if "relu" in layer_name:
            net.add_module(layer_name, layer)
            if layer_name in content_layers:
                print("Setting up content layer " + str(next_content_idx) + ": " + str(layer_name))
                loss_module = ContentLoss(content_weight, normalize_gradients)
                net.add_module(layer_name + "_content_loss", loss_module)
                content_losses.append(loss_module)
                next_content_idx = next_content_idx + 1
            if layer_name in style_layers:
                print("Setting up style layer " + str(next_style_idx) + ": " + str(layer_name))
                loss_module = StyleLoss(style_weight, normalize_gradients)
                net.add_module(layer_name + "_style_loss", loss_module)
                style_losses.append(loss_module)
                next_style_idx = next_style_idx + 1
        if "pool" in layer_name:
            net.add_module(layer_name, layer) # ***
    cnn = None
    return net, style_losses, content_losses
model_type = 'vgg19' # Default value for testing
style_weight = 1000 # Default value for testing
content_weight = 100 # Default value for testing
normalize_gradients = 'False' # Default value for testing
content_layers = ['relu4_2'] # Default value for testing
style_layers = ['relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1'] # Default value for testing
max_iter = 1000 # Default value for testing
cnn = None
Layer_List = []
model_name = os.path.splitext(params.model_file)[0].split('-')[0]
print(model_name)
cnn = getattr(models, model_name)()
cnn.load_state_dict(torch.load(params.model_file))
cnn = cnn.features
if model_type == 'vgg19':
    #cnn = models.vgg19(pretrained=True).features
    Layer_List = VGG19_Layer_List
elif model_type == 'vgg16':
    #cnn = models.vgg16(pretrained=True).features
    Layer_List = VGG16_Layer_List
# Build the style transfer model:
net, style_losses, content_losses = ModelSetup(cnn, style_weight, content_weight, Layer_List, content_layers, style_layers, normalize_gradients)
net = net.cpu()
img = content_image.clone()
img = nn.Parameter(img.data, requires_grad=True)
content_image = nn.Parameter(content_image.data, requires_grad=True)
style_image = nn.Parameter(style_image.data, requires_grad=True)
# Capture content targets
print("Capturing content targets")
for i in content_losses:
    i.mode = 'capture'
net(content_image).cpu()
# Capture style targets
for i in content_losses:
    i.mode = None
for j in style_losses:
    j.mode = 'capture'
    #j.blend_weight = style_blend_weights[i]
net(style_image)
# Set all loss modules to loss mode
for i in content_losses:
    i.mode = 'loss'
for i in style_losses:
    i.mode = 'loss'
def maybe_print(t, loss):
    if params.print_iter > 0 and t % params.print_iter == 0:
        print("Iteration: " + str(t) + " / " + str(params.num_iterations))
        c = 1
        for i in content_losses:
            print("  Content " + str(c) + " loss: " + "loss value recording is broken")
            c = c + 1
        s = 1
        for i in style_losses:
            print("  Style " + str(s) + " loss: " + "loss value recording is broken")
            s = s + 1
        print("  Total loss " + "loss value recording is broken")
def maybe_save(t):
    should_save = params.save_iter > 0 and t % params.save_iter == 0
    should_save = should_save or t == params.num_iterations
    if should_save:
        output_filename, file_extension = os.path.splitext(params.output_image)
        if t == params.num_iterations:
            filename = output_filename + str(file_extension)
        else:
            filename = str(output_filename) + "_" + str(t) + str(file_extension)
        SaveImage(img.data, filename)
#optim_state = None
#if params.optimizer == 'lbfgs':
#optim_state = (max_iter = params.num_iterations, tolerance_change = -1, tolerance_grad = -1)
#elif params.optimizer == 'adam':
# optim_state = {
# "lr": 1,
# }
optimizer = None
# Run optimization.
if params.optimizer == 'lbfgs':
    print("Running optimization with L-BFGS")
    optimizer = optim.LBFGS([img], max_iter=params.num_iterations, tolerance_change=-1, tolerance_grad=-1)
elif params.optimizer == 'adam':
    print("Running optimization with ADAM")
    # one optimizer instance is enough; the original recreated it inside a
    # `for t in xrange(...)` loop, which also breaks on Python 3
    optimizer = optim.Adam([img], lr=params.learning_rate)
y = net(img)
dy = Variable(y.data.resize_as_(content_image.data).zero_()) # all-zero dummy gradOutput for the loss modules' backward
num_calls = [0]
while num_calls[0] <= params.num_iterations:
    def feval():
        num_calls[0] += 1
        img.data.clamp_(-1, 1)
        optimizer.zero_grad()
        #print(torch.mean(img.data))
        net(img)
        loss = 0
        for mod in content_losses:
            mod.backward(img, dy)
            loss = loss + mod.loss
        for mod in style_losses:
            mod.backward(img, dy)
            loss = loss + mod.loss
        maybe_print(num_calls[0], loss)
        maybe_save(num_calls[0])
        return loss  # was `return -loss`; the closure should return the value being minimized
    optimizer.step(feval)