@ProGamerGov
Last active December 27, 2018 03:02
Working Multi-Region Spatial Control in Neural-Style. Also known as "masked style transfer" and "semantic/segmented style transfer".
-- Original mask related code from: https://github.com/martinbenson/deep-photo-styletransfer
-- Modified mask code by github.com/ProGamerGov
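-- Example invocation (a sketch: the image and mask file names below are
-- hypothetical; the flags are the ones defined in this script). Each mask
-- image paints its regions in the colors listed in -color_codes:
--   th neural_style_seg.lua -content_image content.jpg -content_seg content_mask.png \
--     -style_image style.jpg -style_seg style_mask.png -color_codes black,white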
require 'torch'
require 'nn'
require 'image'
require 'optim'
require 'loadcaffe'
local cmd = torch.CmdLine()
-- Basic options
cmd:option('-style_image', 'examples/inputs/seated-nude.jpg',
'Style target image')
cmd:option('-style_seg', '',
'Style segmentation image')
cmd:option('-style_blend_weights', 'nil')
cmd:option('-content_image', 'examples/inputs/tubingen.jpg',
'Content target image')
cmd:option('-content_seg', '',
'Content segmentation image')
cmd:option('-image_size', 512, 'Maximum height / width of generated image')
cmd:option('-gpu', '0', 'Zero-indexed ID of the GPU to use; for CPU mode set -gpu = -1')
cmd:option('-multigpu_strategy', '', 'Index of layers to split the network across GPUs')
cmd:option('-color_codes', 'blue,green,black,white,red,yellow,grey,lightblue,purple', 'Colors used in content mask')
-- Optimization options
cmd:option('-content_weight', 5e0)
cmd:option('-style_weight', 1e2)
cmd:option('-tv_weight', 1e-3)
cmd:option('-num_iterations', 1000)
cmd:option('-normalize_gradients', false)
cmd:option('-init', 'random', 'random|image')
cmd:option('-init_image', '')
cmd:option('-optimizer', 'lbfgs', 'lbfgs|adam')
cmd:option('-learning_rate', 1e1)
cmd:option('-lbfgs_num_correction', 0)
-- Output options
cmd:option('-print_iter', 50)
cmd:option('-save_iter', 100)
cmd:option('-output_image', 'out.png')
-- Other options
cmd:option('-style_scale', 1.0)
cmd:option('-original_colors', 0)
cmd:option('-pooling', 'max', 'max|avg')
cmd:option('-proto_file', 'models/VGG_ILSVRC_19_layers_deploy.prototxt')
cmd:option('-model_file', 'models/VGG_ILSVRC_19_layers.caffemodel')
cmd:option('-backend', 'nn', 'nn|cudnn|clnn')
cmd:option('-cudnn_autotune', false)
cmd:option('-seed', -1)
cmd:option('-content_layers', 'relu4_2', 'layers for content')
cmd:option('-style_layers', 'relu1_1,relu2_1,relu3_1,relu4_1,relu5_1', 'layers for style')
local function main(params)
local dtype, multigpu = setup_gpu(params)
local loadcaffe_backend = params.backend
if params.backend == 'clnn' then loadcaffe_backend = 'nn' end
local cnn = loadcaffe.load(params.proto_file, params.model_file, loadcaffe_backend):type(dtype)
local content_image = image.load(params.content_image, 3)
content_image = image.scale(content_image, params.image_size, 'bilinear')
local content_image_caffe = preprocess(content_image):float()
local style_size = math.ceil(params.style_scale * params.image_size)
local style_image_list = params.style_image:split(',')
local style_images_caffe = {}
for _, img_path in ipairs(style_image_list) do
local img = image.load(img_path, 3)
img = image.scale(img, style_size, 'bilinear')
local img_caffe = preprocess(img):float()
table.insert(style_images_caffe, img_caffe)
end
local init_image = nil
if params.init_image ~= '' then
init_image = image.load(params.init_image, 3)
local H, W = content_image:size(2), content_image:size(3)
init_image = image.scale(init_image, W, H, 'bilinear')
init_image = preprocess(init_image):float()
end
-- Handle style blending weights for multiple style inputs
local style_blend_weights = nil
if params.style_blend_weights == 'nil' then
-- Style blending not specified, so use equal weighting
style_blend_weights = {}
for i = 1, #style_image_list do
table.insert(style_blend_weights, 1.0)
end
else
style_blend_weights = params.style_blend_weights:split(',')
assert(#style_blend_weights == #style_image_list,
'-style_blend_weights and -style_images must have the same number of elements')
end
-- Normalize the style blending weights so they sum to 1
local style_blend_sum = 0
for i = 1, #style_blend_weights do
style_blend_weights[i] = tonumber(style_blend_weights[i])
style_blend_sum = style_blend_sum + style_blend_weights[i]
end
for i = 1, #style_blend_weights do
style_blend_weights[i] = style_blend_weights[i] / style_blend_sum
end
local content_layers = params.content_layers:split(",")
local style_layers = params.style_layers:split(",")
-- segmentation images
local style_seg_images_caffe = {}
local color_content_masks, color_style_masks = {}, {}
local color_codes = params.color_codes:split(",")
if params.content_seg ~= '' and params.style_seg ~= '' then
local content_seg = image.load(params.content_seg, 3)
content_seg = image.scale(content_seg, params.image_size, 'bilinear')
local content_seg_caffe = content_seg:float()
local style_segs = params.style_seg:split(',')
assert(#style_segs == #style_image_list,
'-style_seg and -style_image must have the same number of elements')
for i, img_path in ipairs(style_segs) do
local style_seg = image.load(img_path, 3)
style_seg = image.scale(style_seg, style_size, 'bilinear')
local style_seg_caffe = style_seg:float()
table.insert(style_seg_images_caffe, style_seg_caffe)
end
for j = 1, #color_codes do
local content_mask_j = ExtractMask(content_seg_caffe, color_codes[j], dtype)
table.insert(color_content_masks, content_mask_j)
end
for i=1, #style_image_list do
local tmp_table = {}
for j = 1, #color_codes do
local style_mask_i_j = ExtractMask(style_seg_images_caffe[i], color_codes[j], dtype)
table.insert(tmp_table, style_mask_i_j)
end
table.insert(color_style_masks, tmp_table)
end
end
-- Set up the network, inserting style and content loss modules
local content_losses, style_losses = {}, {}
local next_content_idx, next_style_idx = 1, 1
local net = nn.Sequential()
if params.tv_weight > 0 then
local tv_mod = nn.TVLoss(params.tv_weight):type(dtype)
net:add(tv_mod)
end
for i = 1, #cnn do
if next_content_idx <= #content_layers or next_style_idx <= #style_layers then
local layer = cnn:get(i)
local name = layer.name
local layer_type = torch.type(layer)
local is_pooling = (layer_type == 'cudnn.SpatialMaxPooling' or layer_type == 'nn.SpatialMaxPooling')
local is_conv = (layer_type == 'nn.SpatialConvolution' or layer_type == 'cudnn.SpatialConvolution')
if params.content_seg ~= '' and params.style_seg ~= '' then
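-- Keep the masks aligned with the feature maps as they flow through the net:
-- at each pooling layer the masks are downsampled by 2, and at each conv
-- layer they are smoothed with a 3x3 average pool, roughly tracking the
-- growth of the receptive field.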
if is_pooling then
local pool_layer
if params.pooling == 'avg' then
assert(layer.padW == 0 and layer.padH == 0)
local kW, kH = layer.kW, layer.kH
local dW, dH = layer.dW, layer.dH
local avg_pool_layer = nn.SpatialAveragePooling(kW, kH, dW, dH):type(dtype)
local msg = 'Replacing max pooling at layer %d with average pooling'
print(string.format(msg, i))
pool_layer=avg_pool_layer
else
pool_layer=layer
end
net:add(pool_layer)
for k = 1, #color_codes do
color_content_masks[k] = image.scale(color_content_masks[k]:float(), math.ceil(color_content_masks[k]:size(2)/2), math.ceil(color_content_masks[k]:size(1)/2)):type(dtype)
end
for j = 1, #style_image_list do
for k = 1, #color_codes do
color_style_masks[j][k] = image.scale(color_style_masks[j][k]:float(), math.ceil(color_style_masks[j][k]:size(2)/2), math.ceil(color_style_masks[j][k]:size(1)/2)):type(dtype)
end
color_style_masks[j] = deepcopy(color_style_masks[j])
end
elseif is_conv then
net:add(layer)
local sap = nn.SpatialAveragePooling(3,3,1,1,1,1):type(dtype)
for k = 1, #color_codes do
color_content_masks[k] = sap:forward(color_content_masks[k]:repeatTensor(1,1,1))[1]:clone()
end
for j = 1, #style_image_list do
for k = 1, #color_codes do
color_style_masks[j][k] = sap:forward(color_style_masks[j][k]:repeatTensor(1,1,1))[1]:clone()
end
color_style_masks[j] = deepcopy(color_style_masks[j])
end
else
net:add(layer)
end
color_content_masks = deepcopy(color_content_masks)
elseif is_pooling and params.pooling == 'avg' then
assert(layer.padW == 0 and layer.padH == 0)
local kW, kH = layer.kW, layer.kH
local dW, dH = layer.dW, layer.dH
local avg_pool_layer = nn.SpatialAveragePooling(kW, kH, dW, dH):type(dtype)
local msg = 'Replacing max pooling at layer %d with average pooling'
print(string.format(msg, i))
net:add(avg_pool_layer)
else
net:add(layer)
end
if name == content_layers[next_content_idx] then
print("Setting up content layer", i, ":", layer.name)
local norm = params.normalize_gradients
local loss_module = nn.ContentLoss(params.content_weight, norm):type(dtype)
net:add(loss_module)
table.insert(content_losses, loss_module)
next_content_idx = next_content_idx + 1
end
if name == style_layers[next_style_idx] then
print("Setting up style layer ", i, ":", layer.name)
local norm = params.normalize_gradients
local loss_module
if params.content_seg ~= '' then
loss_module = nn.MaskedStyleLoss(params.style_weight, norm, color_style_masks, color_content_masks, color_codes, name):type(dtype)
else
loss_module = nn.StyleLoss(params.style_weight, norm):type(dtype)
end
net:add(loss_module)
table.insert(style_losses, loss_module)
next_style_idx = next_style_idx + 1
end
end
end
if multigpu then
net = setup_multi_gpu(net, params)
end
net:type(dtype)
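-- Each loss module runs in one of three modes: 'capture' records its target
-- from a forward pass, 'loss' measures the distance to that target, and
-- 'none' simply passes activations through.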
-- Capture content targets
for i = 1, #content_losses do
content_losses[i].mode = 'capture'
end
print 'Capturing content targets'
print(net)
content_image_caffe = content_image_caffe:type(dtype)
net:forward(content_image_caffe:type(dtype))
-- Capture style targets
for i = 1, #content_losses do
content_losses[i].mode = 'none'
end
for i = 1, #style_images_caffe do
print(string.format('Capturing style target %d', i))
for j = 1, #style_losses do
style_losses[j].mode = 'capture'
style_losses[j].blend_weight = style_blend_weights[i]
end
net:forward(style_images_caffe[i]:type(dtype))
end
-- Set all loss modules to loss mode
for i = 1, #content_losses do
content_losses[i].mode = 'loss'
end
for i = 1, #style_losses do
style_losses[i].mode = 'loss'
end
-- We don't need the base CNN anymore, so clean it up to save memory.
cnn = nil
for i=1, #net.modules do
local module = net.modules[i]
if torch.type(module) == 'nn.SpatialConvolutionMM' then
-- remove these, not used, but uses gpu memory
module.gradWeight = nil
module.gradBias = nil
end
end
collectgarbage()
if params.style_seg ~= '' then
style_images_caffe=nil
style_seg_images_caffe=nil
end
-- Initialize the image
if params.seed >= 0 then
torch.manualSeed(params.seed)
end
local img = nil
if params.init == 'random' then
img = torch.randn(content_image:size()):float():mul(0.001)
elseif params.init == 'image' then
if init_image then
img = init_image:clone()
else
img = content_image_caffe:clone()
end
else
error('Invalid init type')
end
img = img:type(dtype)
-- Run it through the network once to get the proper size for the gradient
-- All the gradients will come from the extra loss modules, so we just pass
-- zeros into the top of the net on the backward pass.
local y = net:forward(img)
local dy = img.new(#y):zero()
-- Declaring this here lets us access it in maybe_print
local optim_state = nil
if params.optimizer == 'lbfgs' then
optim_state = {
maxIter = params.num_iterations,
verbose=true,
tolX=-1,
tolFun=-1,
}
if params.lbfgs_num_correction > 0 then
optim_state.nCorrection = params.lbfgs_num_correction
end
elseif params.optimizer == 'adam' then
optim_state = {
learningRate = params.learning_rate,
}
else
error(string.format('Unrecognized optimizer "%s"', params.optimizer))
end
local function maybe_print(t, loss)
local verbose = (params.print_iter > 0 and t % params.print_iter == 0)
if verbose then
print(string.format('Iteration %d / %d', t, params.num_iterations))
for i, loss_module in ipairs(content_losses) do
print(string.format(' Content %d loss: %f', i, loss_module.loss))
end
for i, loss_module in ipairs(style_losses) do
print(string.format(' Style %d loss: %f', i, loss_module.loss))
end
print(string.format(' Total loss: %f', loss))
end
end
local function maybe_save(t)
local should_save = params.save_iter > 0 and t % params.save_iter == 0
should_save = should_save or t == params.num_iterations
if should_save then
local disp = deprocess(img:double())
disp = image.minmax{tensor=disp, min=0, max=1}
local filename = build_filename(params.output_image, t)
if t == params.num_iterations then
filename = params.output_image
end
-- Maybe perform postprocessing for color-independent style transfer
if params.original_colors == 1 then
disp = original_colors(content_image, disp)
end
image.save(filename, disp)
end
end
-- Function to evaluate loss and gradient. We run the net forward and
-- backward to get the gradient, and sum up losses from the loss modules.
-- optim.lbfgs internally handles iteration and calls this function many
-- times, so we manually count the number of iterations to handle printing
-- and saving intermediate results.
local num_calls = 0
local function feval(x)
num_calls = num_calls + 1
net:forward(x)
local grad = net:updateGradInput(x, dy)
local loss = 0
for _, mod in ipairs(content_losses) do
loss = loss + mod.loss
end
for _, mod in ipairs(style_losses) do
loss = loss + mod.loss
end
maybe_print(num_calls, loss)
maybe_save(num_calls)
collectgarbage()
-- optim.lbfgs expects a vector for gradients
return loss, grad:view(grad:nElement())
end
-- Run optimization.
if params.optimizer == 'lbfgs' then
print('Running optimization with L-BFGS')
local x, losses = optim.lbfgs(feval, img, optim_state)
elseif params.optimizer == 'adam' then
print('Running optimization with ADAM')
for t = 1, params.num_iterations do
local x, losses = optim.adam(feval, img, optim_state)
end
end
end
function setup_gpu(params)
local multigpu = false
if params.gpu:find(',') then
multigpu = true
params.gpu = params.gpu:split(',')
for i = 1, #params.gpu do
params.gpu[i] = tonumber(params.gpu[i]) + 1
end
else
params.gpu = tonumber(params.gpu) + 1
end
local dtype = 'torch.FloatTensor'
if multigpu or params.gpu > 0 then
if params.backend ~= 'clnn' then
require 'cutorch'
require 'cunn'
if multigpu then
cutorch.setDevice(params.gpu[1])
else
cutorch.setDevice(params.gpu)
end
dtype = 'torch.CudaTensor'
else
require 'clnn'
require 'cltorch'
if multigpu then
cltorch.setDevice(params.gpu[1])
else
cltorch.setDevice(params.gpu)
end
dtype = torch.Tensor():cl():type()
end
else
params.backend = 'nn'
end
if params.backend == 'cudnn' then
require 'cudnn'
if params.cudnn_autotune then
cudnn.benchmark = true
end
cudnn.SpatialConvolution.accGradParameters = nn.SpatialConvolutionMM.accGradParameters -- ie: nop
end
return dtype, multigpu
end
function setup_multi_gpu(net, params)
local DEFAULT_STRATEGIES = {
[2] = {3},
}
local gpu_splits = nil
if params.multigpu_strategy == '' then
-- Use a default strategy
gpu_splits = DEFAULT_STRATEGIES[#params.gpu]
-- Offset the default strategy by one if we are using TV
if params.tv_weight > 0 then
for i = 1, #gpu_splits do gpu_splits[i] = gpu_splits[i] + 1 end
end
else
-- Use the user-specified multigpu strategy
gpu_splits = params.multigpu_strategy:split(',')
for i = 1, #gpu_splits do
gpu_splits[i] = tonumber(gpu_splits[i])
end
end
assert(gpu_splits ~= nil, 'Must specify -multigpu_strategy')
local gpus = params.gpu
local cur_chunk = nn.Sequential()
local chunks = {}
for i = 1, #net do
cur_chunk:add(net:get(i))
if i == gpu_splits[1] then
table.remove(gpu_splits, 1)
table.insert(chunks, cur_chunk)
cur_chunk = nn.Sequential()
end
end
table.insert(chunks, cur_chunk)
assert(#chunks == #gpus)
local new_net = nn.Sequential()
for i = 1, #chunks do
local out_device = nil
if i == #chunks then
out_device = gpus[1]
end
new_net:add(nn.GPU(chunks[i], gpus[i], out_device))
end
return new_net
end
function build_filename(output_image, iteration)
local ext = paths.extname(output_image)
local basename = paths.basename(output_image, ext)
local directory = paths.dirname(output_image)
return string.format('%s/%s_%d.%s',directory, basename, iteration, ext)
end
-- Preprocess an image before passing it to a Caffe model.
-- We need to rescale from [0, 1] to [0, 255], convert from RGB to BGR,
-- and subtract the mean pixel.
function preprocess(img)
local mean_pixel = torch.DoubleTensor({103.939, 116.779, 123.68})
local perm = torch.LongTensor{3, 2, 1}
img = img:index(1, perm):mul(256.0)
mean_pixel = mean_pixel:view(3, 1, 1):expandAs(img)
img:add(-1, mean_pixel)
return img
end
-- Undo the above preprocessing.
function deprocess(img)
local mean_pixel = torch.DoubleTensor({103.939, 116.779, 123.68})
mean_pixel = mean_pixel:view(3, 1, 1):expandAs(img)
img = img + mean_pixel
local perm = torch.LongTensor{3, 2, 1}
img = img:index(1, perm):div(256.0)
return img
end
-- Combine the Y channel of the generated image and the UV channels of the
-- content image to perform color-independent style transfer.
function original_colors(content, generated)
local generated_y = image.rgb2yuv(generated)[{{1, 1}}]
local content_uv = image.rgb2yuv(content)[{{2, 3}}]
return image.yuv2rgb(torch.cat(generated_y, content_uv, 1))
end
-- Define an nn Module to compute content loss in-place
local ContentLoss, parent = torch.class('nn.ContentLoss', 'nn.Module')
function ContentLoss:__init(strength, normalize)
parent.__init(self)
self.strength = strength
self.target = torch.Tensor()
self.normalize = normalize or false
self.loss = 0
self.crit = nn.MSECriterion()
self.mode = 'none'
end
function ContentLoss:updateOutput(input)
if self.mode == 'loss' then
self.loss = self.crit:forward(input, self.target) * self.strength
elseif self.mode == 'capture' then
self.target:resizeAs(input):copy(input)
end
self.output = input
return self.output
end
function ContentLoss:updateGradInput(input, gradOutput)
if self.mode == 'loss' then
if input:nElement() == self.target:nElement() then
self.gradInput = self.crit:backward(input, self.target)
end
if self.normalize then
self.gradInput:div(torch.norm(self.gradInput, 1) + 1e-8)
end
self.gradInput:mul(self.strength)
self.gradInput:add(gradOutput)
else
self.gradInput:resizeAs(gradOutput):copy(gradOutput)
end
return self.gradInput
end
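-- Define an nn Module to compute the Gram matrix G = F * F:t() of a C x H x W
-- feature map flattened to C x (H*W); each entry is the inner product between
-- two channels' responses.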
local Gram, parent = torch.class('nn.GramMatrix', 'nn.Module')
function Gram:__init()
parent.__init(self)
end
function Gram:updateOutput(input)
assert(input:dim() == 3)
local C, H, W = input:size(1), input:size(2), input:size(3)
local x_flat = input:view(C, H * W)
self.output:resize(C, C)
self.output:mm(x_flat, x_flat:t())
return self.output
end
function Gram:updateGradInput(input, gradOutput)
assert(input:dim() == 3 and input:size(1))
local C, H, W = input:size(1), input:size(2), input:size(3)
local x_flat = input:view(C, H * W)
self.gradInput:resize(C, H * W):mm(gradOutput, x_flat)
self.gradInput:addmm(gradOutput:t(), x_flat)
self.gradInput = self.gradInput:view(C, H, W)
return self.gradInput
end
-- Define an nn Module to compute style loss in-place
local StyleLoss, parent = torch.class('nn.StyleLoss', 'nn.Module')
function StyleLoss:__init(strength, normalize)
parent.__init(self)
self.normalize = normalize or false
self.strength = strength
self.target = torch.Tensor()
self.mode = 'none'
self.loss = 0
self.gram = nn.GramMatrix()
self.blend_weight = nil
self.G = nil
self.crit = nn.MSECriterion()
end
function StyleLoss:updateOutput(input)
self.G = self.gram:forward(input)
self.G:div(input:nElement())
if self.mode == 'capture' then
if self.blend_weight == nil then
self.target:resizeAs(self.G):copy(self.G)
elseif self.target:nElement() == 0 then
self.target:resizeAs(self.G):copy(self.G):mul(self.blend_weight)
else
self.target:add(self.blend_weight, self.G)
end
elseif self.mode == 'loss' then
self.loss = self.strength * self.crit:forward(self.G, self.target)
end
self.output = input
return self.output
end
function StyleLoss:updateGradInput(input, gradOutput)
if self.mode == 'loss' then
local dG = self.crit:backward(self.G, self.target)
dG:div(input:nElement())
self.gradInput = self.gram:backward(input, dG)
if self.normalize then
self.gradInput:div(torch.norm(self.gradInput, 1) + 1e-8)
end
self.gradInput:mul(self.strength)
self.gradInput:add(gradOutput)
else
self.gradInput = gradOutput
end
return self.gradInput
end
-- Define an nn Module to compute masked style loss in-place
local MaskedStyleLoss, parent = torch.class('nn.MaskedStyleLoss', 'nn.Module')
function MaskedStyleLoss:__init(strength, normalize, color_style_masks, color_content_masks, color_codes)
parent.__init(self)
self.normalize = normalize or false
self.strength = strength
self.target_grams = {}
self.masked_grams = {}
self.masked_features = {}
self.mode = 'none'
self.gram = nn.GramMatrix()
self.blend_weight = nil
self.crit = nn.MSECriterion()
self.color_style_masks = deepcopy(color_style_masks)
self.color_content_masks = deepcopy(color_content_masks)
self.color_codes = color_codes
self.capture_count =1
end
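-- For each color region: multiply the features by the region's mask, take the
-- Gram matrix of the masked features, and normalize it by the mask's mean
-- coverage. In 'capture' mode these per-region Grams become the targets
-- (accumulated across style images with blend_weight); in 'loss' mode they
-- are compared to the targets, with each region's loss weighted by its area.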
function MaskedStyleLoss:updateOutput(input)
self.loss = 0
local masks
if self.mode == 'capture' then
masks = self.color_style_masks[self.capture_count]
self.capture_count = self.capture_count +1
elseif self.mode == 'loss' then
masks = self.color_content_masks
self.color_style_masks=nil
end
if self.mode ~= 'none' then
for j = 1, #self.color_codes do
local l_mask_ori = masks[j]:clone()
local l_mask = l_mask_ori:repeatTensor(1,1,1):expandAs(input)
local l_mean = l_mask_ori:mean()
local masked_features = torch.cmul(l_mask, input)
local masked_gram = self.gram:forward(masked_features):clone()
if l_mean > 0 then
masked_gram:div(input:nElement() * l_mean)
end
if self.mode == 'capture' then
if j>#self.target_grams then
table.insert(self.target_grams, masked_gram:mul(self.blend_weight))
table.insert(self.masked_grams, self.target_grams[j]:clone())
table.insert(self.masked_features, masked_features)
else
self.target_grams[j]:add(masked_gram:mul(self.blend_weight))
end
elseif self.mode == 'loss' then
self.masked_grams[j]=masked_gram
self.masked_features[j]=masked_features
self.loss = self.loss + self.crit:forward(self.masked_grams[j], self.target_grams[j]) * l_mean * self.strength
end
end
end
self.output = input
return self.output
end
function MaskedStyleLoss:updateGradInput(input, gradOutput)
if self.mode == 'loss' then
self.gradInput = gradOutput:clone()
self.gradInput:zero()
for j = 1, #self.color_codes do
local dG = self.crit:backward(self.masked_grams[j], self.target_grams[j])
dG:div(input:nElement())
local gradient = self.gram:backward(self.masked_features[j], dG)
if self.normalize then
gradient:div(torch.norm(gradient, 1) + 1e-8)
end
self.gradInput:add(gradient)
end
self.gradInput:mul(self.strength)
self.gradInput:add(gradOutput)
else
self.gradInput = gradOutput
end
return self.gradInput
end
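-- Define an nn Module to compute total variation loss in-place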
local TVLoss, parent = torch.class('nn.TVLoss', 'nn.Module')
function TVLoss:__init(strength)
parent.__init(self)
self.strength = strength
self.x_diff = torch.Tensor()
self.y_diff = torch.Tensor()
end
function TVLoss:updateOutput(input)
self.output = input
return self.output
end
-- TV loss backward pass inspired by kaishengtai/neuralart
function TVLoss:updateGradInput(input, gradOutput)
self.gradInput:resizeAs(input):zero()
local C, H, W = input:size(1), input:size(2), input:size(3)
self.x_diff:resize(3, H - 1, W - 1)
self.y_diff:resize(3, H - 1, W - 1)
self.x_diff:copy(input[{{}, {1, -2}, {1, -2}}])
self.x_diff:add(-1, input[{{}, {1, -2}, {2, -1}}])
self.y_diff:copy(input[{{}, {1, -2}, {1, -2}}])
self.y_diff:add(-1, input[{{}, {2, -1}, {1, -2}}])
self.gradInput[{{}, {1, -2}, {1, -2}}]:add(self.x_diff):add(self.y_diff)
self.gradInput[{{}, {1, -2}, {2, -1}}]:add(-1, self.x_diff)
self.gradInput[{{}, {2, -1}, {1, -2}}]:add(-1, self.y_diff)
self.gradInput:mul(self.strength)
self.gradInput:add(gradOutput)
return self.gradInput
end
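-- Build a binary mask from a segmentation image: a pixel belongs to the mask
-- when each of its R, G, B channels is within 0.1 of the named color's value.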
function ExtractMask(seg, color, dtype)
local mask = nil
if color == 'green' then
mask = torch.lt(seg[1], 0.1)
mask:cmul(torch.gt(seg[2], 1-0.1))
mask:cmul(torch.lt(seg[3], 0.1))
elseif color == 'black' then
mask = torch.lt(seg[1], 0.1)
mask:cmul(torch.lt(seg[2], 0.1))
mask:cmul(torch.lt(seg[3], 0.1))
elseif color == 'white' then
mask = torch.gt(seg[1], 1-0.1)
mask:cmul(torch.gt(seg[2], 1-0.1))
mask:cmul(torch.gt(seg[3], 1-0.1))
elseif color == 'red' then
mask = torch.gt(seg[1], 1-0.1)
mask:cmul(torch.lt(seg[2], 0.1))
mask:cmul(torch.lt(seg[3], 0.1))
elseif color == 'blue' then
mask = torch.lt(seg[1], 0.1)
mask:cmul(torch.lt(seg[2], 0.1))
mask:cmul(torch.gt(seg[3], 1-0.1))
elseif color == 'yellow' then
mask = torch.gt(seg[1], 1-0.1)
mask:cmul(torch.gt(seg[2], 1-0.1))
mask:cmul(torch.lt(seg[3], 0.1))
elseif color == 'grey' then
mask = torch.cmul(torch.gt(seg[1], 0.5-0.1), torch.lt(seg[1], 0.5+0.1))
mask:cmul(torch.cmul(torch.gt(seg[2], 0.5-0.1), torch.lt(seg[2], 0.5+0.1)))
mask:cmul(torch.cmul(torch.gt(seg[3], 0.5-0.1), torch.lt(seg[3], 0.5+0.1)))
elseif color == 'lightblue' then
mask = torch.lt(seg[1], 0.1)
mask:cmul(torch.gt(seg[2], 1-0.1))
mask:cmul(torch.gt(seg[3], 1-0.1))
elseif color == 'purple' then
mask = torch.gt(seg[1], 1-0.1)
mask:cmul(torch.lt(seg[2], 0.1))
mask:cmul(torch.gt(seg[3], 1-0.1))
else
print('ExtractMask(): color not recognized, color = ', color)
end
return mask:type(dtype)
end
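-- Recursively copy a table, including nested keys, values, and the metatable;
-- non-table values are returned unchanged.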
function deepcopy(orig)
local orig_type = type(orig)
local copy
if orig_type == 'table' then
copy = {}
for orig_key, orig_value in next, orig, nil do
copy[deepcopy(orig_key)] = deepcopy(orig_value)
end
setmetatable(copy, deepcopy(getmetatable(orig)))
else -- number, string, boolean, etc
copy = orig
end
return copy
end
local params = cmd:parse(arg)
main(params)
@rayset commented May 1, 2017

There's some issue with pooling=avg.
It gives me this error; I'll start investigating tomorrow.

Capturing style target 1
/home/rayset/torch/install/bin/luajit: /home/rayset/torch/install/share/lua/5.1/nn/Container.lua:67:
In 10 module of nn.Sequential:
/home/rayset/torch/install/share/lua/5.1/torch/Tensor.lua:322: incorrect size: only supporting singleton expansion (size=1)
stack traceback:
[C]: in function 'error'
/home/rayset/torch/install/share/lua/5.1/torch/Tensor.lua:322: in function 'expandAs'
spatial.lua:698: in function <spatial.lua:685>
[C]: in function 'xpcall'
/home/rayset/torch/install/share/lua/5.1/nn/Container.lua:63: in function 'rethrowErrors'
/home/rayset/torch/install/share/lua/5.1/nn/Sequential.lua:44: in function 'forward'
spatial.lua:265: in function 'main'
spatial.lua:838: in main chunk
[C]: in function 'dofile'
...yset/torch/install/lib/luarocks/rocks/trepl/scm-1/bin/th:145: in main chunk
[C]: at 0x00406670

thanks for the code tho, it's amazing.

@rayset commented May 1, 2017

Side question: the style blend weights are normalized to make them relative.
Is the actual style weight for each style image its blend weight multiplied by the ''style_weight'' parameter? I think this is the case, but I'm not seeing it in the code (I'm quite tired, 2 am :( )
thanks!

@ProGamerGov (Author) commented

Sorry for the late reply, it seems gists do not send emails when people comment on them?

The weight for each region should be the same, as the same style blend weights/style weights are applied to each region. And did you manage to figure out the avg pooling error?
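For example, following the normalization code above: with two style images and -style_blend_weights 3,1, the weights are normalized to 0.75 and 0.25, so inside each mask region the captured target Gram matrix is 0.75 times the first style's Gram plus 0.25 times the second's, and that whole style term is then scaled by -style_weight.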

@ProGamerGov (Author) commented Oct 12, 2017

Important Information regarding an issue people may have been experiencing with this code:

If you have more than one style image whose associated mask uses the same mask color, then you will have to repeat the color in the -color_codes parameter for it to work.

Example: You have a black mask image for your content image, and 2 style images that both have white and black mask images. Using -color_codes black will result in an error, but using -color_codes black,black will not.
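As a sketch (the file names here are hypothetical), the working form of that example would be something like:

th neural_style_seg.lua -content_image content.jpg -content_seg content_mask_black.png -style_image style1.jpg,style2.jpg -style_seg style1_mask.png,style2_mask.png -color_codes black,black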


You can also check out this wiki for mask creation guides using free and open source software: https://github.com/martinbenson/deep-photo-styletransfer/wiki

@ProGamerGov (Author) commented Jan 19, 2018

There appears to be an issue with neural_style_seg.lua and the fcn32s-heavy-pascal model:

[libprotobuf ERROR google/protobuf/text_format.cc:274] Error parsing text-format caffe.NetParameter: 9:14: Message type "caffe.PythonParameter" has no field named "param_str".
[libprotobuf WARNING google/protobuf/io/coded_stream.cc:537] Reading dangerously large protocol message.  If the message turns out to be larger than 1073741824 bytes, parsing will be halted for security reasons.  To increase the limit (or to disable these warnings), see CodedInputStream::SetTotalBytesLimit() in google/protobuf/io/coded_stream.h.
[libprotobuf WARNING google/protobuf/io/coded_stream.cc:78] The total number of bytes read was 544614787
Successfully loaded models/fcn32s-heavy-pascal.caffemodel
warning: module 'data [type Python]' not found
warning: module 'data_data_0_split [type Split]' not found
warning: module 'upscore [type Deconvolution]' not found
warning: module 'score [type Crop]' not found
conv1_1: 64 3 3 3
conv1_2: 64 64 3 3
conv2_1: 128 64 3 3
conv2_2: 128 128 3 3
conv3_1: 256 128 3 3
conv3_2: 256 256 3 3
conv3_3: 256 256 3 3
conv4_1: 512 256 3 3
conv4_2: 512 512 3 3
conv4_3: 512 512 3 3
conv5_1: 512 512 3 3
conv5_2: 512 512 3 3
conv5_3: 512 512 3 3
fc6: 4096 512 7 7
fc7: 4096 4096 1 1
score_fr: 21 4096 1 1
Setting up style layer          2       :       relu1_1
Setting up style layer          7       :       relu2_1
Setting up style layer          12      :       relu3_1
Setting up style layer          19      :       relu4_1
Setting up content layer        21      :       relu4_2
Setting up style layer          26      :       relu5_1
Capturing content targets
nn.Sequential {
  [input -> (1) -> (2) -> (3) -> (4) -> (5) -> (6) -> (7) -> (8) -> (9) -> (10) -> (11) -> (12) -> (13) -> (14) -> (15) -> (16) -> (17) -> (18) -> (19) -> (20) -> (21) -> (22) -> (23) -> (24) -> (25) -> (26) -> (27) -> (28) -> (29) -> (30) -> (31) -> (32) -> output]
  (1): cudnn.SpatialConvolution(3 -> 64, 3x3, 1,1, 100,100)
  (2): cudnn.ReLU
  (3): nn.MaskedStyleLoss
  (4): cudnn.SpatialConvolution(64 -> 64, 3x3, 1,1, 1,1)
  (5): cudnn.ReLU
  (6): cudnn.SpatialMaxPooling(2x2, 2,2)
  (7): cudnn.SpatialConvolution(64 -> 128, 3x3, 1,1, 1,1)
  (8): cudnn.ReLU
  (9): nn.MaskedStyleLoss
  (10): cudnn.SpatialConvolution(128 -> 128, 3x3, 1,1, 1,1)
  (11): cudnn.ReLU
  (12): cudnn.SpatialMaxPooling(2x2, 2,2)
  (13): cudnn.SpatialConvolution(128 -> 256, 3x3, 1,1, 1,1)
  (14): cudnn.ReLU
  (15): nn.MaskedStyleLoss
  (16): cudnn.SpatialConvolution(256 -> 256, 3x3, 1,1, 1,1)
  (17): cudnn.ReLU
  (18): cudnn.SpatialConvolution(256 -> 256, 3x3, 1,1, 1,1)
  (19): cudnn.ReLU
  (20): cudnn.SpatialMaxPooling(2x2, 2,2)
  (21): cudnn.SpatialConvolution(256 -> 512, 3x3, 1,1, 1,1)
  (22): cudnn.ReLU
  (23): nn.MaskedStyleLoss
  (24): cudnn.SpatialConvolution(512 -> 512, 3x3, 1,1, 1,1)
  (25): cudnn.ReLU
  (26): nn.ContentLoss
  (27): cudnn.SpatialConvolution(512 -> 512, 3x3, 1,1, 1,1)
  (28): cudnn.ReLU
  (29): cudnn.SpatialMaxPooling(2x2, 2,2)
  (30): cudnn.SpatialConvolution(512 -> 512, 3x3, 1,1, 1,1)
  (31): cudnn.ReLU
  (32): nn.MaskedStyleLoss
}
Capturing style target 1
/home/ubuntu/torch/install/bin/luajit: /home/ubuntu/torch/install/share/lua/5.1/nn/Container.lua:67:
In 3 module of nn.Sequential:
/home/ubuntu/torch/install/share/lua/5.1/torch/Tensor.lua:326: incorrect size: only supporting singleton expansion (size=1)
stack traceback:
        [C]: in function 'error'
        /home/ubuntu/torch/install/share/lua/5.1/torch/Tensor.lua:326: in function 'expandAs'
        neural_style_seg.lua:698: in function <neural_style_seg.lua:685>
        [C]: in function 'xpcall'
        /home/ubuntu/torch/install/share/lua/5.1/nn/Container.lua:63: in function 'rethrowErrors'
        /home/ubuntu/torch/install/share/lua/5.1/nn/Sequential.lua:44: in function 'forward'
        neural_style_seg.lua:265: in function 'main'
        neural_style_seg.lua:838: in main chunk
        [C]: in function 'dofile'
        ...untu/torch/install/lib/luarocks/rocks/trepl/scm-1/bin/th:150: in main chunk
        [C]: at 0x00405d50

WARNING: If you see a stack trace below, it doesn't point to the place where this error occurred. Please use only the one above.
stack traceback:
        [C]: in function 'error'
        /home/ubuntu/torch/install/share/lua/5.1/nn/Container.lua:67: in function 'rethrowErrors'
        /home/ubuntu/torch/install/share/lua/5.1/nn/Sequential.lua:44: in function 'forward'
        neural_style_seg.lua:265: in function 'main'
        neural_style_seg.lua:838: in main chunk
        [C]: in function 'dofile'
        ...untu/torch/install/lib/luarocks/rocks/trepl/scm-1/bin/th:150: in main chunk
        [C]: at 0x00405d50

Basically, the model won't work with this script when using the default train_val.prototxt, though it still works with the normal, unmodified Neural-Style script.
