Created
September 13, 2021 11:09
-
-
Save preddy5/e1f1654e87bab92a2f0d8a14d3a106ae to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import json | |
import PIL | |
from PIL import Image | |
from DC.compositing import composite, seed, composite_l2, composite_layers, composite_layers_A | |
from DC.loss import loss_fn, loss_fn_l2 | |
from DC.sampling import sampling_layer | |
import torch | |
from DC.common import vgg_normalize, selectBackground, \ | |
vgg_renormalize, tensor2img, get_vgg, remove_invisible, hardmax, d2_distance_matrix, select_distance_matrix_idx, \ | |
sort_by_z, remove_invisible_z, remove_bgcolor_elements | |
from DC.constants import * | |
from distutils.dir_util import copy_tree | |
from DC.utils import load_element, load_base_pattern, init_element_pos, render_constants, make_cuda, redo_variables, \ | |
init_optimizer, save_tensor, get_ss, get_ss_l2, select_op_variables, save_tensor_z, load_element_PIL, save_img_z | |
import numpy as np | |
# ------------------------- -------------------------
# Output directory for the final rendered layers/images of this pattern.
# NOTE(review): `os`, `folder_root` and `args` are not imported explicitly here;
# they presumably arrive via `from DC.constants import *` — confirm.
folder_final = "{}/logs/expansion/final/{}/".format(folder_root, args.pattern)
if not os.path.exists(folder_final):
    os.makedirs(folder_final)
# ------------------------- -------------------------
# Module-level weights; presumably loss/optimization weights — they are
# re-assigned inside main() (declared `global` there) before being used.
w = 1
w1 = 2
w2 = 4
w3 = 8
w_o = 1e6
def to_numpy(img):
    """Return *img* (e.g. a PIL image or any array-like) as a numpy array."""
    arr = np.array(img)
    return arr
def to_pil(img):
    """Convert an array-like of numbers into an 8-bit PIL image."""
    as_uint8 = np.uint8(img)
    return PIL.Image.fromarray(as_uint8)
def add_bg(img, bg):
    """Flatten an RGBA image onto a solid background colour.

    Standard "over" compositing: out = fg * a + bg * (1 - a).  The original
    code *multiplied* the two terms (``fg*a * (1-a)*bg``), which zeroes the
    blend wherever alpha is 0 or 1; fixed to a sum here.  The math is done in
    float to avoid uint8 overflow, and alpha is normalized from PIL's 0-255
    range to 0-1 (NOTE(review): assumes an 8-bit RGBA source — confirm).

    Args:
        img: PIL RGBA image (4 channels).
        bg:  length-3 array-like, background RGB in 0-255.

    Returns:
        PIL image with the RGB channels composited over ``bg``
        (alpha channel is carried through unchanged).
    """
    img_np = to_numpy(img).astype(np.float64)  # float math: uint8 products would wrap
    alpha = img_np[:, :, 3:] / 255.0           # normalize alpha to 0-1
    bg = np.asarray(bg, dtype=np.float64)
    img_np[:, :, :3] = img_np[:, :, :3] * alpha + (1.0 - alpha) * bg[None, None, :]
    img_pil = to_pil(img_np)
    return img_pil
def create_img(xy, elements, background, element_id, thetas, width=256, height=256, scale=1.0, gen_img=None, color=None, background_img=None):
    """Paste rotated (and optionally tinted) element sprites onto a canvas,
    one layer at a time, saving every individual layer as a PNG and returning
    the composited result.

    Args:
        xy:            (N, 2) array of element centres in normalized pattern coords.
        elements:      list of PIL RGBA sprite images.  Not mutated: resized
                       copies are used (the original resized in place).
        background:    RGB triple (0-255) used for the canvas fill colour.
        element_id:    (N,) array of indices into ``elements`` (0-7, mapped to
                       letters A-H in the saved filenames).
        thetas:        (N,) rotation values; converted to degrees via
                       ``theta * 3.14159 * 57.2958`` (i.e. theta appears to be
                       in units of pi radians — TODO confirm against DC.sampling).
        width, height: base canvas size before ``scale``.
        scale:         integer upscale factor applied to canvas, sprites and xy.
        gen_img:       optional existing RGBA canvas to keep compositing onto.
        color:         optional (N, 3) per-element RGB multipliers, or None.
        background_img: optional PIL background image; when None a solid
                       ``background``-coloured layer is used instead.

    Returns:
        (gen_img, img_w_bg): the composite without and with the background.

    Side effects: writes one PNG per element into the module-global
    ``folder_final`` and prints each layer's name and normalized position.
    """
    scale = int(scale)
    canvas_size = [width * scale, height * scale]
    xy = xy * scale
    # Resize copies so the caller's `elements` list is left untouched.
    elements = list(elements)
    for i in range(len(elements)):
        w_e = elements[i].size[0]
        h_e = elements[i].size[1]
        elements[i] = elements[i].resize((w_e * scale, h_e * scale), resample=PIL.Image.BICUBIC)
    # Element index -> letter used in the per-layer filenames.
    namespace_map = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H'}
    alpha_mask = 0
    if gen_img is None:  # identity test instead of `== None` (idiomatic, and safe for PIL images)
        # Fully transparent canvas carrying the background colour in its RGB.
        gen_img = Image.new('RGBA', tuple(canvas_size),
                            (background[0], background[1], background[2], alpha_mask))
    empty = Image.new('RGBA', tuple(canvas_size), (0, 0, 0, 0))
    total_number = len(element_id)
    for idx in range(len(element_id)):
        (x, y), i, angle = xy[idx], element_id[idx], thetas[idx]
        x = math.ceil(x * width)
        y = math.ceil(y * height)
        # theta * pi gives radians, * 57.2958 converts to degrees; negated for PIL's CCW rotate.
        element = elements[i].rotate(-angle * 3.14159 * 57.2958, resample=Image.BILINEAR)
        if color is not None:
            # Tint the sprite's RGB by this element's colour multiplier.
            image = to_numpy(element)
            image[:, :, :3] = image[:, :, :3] * color[None, idx, :]
            element = to_pil(image)
        w_2 = math.ceil(element.size[0] / 2)
        h_2 = math.ceil(element.size[1] / 2)
        # NOTE(review): the paste box is 2*ceil(size/2) wide/tall, which only
        # matches the sprite for even dimensions — confirm sprite sizes.
        empty.paste(element, box=(x - w_2, y - h_2, x + w_2, y + h_2))
        print('{:02d}-{}'.format(total_number - idx, namespace_map[i]), x / (2 * width), y / (2 * height))
        # Persist this single layer before flattening it into the running composite.
        empty.save(folder_final + '{:02d}-{}'.format(total_number - idx, namespace_map[i]) + '.png')
        gen_img = Image.alpha_composite(gen_img, empty)
        empty = Image.new('RGBA', tuple(canvas_size), (0, 0, 0, 0))
    if background_img is None:
        bg = Image.new('RGBA', tuple(canvas_size),
                       (background[0], background[1], background[2], 255))
    else:
        bg = background_img
    img_w_bg = Image.alpha_composite(bg, gen_img)
    return gen_img, img_w_bg
def calculate_color(c_variables, background):
    """Mix each element's colour with the background colour by depth.

    ``c_variables[6]`` holds (pre-scaled) z values, ``c_variables[8]`` the raw
    colour parameters.  The background RGB is read as ``background[:, :, 0, 0]``
    (NOTE(review): assumes a spatially constant background tensor — confirm).

    Returns:
        numpy array of shape (N, 3) with the blended colours as float32.
    """
    depth = torch.exp(c_variables[6] / 25)
    # Soft mixing weights between element colour and background colour.
    element_weight = depth / (depth + 2000)
    background_weight = 2000 / (depth + 2000)
    # "Leaky" clamp of the scaled colour into [0, 1] (slope 0.001 outside).
    raw = c_variables[8] / 20
    upper = torch.min(raw, 1 + (raw - 1) * 0.001)
    clamped = torch.max(upper, 0.001 * raw)
    mixed = clamped * element_weight[:, None] + background_weight[:, None] * background[:, :, 0, 0]
    return mixed.to("cpu", torch.float).data.numpy()
def main():
    """Render the final pattern expansion from a saved optimization checkpoint.

    Loads the base pattern and element sprites, restores ``c_variables`` from a
    checkpoint, optionally prunes overlapping/invisible elements, renders each
    element as a separate PNG layer plus full composites, and dumps the element
    placements (xy, theta in degrees, type, background colour) to a JSON file.

    NOTE(review): relies on many module-level names (``args``, ``resume``,
    ``folder``, ``pattern_filename``, ``pattern``, ``number``, ``mean``,
    ``std``, ``n_elements``, ``n_soft_elements``, ``plt``) presumably provided
    by the star imports — confirm.  ``load_iter`` and ``c_variables`` are only
    bound when ``resume`` is truthy, so the script appears to assume resume
    mode; indentation below is reconstructed accordingly.
    """
    global soft, w, w1, w2, w3, w_o, init_iter, base_resize, expand_size, n_soft_elements
    base_resize = [256, 256]
    expand_size = [1, 3, 256, 256]
    # base_pattern, background = load_base_pattern(pattern_filename, args.tiled, base_resize)
    if args.complex_background:
        # Complex backgrounds come back as an extra image tensor, which is
        # converted to a PIL RGBA image and saved for later compositing.
        base_pattern, background, background_img = load_base_pattern(pattern_filename, args.tiled, base_resize, blur=False, complex_background=args.complex_background)
        background_img = tensor2img(background_img)
        background_img = to_pil(background_img*255).convert('RGBA')
        background_img.save(folder_final + args.pattern + '_' + args.version + '_background' + '.png')
    else:
        base_pattern, background = load_base_pattern(pattern_filename, args.tiled, base_resize, blur=False)
        background_img = None
    # Element sprite files are 1-indexed: <root>/data/<pattern>/elements/<i>.png
    element_filename = []
    for i in range(n_soft_elements):
        element_filename.append('{}/data/{}/elements/{}.png'.format(folder_root, pattern, i + 1))
    elements = load_element_PIL(element_filename, number, background)       # PIL sprites for create_img
    elements_tensor = load_element(element_filename, number, background, rotate=False, size=base_resize)  # tensors for composite_layers_A
    base_pattern, background, mean_c, std_c = make_cuda([base_pattern, background, mean, std])
    # ----------------- init ------------------------------
    if resume:
        # Restore the optimized element variables from the checkpoint on disk.
        load_iter = args.resume_int
        PATH = folder + 'checkpoint_' + str(load_iter)
        print(PATH)
        checkpoint = torch.load(PATH)
        c_variables = checkpoint['c_variables']
        # background = checkpoint['background']
        torch.cuda.empty_cache()
        op_variables = select_op_variables(n_elements, args, c_variables)
        # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        # scheduler = CyclicLR(optimizer, base_lr=lr, max_lr=lr * 4, cycle_momentum=False, mode='exp_range',
        # step_size_up=1500)
    # ----------------------declare variables -------------------
    init_iter = load_iter+1
    w = 1
    w1 = 2
    w2 = 4
    w3 = 16.0
    w_o = 0
    soft = True
    # for i in range(len(elements)):
    # elements[i] = add_bg(elements[i], (background[0,:,0,0]*255).to("cpu", torch.int32).data.numpy())
    # if soft:
    # c_variables = remove_invisible(c_variables)
    # c_variables[6] = torch.argmax(c_variables[6], dim=1)#hardmax(c_variables[7])
    remove = False
    # Scale z values up before sorting/colour mixing (calculate_color divides by 25).
    c_variables[6] = c_variables[6]*50
    remove_color_elements = False
    # NOTE(review): shallow list copy — the copy still shares the tensors.
    c_variables_copy = c_variables.copy()
    c_variables = sort_by_z(c_variables)
    color_values = None
    if args.color:
        color_values = calculate_color(c_variables, background)
    if remove:
        # Optional pruning pipeline (disabled above): drop invisible elements,
        # then deduplicate elements that sit too close to each other.
        # NOTE(review): nesting of this whole section under `if remove:` is
        # inferred — original indentation was lost in extraction; confirm.
        c_variables = remove_invisible_z(c_variables, 2000)
        element_classes = torch.argmax(c_variables[7], dim=1)
        distance_matrix = d2_distance_matrix(torch.cat([c_variables[0][:, None] / 10, c_variables[1][:, None] / 10], dim=1))
        idx = select_distance_matrix_idx(distance_matrix, element_classes, c_variables[5],
                                         [10 / 256, 10 / 256, 15 / 256, 15 / 256, 20 / 256, 20 / 256, 5 / 256, 5 / 256, 5 / 256], min_dist= 2/256)
        new_c_variables = []
        for i in c_variables:
            new_c_variables.append(i[idx])
        if args.color and remove_color_elements:
            new_c_variables = remove_bgcolor_elements(new_c_variables, color_values, background, 0.15)
        if args.color:
            color_values = calculate_color(new_c_variables, background)
        c_variables = make_cuda(new_c_variables)
        c_variables = sort_by_z(c_variables, False)
        if args.color:
            color_values = calculate_color(c_variables, background)
    # Positions are stored pre-scaled by 10; theta pre-scaled by 1/1.5.
    xy = torch.cat([c_variables[0][:, None], c_variables[1][:, None]], dim=1).to("cpu", torch.double).data.numpy()/10
    theta = (c_variables[2]* 1.5).to("cpu", torch.double).data.numpy()
    scale = 1.0
    # print(c_variables_copy[8].shape, c_variables[8].shape)
    # Render per-element PNG layers plus composites with/without background.
    img_wo_bg, img_w_bg = create_img(xy, elements, (background[0,:,0,0]*255).to("cpu", torch.int32).data.numpy(),
                                     torch.argmax(c_variables[7], dim=1).to("cpu", torch.int32).data.numpy(),
                                     theta, width=expand_size[2], height=expand_size[3], scale=scale, color=color_values, background_img=background_img)
    rgba = np.array(img_wo_bg)
    b_pattern = tensor2img(base_pattern)
    img_w_bg.save(folder_final+args.pattern+'_'+args.version+'_'+str(args.resume_int)+'.png')
    # save_img_z(folder_final, args.pattern+'_'+args.version+'_idx', img_w_bg, c_variables[0] / 10, c_variables[1] / 10, torch.arange(c_variables_copy[6].shape[0]), size=256*scale)
    # Diagnostic overlays: element indices (pre/post sort), layer order, and z.
    save_img_z(folder_final, args.pattern+'_'+args.version+'_idx'+'_'+str(args.resume_int), img_w_bg, c_variables_copy[0] / 10, c_variables_copy[1] / 10, torch.arange(c_variables_copy[6].shape[0]), size=expand_size[3]*scale, n=0)
    save_img_z(folder_final, args.pattern+'_'+args.version+'_new_idx'+'_'+str(args.resume_int), img_w_bg, c_variables[0] / 10, c_variables[1] / 10, torch.arange(c_variables[6].shape[0]), size=expand_size[3]*scale, n=0)
    save_img_z(folder_final, args.pattern+'_'+args.version+'_layer'+'_'+str(args.resume_int), img_w_bg, c_variables[0] / 10, c_variables[1] / 10, torch.arange(c_variables[6].shape[0], 0, -1), size=expand_size[3]*scale, n=0)
    save_img_z(folder_final, args.pattern+'_'+args.version+'_z'+'_'+str(args.resume_int), img_w_bg, c_variables[0] / 10, c_variables[1] / 10, c_variables[6], size=expand_size[3]*scale)
    # save_img_z(folder_final, args.pattern+'_'+args.version+'_new_r'+'_'+str(args.resume_int), img_w_bg, c_variables[0] / 10, c_variables[1] / 10, c_variables[8][:,0], size=expand_size[3]*scale, n=0)
    # save_img_z(folder_final, args.pattern+'_'+args.version+'_new_g'+'_'+str(args.resume_int), img_w_bg, c_variables[0] / 10, c_variables[1] / 10, c_variables[8][:,1], size=expand_size[3]*scale, n=0)
    # save_img_z(folder_final, args.pattern+'_'+args.version+'_new_b'+'_'+str(args.resume_int), img_w_bg, c_variables[0] / 10, c_variables[1] / 10, c_variables[8][:,2], size=expand_size[3]*scale, n=0)
    # save_img_z(folder_final, args.pattern+'_'+args.version+'__r'+'_'+str(args.resume_int), img_w_bg, c_variables[0] / 10, c_variables[1] / 10, c_variables_copy[8][:,0], size=expand_size[3]*scale, n=0)
    # save_img_z(folder_final, args.pattern+'_'+args.version+'__g'+'_'+str(args.resume_int), img_w_bg, c_variables[0] / 10, c_variables[1] / 10, c_variables_copy[8][:,1], size=expand_size[3]*scale, n=0)
    # save_img_z(folder_final, args.pattern+'_'+args.version+'__b'+'_'+str(args.resume_int), img_w_bg, c_variables[0] / 10, c_variables[1] / 10, c_variables_copy[8][:,2], size=expand_size[3]*scale, n=0)
    # Move everything to CPU float32 for the differentiable compositor.
    elements_tensor[0] = elements_tensor[0].to("cpu", torch.float32)
    for i in range(len(c_variables)):
        c_variables[i] = c_variables[i].to("cpu", torch.float32)
    render, overlap = composite_layers_A(elements_tensor, c_variables, background.to("cpu", torch.float32), n_elements, expand_size, False, 1.5,
                                         color=args.color)
    save_tensor(folder_final, args.pattern+'_'+args.version+'_continuous', render)
    # JSON log of placements; theta * pi * 57.2958 converts to degrees.
    log = {'xy':np.round_(xy, 3).tolist(), 'theta':np.round_(-theta*3.14159 * 57.2958, 3).tolist(),
           'type':torch.argmax(c_variables[7], dim=1).tolist(), 'background':(background[0,:,0,0]*255).to("cpu", torch.int32).data.numpy().tolist()}
    with open(folder_final+args.pattern+'_'+args.version+'result.json', 'w') as fp:
        json.dump(log, fp)
    # Part of the base pattern not covered by any rendered element.
    residual = b_pattern * (1-rgba[:, :, 3:] / 255)
    plt.imsave(folder_final+args.pattern+ 'residual_' + args.version+'.png', residual)
    # render, overlap = composite_layers(elements, new_c_variables, background, n_elements, expand_size, soft, 3)
    # # visualize_list([base_pattern, render])
    # base_pattern = vgg_renormalize(base_pattern, mean_c[:, :3], std_c[:, :3])
    # save_tensor(folder_final, 'BASE', base_pattern)
    # save_tensor(folder_final, args.version, render)
    print(c_variables[0], c_variables[1])
if __name__ == '__main__':
    # Script entry point: render the final expansion from a checkpoint.
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment