@varunshenoy
Created June 21, 2023 01:52
An extension for Opendream that provides an operation for super-resolution via ControlNet with Tile preprocessing. Read more here: https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile
import torch
from PIL import Image
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetImg2ImgPipeline,
    UniPCMultistepScheduler,
)

from opendream import opendream
from opendream.layer import ImageLayer, Layer


def resize_for_condition_image(input_image: Image.Image, resolution: int) -> Image.Image:
    # Scale the image so its short side is approximately `resolution`, then snap
    # both dimensions to multiples of 64 (as required by the diffusion UNet).
    input_image = input_image.convert("RGB")
    W, H = input_image.size
    k = float(resolution) / min(H, W)
    H *= k
    W *= k
    H = int(round(H / 64.0)) * 64
    W = int(round(W / 64.0)) * 64
    return input_image.resize((W, H), resample=Image.LANCZOS)


@opendream.define_op
def controlnet_tile(
    image_layer: ImageLayer,
    device: str = "cpu",
    model_ckpt: str = "runwayml/stable-diffusion-v1-5",
    batch_size: int = 1,
    seed: int = 42,
    selected: int = 0,
    num_steps: int = 20,
    **kwargs,
):
    # Load the ControlNet Tile model and attach it to an img2img pipeline.
    controlnet = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11f1e_sd15_tile", torch_dtype=torch.float32
    )
    pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
        model_ckpt, controlnet=controlnet, torch_dtype=torch.float32, safety_checker=None
    ).to(device)
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

    if device == "cuda":
        pipe.enable_xformers_memory_efficient_attention()
        pipe.enable_model_cpu_offload()

    # One generator per candidate image so each result is reproducible per seed.
    generator = [torch.Generator().manual_seed(seed + i) for i in range(batch_size)]

    # The resized input doubles as both the img2img init image and the
    # ControlNet conditioning image, which is how the Tile model is used.
    condition_image = resize_for_condition_image(image_layer.get_image(), 256)
    controlnet_image = pipe(
        prompt="best quality",
        negative_prompt="blur, lowres, bad anatomy, bad hands, cropped, worst quality",
        image=condition_image,
        control_image=condition_image,
        width=condition_image.size[0],
        height=condition_image.size[1],
        strength=1.0,
        num_inference_steps=num_steps,
        num_images_per_prompt=batch_size,  # generate batch_size candidates; `selected` picks one
        generator=generator,
    ).images[selected]

    return Layer(image=controlnet_image)
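

if __name__ == "__main__":
    # Quick sanity check for the resize helper (a minimal sketch; pure PIL,
    # no GPU or Opendream server required). A hypothetical 768x512 input with
    # resolution=256 scales the short side toward 256 and snaps both sides to
    # multiples of 64, giving (384, 256).
    demo = Image.new("RGB", (768, 512))
    print(resize_for_condition_image(demo, 256).size)  # -> (384, 256)

    # To exercise the full op, construct an ImageLayer from an existing image
    # (constructor details depend on Opendream's Layer API) and call, e.g.,
    # controlnet_tile(layer, device="cuda", num_steps=20, batch_size=1).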