@takuma104
Created May 23, 2023 18:01
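
# Peak CUDA memory comparison for diffusers LoRA loading: a rank-4 vs. a
# rank-128 UNet-only LoRA from the Hub, plus a local .safetensors LoRA file.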
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler


def create_pipeline():
    # Counterfeit-V2.5 in fp16 with the safety checker disabled, using a
    # DPM-Solver multistep scheduler with Karras sigmas and xformers
    # memory-efficient attention (requires the xformers package).
    pipe = StableDiffusionPipeline.from_pretrained(
        "gsdf/Counterfeit-V2.5", torch_dtype=torch.float16, safety_checker=None
    ).to("cuda")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        pipe.scheduler.config, use_karras_sigmas=True
    )
    pipe.enable_xformers_memory_efficient_attention()
    return pipe


def render(pipe, prompt):
    # Four 512x512 images in 15 steps from a fixed seed, so successive
    # measurements are comparable. (`prompt` is passed explicitly rather
    # than read from a module-level global, as the original did.)
    return pipe(
        prompt=prompt,
        width=512,
        height=512,
        num_inference_steps=15,
        num_images_per_prompt=4,
        generator=torch.manual_seed(0),
    ).images
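

# Note: torch.cuda.max_memory_allocated() reports peak *allocated* bytes
# since the last reset_peak_memory_stats() call; dividing by 10**6 yields
# decimal megabytes (divide by 2**20 instead for MiB).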


if __name__ == "__main__":
    prompt = "A photo of sks dog in a bucket"

    # Rank-4 UNet-only LoRA from the Hub
    torch.cuda.reset_peak_memory_stats()
    pipe = create_pipeline()
    pipe.load_lora_weights("takuma104/lora_unetonly_rank4")
    render(pipe, prompt)
    mem_bytes = torch.cuda.max_memory_allocated()
    print(f"rank4 -> {mem_bytes / (10**6)}MB")
    del pipe

    # Rank-128 UNet-only LoRA from the Hub
    torch.cuda.reset_peak_memory_stats()
    pipe = create_pipeline()
    pipe.load_lora_weights("takuma104/lora_unetonly_rank128")
    render(pipe, prompt)
    mem_bytes = torch.cuda.max_memory_allocated()
    print(f"rank128 -> {mem_bytes / (10**6)}MB")
    del pipe

    # Local .safetensors LoRA
    torch.cuda.reset_peak_memory_stats()
    pipe = create_pipeline()
    pipe.load_lora_weights("../stable-diffusion-study/models/lora/light_and_shadow.safetensors")
    render(pipe, prompt)
    mem_bytes = torch.cuda.max_memory_allocated()
    print(f"light_and_shadow -> {mem_bytes / (10**6)}MB")