@JemiloII
Last active May 13, 2023 19:25
Trying to load LyCORIS into the Pipeline
# ./generate.py
import os
import torch
from diffusers.utils import logging
from diffusers import EulerAncestralDiscreteScheduler, DiffusionPipeline
from lib.utils import get_settings, get_tensor, save
# Yes, I love JavaScript
logging.set_verbosity_debug()
console = logging.get_logger("diffusers")
cwd = os.getcwd()
console.log = console.debug
console.log("Backends Cudnn Enabled: " + str(torch.backends.cudnn.enabled))
console.log("Cuda is available: " + str(torch.cuda.is_available()))
def run():
    settings = get_settings(
        num_inference_steps=75,
        guidance_scale=7.75,
        seed=75,
        num_train_timesteps=975,
        torch_dtype="float16"
    )

    euler_a = EulerAncestralDiscreteScheduler(
        beta_start=settings["euler_a"]["beta_start"],
        beta_end=settings["euler_a"]["beta_end"],
        beta_schedule=settings["euler_a"]["beta_schedule"],
        num_train_timesteps=settings["euler_a"]["num_train_timesteps"],
        prediction_type=settings["euler_a"]["prediction_type"],
    )
    # torch.inference_mode() already implies no_grad, so nesting both is redundant
    with torch.inference_mode(), torch.autocast(settings["pipeline"]["device"]):
        pipeline = DiffusionPipeline.from_pretrained(
            pretrained_model_name_or_path=settings["pipeline"]["cache_dir"] + settings["pipeline"]["pretrained_model_name_or_path"],
            torch_dtype=get_tensor(settings["pipeline"]["torch_dtype"]),
            # device_map=settings["pipeline"]["device_map"],
            cache_dir=settings["pipeline"]["cache_dir"],
            use_safetensors=settings["pipeline"]["use_safetensors"],
            safety_checker=settings["pipeline"]["safety_checker"],
            requires_safety_checker=settings["pipeline"]["requires_safety_checker"],
            local_files_only=settings["pipeline"]["local_files_only"],
            force_download=settings["pipeline"]["force_download"],
            scheduler=euler_a,
        )
        pipeline.to(torch_device=settings["pipeline"]["device"], torch_dtype=get_tensor(settings["pipeline"]["torch_dtype"]))
        # Hugging Face Example
        # pipeline.unet.load_attn_procs(
        #     lora_path,
        #     cache_dir=settings["pipeline"]["cache_dir"],
        #     local_files_only=settings["pipeline"]["local_files_only"],
        #     use_safetensors=settings["pipeline"]["use_safetensors"],
        # )

        # LyCORIS Example - https://github.com/huggingface/diffusers/pull/3294
        # While it is labeled LoRA on civitai, it is actually LyCORIS
        lycoris_path = f"{cwd}/assets/lora/LookingDisgusted_V1.safetensors"
        # load_lora_weights has no lora_weight kwarg; the LoRA scale is applied
        # at call time through cross_attention_kwargs instead (see below)
        pipeline.load_lora_weights(lycoris_path)
        image = pipeline(
            prompt=settings["image"]["prompt"],
            negative_prompt=settings["image"]["negative_prompt"],
            guidance_scale=settings["image"]["guidance_scale"],
            num_inference_steps=settings["image"]["num_inference_steps"],
            generator=torch.Generator("cuda").manual_seed(settings["image"]["seed"]),
            height=settings["image"]["height"],
            width=settings["image"]["width"],
            # equivalent of the intended lora_weight=1.0
            cross_attention_kwargs={"scale": 1.0},
        ).images[0]

        # todo: Update file with generation details
        save(image=image, settings=settings, preview=True)
run()
# LyCORIS
While it is labeled LoRA on civitai, it is actually a LyCORIS model.
https://civitai.com/models/13910?modelVersionId=16368
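
A quick way to check what a civitai download actually contains: plain LoRA files only carry "lora_up"/"lora_down" style key names, while LyCORIS variants such as LoHa and LoKr add their own markers. A minimal sketch, assuming the file above and the common "hada_"/"lokr_" key markers used by LyCORIS (the marker list is an assumption, not something this gist verifies):

# ./inspect_lora.py
from safetensors.torch import load_file

state_dict = load_file("assets/lora/LookingDisgusted_V1.safetensors")
# LoHa keys contain "hada_", LoKr keys contain "lokr_"; plain LoRA has neither
is_lycoris = any("hada_" in key or "lokr_" in key for key in state_dict)
print("LyCORIS-style keys found:", is_lycoris)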
# Model
https://huggingface.co/sakistriker/AbyssOrangeMix3
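
Since the pipeline loads with local_files_only=True, the model has to exist under assets/models/AOM3 before generate.py runs. A minimal sketch for fetching it with huggingface_hub, assuming a recent huggingface_hub release and that the repo's layout is what from_pretrained expects; the local_dir path mirrors the cache_dir + pretrained_model_name_or_path concatenation in the settings:

# ./download_model.py
from huggingface_hub import snapshot_download

# matches settings["pipeline"]["cache_dir"] + settings["pipeline"]["pretrained_model_name_or_path"]
snapshot_download(
    repo_id="sakistriker/AbyssOrangeMix3",
    local_dir="./assets/models/AOM3",
)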
# ./lib/utils.py
import json
import os
import random
import torch
from builtins import open, round
from datetime import datetime
from diffusers.utils import logging
from shutil import copy2
console = logging.get_logger("diffusers")
cwd = os.getcwd()
def get_settings(
    # note: these defaults are evaluated once at import time, not per call,
    # and the "cuda" generator default requires CUDA to be available on import
    num_inference_steps=random.randrange(25, 60),
    guidance_scale=round(random.uniform(6.5, 10.5), 1),
    seed=torch.Generator("cuda").seed(),
    num_train_timesteps=935,
    torch_dtype="float16",
):
    return {
        "image": {
            "seed": seed,
            "prompt": "1girl, orange hair, red eyes, loli, 1girl, anime, black bow, high quality, masterpiece, absurdres, thighhighs, sfw",
            "negative_prompt": "(worst quality, low quality:1.4), multiple girls, (nose, tooth, rouge, eyeshadow:1.4), deformed, (jpeg artifacts:1.4), (depth of field, bokeh, blurry, film grain, chromatic aberration, lens flare:1.0), (1boy, abs, muscular, rib:1.0), greyscale, monochrome, dusty sunbeams, motion blur, text, title, logo, signature, panties, nsfw",
            "width": 512,
            "height": 768,
            "guidance_scale": guidance_scale,
            "num_inference_steps": num_inference_steps,
        },
        "euler_a": {
            "beta_start": 0.001775,
            "beta_end": 0.01,
            "beta_schedule": "linear",
            "num_train_timesteps": num_train_timesteps,
            "prediction_type": "epsilon",
        },
        "pipeline": {
            # https://huggingface.co/sakistriker/AbyssOrangeMix3
            "pretrained_model_name_or_path": "AOM3",
            "cache_dir": f"{cwd}/assets/models/",
            "torch_dtype": torch_dtype,
            "device": "cuda",
            "device_map": "sequential",
            "use_safetensors": True,
            "safety_checker": None,
            "requires_safety_checker": False,
            "local_files_only": True,
            "force_download": False,
        }
    }
def get_tensor(name):
    match name:
        case "float16":
            return torch.float16
        case "float32":
            return torch.float32
        case "float64":
            return torch.float64
        case _:
            # fail loudly instead of silently returning None
            raise ValueError(f"Unsupported torch_dtype: {name}")
def save(image, settings, preview=False, output=True):
    os.makedirs("./outputs", exist_ok=True)
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    with open(f"./outputs/{timestamp}.json", "w") as file:
        json.dump(settings, file, indent=4)
    if output:
        image.save(f"./outputs/{timestamp}.png")
        console.info(f"Saved image to ./outputs/{timestamp}.png")
    if preview and output:
        copy2(f"./outputs/{timestamp}.json", "./outputs/preview.json")
        copy2(f"./outputs/{timestamp}.png", "./outputs/preview.png")
        console.info("Saved preview.")
    if preview and not output:
        # with output=False, only the preview copies are written
        copy2(f"./outputs/{timestamp}.json", "./outputs/preview.json")
        image.save("./outputs/preview.png")
        console.info("Saved preview.")