@catboxanon
Last active September 2, 2023 16:45
import os
import torch
import gradio as gr
from modules import script_callbacks, scripts, shared
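
# Wrapper around the text encoder's process_tokens: when enabled, it runs the
# tokens through CLIP twice (once with CLIP skip 1, once with CLIP skip 2)
# and blends the two results.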
class CLIPBlend:
    def __init__(self):
        self.process_tokens = None

    def __call__(self, remade_batch_tokens, multipliers):
        if shared.opts.clip_blend_enabled:
            ratio1 = shared.opts.clip_blend_ratio1
            ratio2 = shared.opts.clip_blend_ratio2
            CLIP_original = shared.opts.CLIP_stop_at_last_layers
            shared.opts.CLIP_stop_at_last_layers = 1
            z1 = self.process_tokens(remade_batch_tokens, multipliers)  # type: ignore
            shared.opts.CLIP_stop_at_last_layers = 2
            z2 = self.process_tokens(remade_batch_tokens, multipliers)  # type: ignore
            shared.opts.CLIP_stop_at_last_layers = CLIP_original
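            # Linear interpolation between the two hidden states: R1=0
            # reproduces CLIP skip 1, R1=1 reproduces CLIP skip 2, and
            # values outside [0, 1] extrapolate past either one.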
            blend1 = z1 * (1. - ratio1) + z2 * ratio1
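            # Optionally compute a second blend and concatenate it along the
            # token axis, doubling the width of the conditioning tensor.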
            if shared.opts.clip_blend_concat:
                blend2 = z1 * (1. - ratio2) + z2 * ratio2
                return torch.hstack([blend1, blend2])
            return blend1
        z = self.process_tokens(remade_batch_tokens, multipliers)  # type: ignore
        return z

clip_blend = CLIPBlend()
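
# When a model loads, wrap its text encoder's process_tokens with the
# CLIPBlend instance, keeping the original function so it can be called
# internally and restored later.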
def on_model_loaded(sd_model):
    try:
        clip_blend.process_tokens = sd_model.cond_stage_model.process_tokens
        sd_model.cond_stage_model.process_tokens = clip_blend
    except AttributeError:
        pass
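
# On script unload, undo the monkey-patch by putting the wrapped
# process_tokens back on the text encoder.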
def on_script_unloaded():
    cond_stage_model = shared.sd_model.cond_stage_model  # type: ignore
    try:
        if isinstance(cond_stage_model.process_tokens, CLIPBlend):
            cond_stage_model.process_tokens = cond_stage_model.process_tokens.process_tokens
    except AttributeError:
        pass
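
# Register the script's settings in their own 'CLIP Blend' section.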
def on_ui_settings():
    section = ('clip_blend', 'CLIP Blend')
    shared.opts.add_option('clip_blend_enabled', shared.OptionInfo(
        False, 'Enable CLIP Blend', section=section
    ))
    shared.opts.add_option("clip_blend_ratio1", shared.OptionInfo(
        0, "Blend Ratio (R1) - The result is calculated as Z1 * (1 - R1) + Z2 * R1. Set to 0 to get CLIP skip=1; set to 1 to get CLIP skip=2",
        gr.Slider, {"minimum": -1, "maximum": 2, "step": 0.1}, section=section
    ))
    shared.opts.add_option("clip_blend_concat", shared.OptionInfo(
        False, "Concatenate second CLIP Blend", section=section
    ))
    shared.opts.add_option("clip_blend_ratio2", shared.OptionInfo(
        1, "Blend Ratio (R2) - The result is calculated as Z1 * (1 - R2) + Z2 * R2. Set to 0 to get CLIP skip=1; set to 1 to get CLIP skip=2",
        gr.Slider, {"minimum": -1, "maximum": 2, "step": 0.1}, section=section
    ))
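
# Helpers for the XYZ grid integration below: axis values arrive as strings,
# so booleans are parsed from text and all values are written into
# override_settings on the processing object.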
def format_bool(x):
    return x.lower().strip() not in ['0', 'false']


def apply_override(p, opt, x, is_bool=False):
    getattr(p, 'override_settings')[opt] = format_bool(x) if is_bool else x


def apply_label(label, x, is_bool=False):
    return f"{label + (': ' if label else '')}{str(format_bool(x)) if is_bool else x}"
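
# Find the built-in xyz_grid script and register four axis options so the
# blend settings can be swept in XYZ plots.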
for scriptDataTuple in scripts.scripts_data:
    if os.path.basename(scriptDataTuple.path) == "xyz_grid.py":
        xy_grid = scriptDataTuple.module
        blend_enabled = xy_grid.AxisOption(
            "[CLIP Blend] Blend Enabled",
            str,
            lambda p, field, _: apply_override(p, 'clip_blend_enabled', field, is_bool=True),
            format_value=lambda p, opt, x: apply_label('CLIP Blend Enabled', x, is_bool=True)
        )
        ratio1 = xy_grid.AxisOption(
            "[CLIP Blend] Blend Ratio (R1)",
            float,
            lambda p, field, _: apply_override(p, 'clip_blend_ratio1', field, is_bool=False)
        )
        concat_enabled = xy_grid.AxisOption(
            "[CLIP Blend] Concatenate Second Blend",
            str,
            lambda p, field, _: apply_override(p, 'clip_blend_concat', field, is_bool=True),
            format_value=lambda p, opt, x: apply_label('Concatenated Second CLIP Blend', x, is_bool=True)
        )
        ratio2 = xy_grid.AxisOption(
            "[CLIP Blend] Blend Ratio (R2)",
            float,
            lambda p, field, _: apply_override(p, 'clip_blend_ratio2', field, is_bool=False)
        )
        xy_grid.axis_options.extend([
            blend_enabled,
            ratio1,
            concat_enabled,
            ratio2
        ])

script_callbacks.on_script_unloaded(on_script_unloaded)
script_callbacks.on_model_loaded(on_model_loaded)
script_callbacks.on_ui_settings(on_ui_settings)
catboxanon commented Mar 2, 2023

Note: As of webui version 1.6.0, prompt conds are cached by default whenever possible. As a result, using this script with the XYZ grid, or quickly toggling it for the same prompt, will reuse the previously cached result instead of recomputing it. The cache can be disabled in Settings under Optimizations -> Persistent cond cache.
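
For reference, a minimal sketch of disabling the cache from another script rather than through the UI, assuming the 1.6.0 option key is persistent_cond_cache (verify the name against your webui version's shared options):

from modules import shared

# ASSUMPTION: 'persistent_cond_cache' is the 1.6.0 settings key behind the
# Optimizations -> Persistent cond cache toggle; check shared_options.py
# for your version before relying on it.
shared.opts.persistent_cond_cache = False  # force conds to be recomputed each run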
