Created August 23, 2023 07:45
A script to help you convert a diffusers LoRA to SD WebUI format
from pathlib import Path
from diffusers import StableDiffusionXLPipeline
import torch
from safetensors.torch import save_file
# text_encoder.text_model.encoder.layers.0.self_attn.k_proj.lora_linear_layer.down.weight
# -> lora_te1_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight
# 1. text_encoder -> lora_te1, text_encoder_2 -> lora_te2
# 2. apply LORA_CLIP_MAP
# 3. keep the last 2 dot-separated parts for ".weight" (1 for ".alpha") and replace the remaining "." with "_"
# walkthrough:
# 1. lora_te1.text_model.encoder.layers.0.self_attn.k_proj.lora_linear_layer.down.weight
# 2. lora_te1.text_model.encoder.layers.0.self_attn.k_proj.lora_down.weight
# 3. lora_te1_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight
#
# unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.down.weight
# -> lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k.lora_down.weight
# 1. unet -> lora_unet
# 2. apply LORA_UNET_MAP
# 3. keep the last 2 dot-separated parts for ".weight" (1 for ".alpha") and replace the remaining "." with "_"
# walkthrough:
# 1. lora_unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.down.weight
# 2. lora_unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.lora_down.weight
# 3. lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k.lora_down.weight
# Load the base pipeline; it is only used here to parse the LoRA state dict.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float32, local_files_only=True
)

# Load the diffusers-format LoRA weights and their network alphas.
state_dict, network_alphas = pipe.lora_state_dict(
    Path("<your_lora.safetensors>"), local_files_only=True
)
LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
    "lora_linear_layer.down": "lora_down",
    "lora_linear_layer.up": "lora_up",
}
LORA_UNET_MAP = {
    "processor.to_q_lora.down": "to_q.lora_down",
    "processor.to_q_lora.up": "to_q.lora_up",
    "processor.to_k_lora.down": "to_k.lora_down",
    "processor.to_k_lora.up": "to_k.lora_up",
    "processor.to_v_lora.down": "to_v.lora_down",
    "processor.to_v_lora.up": "to_v.lora_up",
    "processor.to_out_lora.down": "to_out_0.lora_down",
    "processor.to_out_lora.up": "to_out_0.lora_up",
    "processor.to_q.alpha": "to_q.alpha",
    "processor.to_k.alpha": "to_k.alpha",
    "processor.to_v.alpha": "to_v.alpha",
}
webui_lora_state_dict = {}
for k, v in state_dict.items():
    # 1. Rename the model prefix to the webui convention.
    is_text_encoder = False
    prefix = k.split(".")[0]
    if prefix == "text_encoder":
        k = k.replace("text_encoder", "lora_te1")
        is_text_encoder = True
    elif prefix == "text_encoder_2":
        k = k.replace("text_encoder_2", "lora_te2")
        is_text_encoder = True
    elif prefix == "unet":
        k = k.replace("unet", "lora_unet")

    # 2. Apply the per-module name mapping.
    if is_text_encoder:
        for map_k, map_v in LORA_CLIP_MAP.items():
            k = k.replace(map_k, map_v)
    else:
        for map_k, map_v in LORA_UNET_MAP.items():
            k = k.replace(map_k, map_v)

    # 3. Keep the last two dot-separated parts for ".weight" keys
    # (e.g. "lora_down.weight"), one for ".alpha", and replace the
    # remaining dots with underscores.
    keep_dots = 0
    if k.endswith(".alpha"):
        keep_dots = 1
    elif k.endswith(".weight"):
        keep_dots = 2
    parts = k.split(".")
    if keep_dots:
        k = "_".join(parts[:-keep_dots]) + "." + ".".join(parts[-keep_dots:])
    else:
        # Without this guard, parts[:-0] would mangle any key that ends in
        # neither ".weight" nor ".alpha".
        k = "_".join(parts)

    webui_lora_state_dict[k] = v

save_file(webui_lora_state_dict, "<your_lora_for_webui.safetensors>")
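Optional: a quick sanity check (a minimal sketch, assuming the script above has just written the output file) reloads the converted LoRA and prints a few keys so you can confirm the webui naming:

from safetensors.torch import load_file

# Reload the file written by save_file above and show a sample of keys.
converted = load_file("<your_lora_for_webui.safetensors>")
for key in sorted(converted)[:5]:
    print(key, tuple(converted[key].shape))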
Hello!
Can you tell me how I can run this on Google Colab?
https://imgur.com/DAJrBfy
I tried to run it, but I get an error.
Thanks! Any help would be appreciated!
That works for me! Thanks a lot!
If my original LoRA activates with the trigger word " wny", how do I put that into the converted LoRA? Where do I edit the code?
I only tested this with DreamBooth LoRAs, and I'm not sure whether it works with others. I think the differences lie in the keys of the state dict. So one possible solution is to check the keys of the state dict you trained, then load a WebUI-compatible state dict and compare the differences, as in the sketch below.
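A minimal sketch of that comparison, assuming both files are safetensors (the file names here are hypothetical placeholders):

from safetensors.torch import load_file

# Hypothetical paths: your trained LoRA and a known webui-compatible LoRA.
mine = load_file("my_trained_lora.safetensors")
reference = load_file("known_webui_lora.safetensors")

# Print a sample of keys from each to spot naming-scheme differences.
print("trained:", sorted(mine)[:5])
print("webui:  ", sorted(reference)[:5])

# Keys present in one file but not the other.
print("only in trained:", sorted(set(mine) - set(reference))[:5])
print("only in webui:  ", sorted(set(reference) - set(mine))[:5])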