Takuma Mori (takuma104) • Tokyo
# Script for converting a HF Diffusers saved pipeline to a ControlNet checkpoint.
# *Only* converts the ControlNet.
# Does not convert optimizer state or any other thing.
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
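The preview above stops at the imports. A minimal sketch of the overall flow, assuming the pipeline was saved with save_pretrained() so the ControlNet sits in a "controlnet" subfolder; the paths below are placeholders, and the key renaming the full script performs (Diffusers layout to the original ControlNet layout) is omitted:

# Hedged sketch only: load just the ControlNet from a saved pipeline directory
# and re-save its weights as a standalone safetensors file. Placeholder paths.
from diffusers import ControlNetModel
from safetensors.torch import save_file

controlnet = ControlNetModel.from_pretrained("path/to/saved_pipeline", subfolder="controlnet")
save_file(controlnet.state_dict(), "controlnet.safetensors")
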
import math
import safetensors
import torch
from diffusers import DiffusionPipeline
"""
Kohya's LoRA format Loader for Diffusers
Usage:
```py
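The original usage example is not shown in this preview. As a rough illustration of the same idea, recent Diffusers releases can load a Kohya-format LoRA directly; this uses the stock pipe.load_lora_weights() API rather than this gist's loader, and the directory and file names are placeholders:

# Hedged sketch: apply a Kohya-format LoRA with the built-in Diffusers loader.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("path/to/lora_dir", weight_name="kohya_lora.safetensors")  # placeholders
image = pipe("a tuxedo cat", num_inference_steps=25).images[0]
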
@takuma104
takuma104 / README.md
Last active February 8, 2024 01:42
clip_text_custom_embedder

Usage

from clip_text_custom_embedder import text_embeddings
from diffusers import StableDiffusionPipeline
import torch
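The README preview ends after the imports. A hedged sketch of how the helper is typically wired in, assuming text_embeddings(pipe, prompt, negative_prompt) returns a (prompt_embeds, negative_prompt_embeds) pair and that the weighting syntax follows the A1111 convention; neither is confirmed by this preview:

# Hedged sketch: weighted prompts fed to the pipeline as precomputed embeddings.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
cond, uncond = text_embeddings(pipe, "a (tuxedo:1.2) cat", "blurry, low quality")
image = pipe(prompt_embeds=cond, negative_prompt_embeds=uncond).images[0]
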
import torch
import json
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.models.attention import Attention
from diffusers.models.attention_processor import (
    AttnProcessor2_0,
    XFormersAttnProcessor,
    LoRAAttnProcessor2_0,
    LoRAXFormersAttnProcessor,
)
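These are the attention processor classes Diffusers switches between. A minimal sketch of choosing a backend on a loaded pipeline; the model ID is a placeholder:

# Hedged sketch: selecting an attention backend.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
# Either the PyTorch 2.0 scaled_dot_product_attention processor...
pipe.unet.set_attn_processor(AttnProcessor2_0())
# ...or xFormers memory-efficient attention instead:
# pipe.enable_xformers_memory_efficient_attention()
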
import math
import numpy as np
import safetensors
import torch
import torch.nn as nn
from PIL import Image
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline
from diffusers.utils import _get_model_file, DIFFUSERS_CACHE
import torch
import json
from diffusers import StableDiffusionPipeline
def print_memory_usage(width, height, batch, xformers, with_lora):
    def on_off(cond):
        return 'ON' if cond else 'OFF'
    # Peak VRAM allocated by PyTorch since the last reset, reported in MB.
    mem_bytes = torch.cuda.max_memory_allocated()
    mem_MB = int(mem_bytes / (10**6))
    stats = {'width': width, 'height': height, 'batch': batch, 'xformers': on_off(xformers),
             'lora': on_off(with_lora), 'mem_MB': mem_MB}
    print(json.dumps(stats))
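A sketch of how such a helper is usually driven: reset the CUDA peak-memory counter, run one generation, then report. pipe and prompt are assumed to exist in the surrounding script:

# Hedged usage sketch.
torch.cuda.reset_peak_memory_stats()
images = pipe(prompt, width=512, height=512, num_images_per_prompt=4).images
print_memory_usage(width=512, height=512, batch=4, xformers=True, with_lora=False)
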
@takuma104
takuma104 / diffusers_sd_xformers_flash_attention.py
Last active July 29, 2023 20:09
Generating (almost) reproducible pictures using Diffusers with xFormers
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
plt.rcParams["figure.figsize"] = (10,5)
plt.rcParams['figure.facecolor'] = 'white'
def generate_tuxedo_cat_picture(fn_prefix, seed=0):
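The function body is cut off in this preview. The usual ingredients for (near-)reproducible output are a fixed-seed torch.Generator and a fixed scheduler; xFormers kernels are not bit-exact, so results are only approximately reproducible. A hedged, self-contained sketch (model ID and file name are placeholders):

# Hedged sketch: seeded generation with xFormers enabled.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_xformers_memory_efficient_attention()
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe("a tuxedo cat", generator=generator, num_inference_steps=25).images[0]
image.save("tuxedo_cat_seed0.png")
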
import torch
import sys
from safetensors.torch import load_file
if __name__ == "__main__":
    filename = sys.argv[1]
    state_dict = load_file(filename)
    for key, value in state_dict.items():
        if "lora_down" in key:
            lora_name = key.split(".")[0]
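For context, keys in a Kohya-format LoRA file typically look like the illustrative names below, so key.split(".")[0] yields the target module name and the first dimension of a lora_down weight gives the LoRA rank:

# Illustrative key names only (not read from any particular file):
#   lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q.lora_down.weight
#   lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q.lora_up.weight
#   lora_te_text_model_encoder_layers_0_self_attn_q_proj.alpha
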
@takuma104
takuma104 / untitled11.ipynb
Last active May 31, 2023 16:34
monkey_patch_minimum_test.ipynb
import json
import struct
import sys
from pprint import pprint
def parse_safetensors_header(file_path):
    # modified from https://huggingface.co/docs/safetensors/metadata_parsing
    with open(file_path, 'rb') as f:
        header_bytes = f.read(8)
        # Interpret the bytes as a little-endian unsigned 64-bit integer:
        # the length of the JSON header that follows.
        header_size = struct.unpack('<Q', header_bytes)[0]
        header_json = f.read(header_size).decode('utf-8')
        return json.loads(header_json)
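A short usage sketch for the helper above; the file path comes from the command line:

# Hedged usage sketch: dump the metadata block and a few tensor names.
if __name__ == "__main__":
    header = parse_safetensors_header(sys.argv[1])
    pprint(header.get("__metadata__", {}))  # training metadata, if present
    pprint(list(header.keys())[:10])        # first few tensor entries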