Takuma Mori (takuma104), Tokyo
import sys

import torch
from safetensors.torch import load_file

if __name__ == "__main__":
    # Print the LoRA module names found in a .safetensors file.
    filename = sys.argv[1]
    state_dict = load_file(filename)
    for key, value in state_dict.items():
        if "lora_down" in key:
            # Keys look like "<module_name>.lora_down.weight"; take the prefix.
            lora_name = key.split(".")[0]
            print(lora_name)  # assumed completion: the gist preview cuts off here
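Saved under an illustrative name such as list_lora_names.py, the script above would be invoked as:

    python list_lora_names.py some_lora_weights.safetensors

printing one line per LoRA-wrapped module found in the file.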
import math

import numpy as np
import safetensors
import torch
import torch.nn as nn
from PIL import Image

from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline
from diffusers.utils import _get_model_file, DIFFUSERS_CACHE
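These imports (note _get_model_file and DIFFUSERS_CACHE) suggest a script that resolves a LoRA weight file and patches it into a diffusers pipeline. As a hypothetical sketch of the core arithmetic only, not the gist's actual code: a LoRA pair stores a low-rank update that is added onto a frozen weight (torch is imported above).

def apply_lora_delta(weight, lora_down, lora_up, alpha):
    # delta_W = up @ down has the full weight's shape.
    # The alpha / rank scaling is an assumption; trainers differ on this convention.
    rank = lora_down.shape[0]
    delta = lora_up.to(torch.float32) @ lora_down.to(torch.float32)
    return weight + (alpha / rank) * delta.to(weight.dtype)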
import json

import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.models.attention import Attention
from diffusers.models.attention_processor import (
    AttnProcessor2_0,
    XFormersAttnProcessor,
    LoRAAttnProcessor2_0,
    LoRAXFormersAttnProcessor,
)
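These processor classes are what diffusers uses to switch attention implementations. A minimal sketch of swapping them on a pipeline's UNet, assuming a stock StableDiffusionPipeline (the model id is illustrative):

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Route every attention layer through PyTorch 2.0 scaled-dot-product attention...
pipe.unet.set_attn_processor(AttnProcessor2_0())

# ...or through xFormers memory-efficient attention instead.
pipe.unet.set_attn_processor(XFormersAttnProcessor())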
takuma104 / untitled11.ipynb (last active May 31, 2023 16:34): monkey_patch_minimum_test.ipynb. [Notebook preview not available.]
import json
import struct
import sys
from pprint import pprint

def parse_safetensors_header(file_path):
    # modified from https://huggingface.co/docs/safetensors/metadata_parsing
    with open(file_path, 'rb') as f:
        header_bytes = f.read(8)
        # Interpret the bytes as a little-endian unsigned 64-bit integer:
        # it is the byte length of the JSON header that follows.
        header_len = struct.unpack('<Q', header_bytes)[0]
        # The header itself is UTF-8 JSON describing every tensor.
        return json.loads(f.read(header_len))
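Since sys and pprint are already imported, dumping a file's header from the command line only needs an entry point:

if __name__ == "__main__":
    pprint(parse_safetensors_header(sys.argv[1]))

The header lists each tensor's dtype, shape, and data offsets, plus an optional __metadata__ block.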
import json

import torch
from diffusers import StableDiffusionPipeline

def print_memory_usage(width, height, batch, xformers, with_lora):
    def on_off(cond):
        return 'ON' if cond else 'OFF'
    mem_bytes = torch.cuda.max_memory_allocated()
    mem_MB = int(mem_bytes / (10**6))
    stats = {'width': width, 'height': height, 'batch': batch, 'xformers': on_off(xformers),
             'with_lora': on_off(with_lora), 'mem_MB': mem_MB}  # trailing fields are an assumed completion
    print(json.dumps(stats))
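max_memory_allocated() reports a high-water mark, so each measured run should reset the counter first. An illustrative measurement, assuming a pipe like the one returned by create_pipeline() in the next snippet:

torch.cuda.reset_peak_memory_stats()
pipe("a photo of a cat", width=512, height=512, num_images_per_prompt=1)
print_memory_usage(512, 512, batch=1, xformers=True, with_lora=False)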
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

def create_pipeline():
    pipe = StableDiffusionPipeline.from_pretrained(
        "gsdf/Counterfeit-V2.5", torch_dtype=torch.float16, safety_checker=None
    ).to("cuda")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True)
    pipe.enable_xformers_memory_efficient_attention()
    return pipe
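Typical use, with an illustrative prompt and a fixed seed for reproducibility:

pipe = create_pipeline()
generator = torch.Generator("cuda").manual_seed(0)
image = pipe("masterpiece, best quality, 1girl",
             negative_prompt="worst quality, low quality",
             num_inference_steps=20, generator=generator).images[0]
image.save("out.png")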
import sys

import safetensors.torch
import torch

def dump_keys(parent, suffix=''):
    # Recursively walk a (possibly nested) dict of tensors, printing each
    # tensor's shape and basic statistics.
    for k in sorted(parent.keys()):
        if isinstance(parent[k], torch.Tensor):
            print(f'{suffix}{k} {list(parent[k].shape)} mean={torch.mean(parent[k]):.3g} std={torch.std(parent[k]):.3g}')
        else:
            dump_keys(parent[k], f'{suffix}{k}.')

if __name__ == "__main__":
    # Assumed entry point: dump the tensors of a file given on the command line.
    dump_keys(safetensors.torch.load_file(sys.argv[1]))
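Because the recursion descends into anything that is not a tensor, the same helper also handles nested checkpoints loaded with torch.load, not just the flat dicts that safetensors files produce.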
takuma104 / untitled11.ipynb (last active May 19, 2023 18:26): monkey_patch_minimum_test.ipynb. [Notebook preview not available.]