import torch
from einops import rearrange, repeat
def block_to_key(block):
    # Map a (block_type, index) pair such as ("input", 4) to a short cache key like "in4".
    if block[0] == "input":
        return "in" + str(block[1])
    elif block[0] == "output":
        return "out" + str(block[1])
    elif block[0] == "middle":
        return "mid"
'''
https://arxiv.org/abs/2312.00858
1. put this file in ComfyUI/custom_nodes
2. load node from <loaders>
start_step, end_step: apply this method when the timestep is between start_step and end_step
cache_interval: interval of caching (1 means no caching)
cache_depth: depth of caching
'''
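A minimal sketch of how the three parameters above could gate cache reuse per sampling step (the helper name and exact gating are assumptions, not the node's actual code):
def should_reuse_cache(step, start_step, end_step, cache_interval):
    # Outside [start_step, end_step] the UNet always runs in full.
    if step < start_step or step > end_step:
        return False
    # Every cache_interval-th step refreshes the cached deep features;
    # the steps in between reuse them (cache_interval == 1 disables reuse).
    return (step - start_step) % cache_interval != 0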
laksjdjf / chat.py
Last active April 25, 2024 15:20
Default settings are for Command R
import gradio as gr
import json
import requests
import argparse
from dataclasses import dataclass
############### utils ###############
BAN_TOKENS = ["<|END_OF_TURN_TOKEN|>"]  # tokens to suppress, for Command R
parser = argparse.ArgumentParser()
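# Hypothetical CLI flags (an assumption; the gist's actual arguments and defaults
# may differ): point the Gradio client at a local chat-completion style endpoint.
parser.add_argument("--url", type=str, default="http://localhost:8080", help="chat API endpoint")
parser.add_argument("--max_tokens", type=int, default=512, help="maximum tokens per reply")
args = parser.parse_args()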
def make_unet_conversion_map():
    unet_conversion_map_layer = []
    # unet
    # https://github.com/kohya-ss/sd-scripts/blob/2d7389185c021bc527b414563c245c5489d6328a/library/sdxl_model_util.py#L293
    for i in range(3):  # num_blocks is 3 in sdxl
        # loop over downblocks/upblocks
        for j in range(2):
            # loop over resnets/attentions for downblocks
            hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
            sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
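            # Assumed continuation, following the kohya-ss sdxl_model_util.py code
            # linked above: record the (stable-diffusion, diffusers) prefix pair.
            unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))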
'''
https://gist.github.com/kohya-ss/3f774da220df102548093a7abc8538ed
1. put this file in ComfyUI/custom_nodes
2. load node from <loaders>
'''
import torch
from comfy.ldm.modules.diffusionmodules.openaimodel import forward_timestep_embed, timestep_embedding, th
def apply_control(h, control, name):
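    # Assumed body (a sketch mirroring ComfyUI's own apply_control helper): add the
    # matching ControlNet residual for this block, if any, onto the hidden state.
    if control is not None and name in control and len(control[name]) > 0:
        ctrl = control[name].pop()
        if ctrl is not None:
            h += ctrl
    return h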
from comfy.samplers import KSAMPLER
import torch
from comfy.k_diffusion.sampling import default_noise_sampler, to_d
from tqdm.auto import trange
@torch.no_grad()
def sampler_tcd(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, gamma=None):
    extra_args = {} if extra_args is None else extra_args
    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])
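    # Assumed continuation (a simplified sketch, not necessarily the gist's exact
    # update rule): each step predicts x0, takes a deterministic step toward the
    # next sigma, and re-injects fresh noise in proportion to gamma.
    gamma = 0.3 if gamma is None else gamma
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        if sigmas[i + 1] > 0:
            sigma_down = sigmas[i + 1] * (1.0 - gamma)
            # deterministic move toward the x0 estimate ...
            x = denoised + (x - denoised) * sigma_down / sigmas[i]
            # ... plus fresh noise so the total level matches sigmas[i + 1]
            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * (sigmas[i + 1] ** 2 - sigma_down ** 2) ** 0.5
        else:
            x = denoised
    return x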
# https://github.com/huggingface/transformers/blob/838b87abe231fd70be5132088d0dee72a7bb8d62/src/transformers/models/opt/modeling_opt.py#L147
"""
model = AutoModelForCausalLM.from_pretrained("p1atdev/dart-v1-sft")
apply_hook(model)
"""
import torch
import torch.nn as nn
def forward_hooker(self):
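    # Assumed continuation (a sketch, not the gist's code): wrap this attention
    # module's forward so the attention weights are stored on the module.
    orig_forward = self.forward

    def forward(*args, **kwargs):
        kwargs["output_attentions"] = True
        out = orig_forward(*args, **kwargs)
        self.last_attention = out[1]  # stash the attention map for inspection
        return out

    return forward


# Hypothetical helper matching the docstring's apply_hook(model) (an assumption,
# not the gist's code): install the hooked forward on every attention module.
def apply_hook(model):
    for module in model.modules():
        if module.__class__.__name__.endswith("Attention"):
            module.forward = forward_hooker(module)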
# https://huggingface.co/shadowlilac/aesthetic-shadow-v2
from transformers import pipeline
import torch
from PIL import Image
from comfy.ldm.modules.attention import optimized_attention
def optimized_forward(self):
    def forward(hidden_states, head_mask=None, output_attentions=False):
        query = self.query(hidden_states)
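        # Assumed continuation (a sketch): compute key/value the same way and run
        # the attention math through ComfyUI's optimized_attention kernel instead
        # of the stock HF implementation; attention probabilities are not returned.
        key = self.key(hidden_states)
        value = self.value(hidden_states)
        out = optimized_attention(query, key, value, self.num_attention_heads)
        return (out,)

    return forward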
laksjdjf / dilate_conv.py
Last active March 6, 2024 11:35
Based on ScaleCrafter (https://arxiv.org/abs/2310.07702)
# https://arxiv.org/abs/2310.07702
import comfy.ops
ops = comfy.ops.disable_weight_init
class DilateConv:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
import torch
class VisualStylePrompting:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "model": ("MODEL",),
            "reference": ("LATENT",),
            "depth": ("INT", {"default": 0, "min": -1, "max": 12}),
            "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),