Skip to content

Instantly share code, notes, and snippets.

View laksjdjf's full-sized avatar
🌏
On Earth

laksjdjf

🌏
On Earth
View GitHub Profile
@laksjdjf
laksjdjf / chat.py
Last active April 25, 2024 15:20
デフォルト設定はcommand -r 用
# Gist fragment of chat.py: setup for a Gradio-based chat client that talks to
# an HTTP generation backend via `requests`. Defaults are tuned for Command R
# (per the gist description); the rest of the script is not visible here.
import gradio as gr
import json
import requests
import argparse
from dataclasses import dataclass
############### utils ###############
BAN_TOKENS = ["<|END_OF_TURN_TOKEN|>"] # tokens to suppress in output; end-of-turn marker for Command R
# CLI argument parser; arguments are presumably added later in the script — not shown in this preview.
parser = argparse.ArgumentParser()
from comfy.samplers import KSAMPLER
import torch
from comfy.k_diffusion.sampling import default_noise_sampler, to_d
from tqdm.auto import trange
@torch.no_grad()
def sampler_tcd(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, gamma=None):
extra_args = {} if extra_args is None else extra_args
noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
# https://github.com/huggingface/transformers/blob/838b87abe231fd70be5132088d0dee72a7bb8d62/src/transformers/models/opt/modeling_opt.py#L147
"""
model = AutoModelForCausalLM.from_pretrained("p1atdev/dart-v1-sft")
apply_hook(model)
"""
import torch
import torch.nn as nn
def forward_hooker(self):
# https://huggingface.co/shadowlilac/aesthetic-shadow-v2
from transformers import pipeline
import torch
from PIL import Image
from comfy.ldm.modules.attention import optimized_attention
def optimized_forward(self):
def forward(hidden_states, head_mask = None, output_attentions = False):
query = self.query(hidden_states)
def make_unet_conversion_map():
unet_conversion_map_layer = []
# unet
# https://github.com/kohya-ss/sd-scripts/blob/2d7389185c021bc527b414563c245c5489d6328a/library/sdxl_model_util.py#L293
for i in range(3): # num_blocks is 3 in sdxl
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
import torch
class VisualStylePrompting:
@classmethod
def INPUT_TYPES(s):
return {"required": {
"model": ("MODEL",),
"reference": ("LATENT",),
"depth": ("INT", {"default": 0, "min": -1, "max": 12}),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),
# ref: ScaleCrafter https://github.com/YingqingHe/ScaleCrafter
import math
import comfy.ops
import torch.nn.functional as F
ops = comfy.ops.disable_weight_init
class ScaleCrafter:
@classmethod
def INPUT_TYPES(s):
@laksjdjf
laksjdjf / dilate_conv.py
Last active March 6, 2024 11:35
Reference from ScaleCrafter[https://arxiv.org/abs/2310.07702]
# https://arxiv.org/abs/2310.07702
import comfy.ops
ops = comfy.ops.disable_weight_init
class DilateConv:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
===========================================================================================================================================================
Layer (type (var_name)) Input Shape Output Shape Param # Kernel Shape
===========================================================================================================================================================
StableCascadeUnet (StableCascadeUnet) -- [1, 4, 256, 256] -- 3
├─Linear (clip_txt_pooled_mapper) [1, 1, 1280] [1, 1, 5120] 6,558,720 --
├─LayerNorm (clip_norm) [1, 4, 1280] [1, 4, 1280] -- --
├─Sequential (embedding) [1, 4, 256, 256] [1, 320, 128, 128] -- --
│ └─PixelUnshuf
===========================================================================================================================================================
Layer (type (var_name)) Input Shape Output Shape Param # Kernel Shape
===========================================================================================================================================================
StableCascadeUnet (StableCascadeUnet) [2, 16, 24, 24] [2, 16, 24, 24] 8,923,136 3
├─Linear (clip_txt_pooled_mapper) [2, 77, 1280] [2, 77, 8192] 10,493,952 --
├─LayerNorm (clip_norm) [2, 308, 2048] [2, 308, 2048] -- --
├─Sequential (embedding) [2, 16, 24, 24] [2, 2048, 24, 24] -- --
│ └─PixelUnshuf