Clybius
@Clybius
Clybius / XLOmniV2.4.py
Created July 16, 2024 05:02
XLOmniV2.4 Merge Recipe
import sd_mecha
from sd_mecha.hypers import Hyper
from sd_mecha.merge_methods import SameMergeSpace
from sd_mecha.extensions.merge_method import LiftFlag, convert_to_recipe
from sd_mecha.merge_space import MergeSpace
import torch
from torch import Tensor
from typing import Optional, Dict, Mapping, TypeVar
import functools
import math
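The preview above stops at the imports, so the recipe body itself is not reproduced here. As a rough illustration of the sd_mecha pattern these imports point to, a minimal custom merge method might look like the sketch below; the method name, hyperparameter, and body are assumptions for illustration, not the actual XLOmniV2.4 recipe, and the exact decorator signature should be checked against the sd_mecha version in use.

# Illustrative only: a minimal sd_mecha-style custom merge method, not the XLOmniV2.4 recipe.
@convert_to_recipe
def weighted_sum_example(
    a: Tensor | SameMergeSpace,
    b: Tensor | SameMergeSpace,
    *,
    alpha: Hyper = 0.5,  # assumed hyperparameter name and default
    **kwargs,
) -> Tensor | SameMergeSpace:
    # Simple linear interpolation between the two models' tensors.
    return (1 - alpha) * a + alpha * b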
@Clybius
Clybius / model_merger.py
Created February 9, 2024 17:16
ComfyUI Model Merger Custom Nodes
import torch
from tqdm.auto import trange
import tqdm
def train_difference(a, b, c, key):
    a = a[key][0]
    b = b[key][0]
    c = c[key][0]
    merged = []
    atype = a.dtype
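The preview cuts off inside train_difference, so the rest of the gist's implementation is not shown. As a loose sketch of the general "train difference" idea (carry what model b learned relative to its base c over onto a), with a hypothetical helper name and a plain scale factor instead of the gist's own weighting:

# Hypothetical sketch of the basic idea only; the gist's train_difference applies its own
# per-element weighting, which is not reproduced here.
def train_difference_sketch(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor, multiplier: float = 1.0) -> torch.Tensor:
    # Add the finetune delta (b - c) onto a, cast back to a's dtype.
    return a + multiplier * (b - c).to(a.dtype)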
@Clybius
Clybius / README.txt
Created July 3, 2023 17:55
DPM++ 2M SDE Adaptive Sampler | A modified 2M SDE sampler with cosine-similarity matching against a 2S step and adaptive second-order sampling.
# How to set up in A1111 Stable Diffusion and its various forks
1) Install the sampler by pasting its code at the bottom of `repositories/k-diffusion/k_diffusion/sampling.py`
2) Then, within `modules/sd_samplers_kdiffusion.py`, add the following entry to `samplers_k_diffusion` (see the placement sketch after these notes):
('DPM++ 2M SDE Adaptive', 'sample_dpmpp_2m_sde_adaptive', ['c_dpmpp_2m_sde_ad'], {"brownian_noise": True, 'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
Notes: This has been tested on Vlad's A1111 fork. Step 2 may not be exactly the same on native A1111, but you should be able to work around any errors with some contextual clues.
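For reference, a minimal sketch of where that entry might sit inside `samplers_k_diffusion`; the surrounding list contents here are placeholders, and only the new tuple comes from this gist.

samplers_k_diffusion = [
    # ... existing entries (e.g. the stock 'DPM++ 2M SDE' tuple) stay unchanged ...
    ('DPM++ 2M SDE Adaptive', 'sample_dpmpp_2m_sde_adaptive', ['c_dpmpp_2m_sde_ad'],
     {'brownian_noise': True, 'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
]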
@Clybius
Clybius / perlin.py
Created May 17, 2023 17:08 — forked from vadimkantorov/perlin.py
Perlin noise in PyTorch
# ported from https://github.com/pvigier/perlin-numpy/blob/master/perlin2d.py
import torch
import math
def rand_perlin_2d(shape, res, fade = lambda t: 6*t**5 - 15*t**4 + 10*t**3):
    delta = (res[0] / shape[0], res[1] / shape[1])
    d = (shape[0] // res[0], shape[1] // res[1])
    grid = torch.stack(torch.meshgrid(torch.arange(0, res[0], delta[0]), torch.arange(0, res[1], delta[1])), dim = -1) % 1
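The preview stops partway through rand_perlin_2d; the rest of the forked implementation continues in the gist. A hedged usage sketch, assuming the complete function is defined and that each dimension of shape is divisible by the corresponding res entry:

# Illustrative call only; requires the full rand_perlin_2d from the forked gist.
noise = rand_perlin_2d((256, 256), (8, 8))  # 256x256 Perlin noise from an 8x8 gradient grid
print(noise.shape)  # torch.Size([256, 256])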
@Clybius
Clybius / sampling.py
Created April 26, 2023 16:07
DPM++ 2M LSA (Low Sigma Ancestral) Sampler (K-Diffusion)
@torch.no_grad()
def sample_dpmpp_2m_lsa(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
    """DPM-Solver++(2M) with ancestral noise at sigmas below 1.1."""
    extra_args = {} if extra_args is None else extra_args
    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])
    sigma_fn = lambda t: t.neg().exp()
    t_fn = lambda sigma: sigma.log().neg()
    old_denoised = None
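Only the head of the sampler is shown above. For context, a hedged usage sketch in k-diffusion style; `denoiser` is a stand-in for a k-diffusion model wrapper (for example, k_diffusion.external.CompVisDenoiser around a loaded checkpoint), and the sigma range and latent shape are placeholders.

# Illustrative usage only; assumes sample_dpmpp_2m_lsa has been appended to k_diffusion/sampling.py
# and that `denoiser` is an already-constructed k-diffusion denoiser wrapper.
import torch
from k_diffusion.sampling import get_sigmas_karras

sigmas = get_sigmas_karras(n=20, sigma_min=0.0292, sigma_max=14.6146, device='cuda')
x = torch.randn(1, 4, 64, 64, device='cuda') * sigmas[0]  # start from noise at the largest sigma
samples = sample_dpmpp_2m_lsa(denoiser, x, sigmas, eta=1.0)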
@Clybius
Clybius / index.html
Last active August 12, 2024 14:35
Embed AV1/No File Size Capped Videos in Discord
<head>
  <!-- Change the content to the link of a GIF of your choice; it will be shown as the embed. -->
  <meta property="og:image" content="GifToEmbedURL">
  <meta property="og:type" content="video.other">
  <!-- Change the content to the link of a video of your choice. This works with videos over 50 MB, and even with unsupported codecs such as AV1. -->
  <meta property="og:video:url" content="VideoToEmbedURL">
  <!-- Set these to the video's width and height. Not required, but the video will display as intended if the aspect ratio and size are correct. -->
  <meta property="og:video:width" content="1920">
  <meta property="og:video:height" content="1080">
</head>
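To use it, host this page at a publicly reachable URL and paste that URL into Discord: Discord's link crawler reads the Open Graph tags and renders the referenced video as an embed, which is what lets it bypass the usual upload size cap and codec limitations.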