# NOTE(review): the lines above/below this script originally contained GitHub
# Gist page chrome ("Skip to content" / "Instantly share code, notes, and
# snippets.") — scrape residue, not part of the program. Commented out so the
# file parses as Python.
"""Load a fine-tuned Stable Diffusion pipeline for text-to-image generation.

Reads the weights directory from the WEIGHTS_DIR environment variable and
builds an fp16 pipeline on CUDA with a DDIM scheduler.
"""
# stdlib
import argparse
import os
import uuid

# third-party
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline, DDIMScheduler

# CLI flags for the generation prompt. NOTE(review): parse_args() is not
# called anywhere in this span — presumably it is called later in the file;
# verify before relying on these flags.
argParser = argparse.ArgumentParser()
argParser.add_argument("-p", "--prompt", help="prompt")
argParser.add_argument("-np", "--negprompt", help="negative prompt")

# Directory holding the fine-tuned model weights; must be set in the env.
model_path = os.getenv('WEIGHTS_DIR')

# DDIM scheduler configured to match Stable Diffusion v1 training
# (scaled-linear beta schedule over [0.00085, 0.012]).
scheduler = DDIMScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    clip_sample=False,
    set_alpha_to_one=False,
)

# Half-precision pipeline on GPU; the safety checker is disabled on purpose.
pipe = StableDiffusionPipeline.from_pretrained(
    model_path,
    scheduler=scheduler,
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")
#@markdown Run to generate a grid of preview images from the last saved weights.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

# Checkpoint subfolders under OUTPUT_DIR are named by training step count.
# Skip "0" (untrained) and any non-numeric entries (e.g. logs/, hidden
# files) — the original int() sort key crashed on those — and sort
# numerically so the most recent checkpoint comes last.
weights_folder = os.getenv('OUTPUT_DIR')
folders = sorted(
    (f for f in os.listdir(weights_folder) if f != "0" and f.isdigit()),
    key=int,
)
if not folders:
    # Fail with a clear message instead of an IndexError on folders[0].
    raise RuntimeError(f"no checkpoint folders found in {weights_folder}")

# Grid dimensions: one row per checkpoint, one column per sample image.
row = len(folders)
col = len(os.listdir(os.path.join(weights_folder, folders[0], "samples")))
# You can also add multiple concepts here. Try tweaking `--max_train_steps` accordingly.
concepts_list = [
# {
# "instance_prompt": "photo of zwx dog",
# "class_prompt": "photo of a dog",
# "instance_data_dir": "/content/data/zwx",
# "class_data_dir": "/content/data/dog"
# },
{