Last active
March 6, 2024 06:29
-
-
Save guilt/ce2b1988cfbce9d1fe409c7dc3f6e760 to your computer and use it in GitHub Desktop.
Stable Diffusion
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --index-url https://download.pytorch.org/whl/nightly/cpu
accelerate
diffusers @ git+https://github.com/kashif/diffusers.git@a3dc21385b7386beb3dab3a9845962ede6765887
huggingface
pillow
python-slugify[unidecode]
torch
transformers
peft
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Generate images with Stable Cascade (two-stage prior -> decoder pipeline).

Prompts the user for a description and a negative prompt, runs the prior to
produce image embeddings, decodes them into PIL images, and saves the results
under ``Output/`` as ``SDC-<slug>-<n>.png``.
"""
import os

import torch
from accelerate import Accelerator
from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline
from slugify import slugify

# Model / output configuration.
priorModelId = "stabilityai/stable-cascade-prior"
modelId = "stabilityai/stable-cascade"
dataType = torch.float32  # float16 is buggy with this pipeline
outputDir = "Output"

# Build the two-stage pipelines: the prior maps text -> image embeddings,
# the decoder maps embeddings -> pixels.
prior = StableCascadePriorPipeline.from_pretrained(priorModelId, torch_dtype=dataType)
decoder = StableCascadeDecoderPipeline.from_pretrained(modelId, torch_dtype=dataType)

# Let Accelerate choose the best available device (CPU/GPU/MPS).
accelerator = Accelerator()
prior = prior.to(accelerator.device)
decoder = decoder.to(accelerator.device)

# User data. strip() so whitespace-only input still falls back to the default.
userPrompt = input("Enter a description of what you would like to Draw: ").strip()
userPrompt = userPrompt or "A potato"
negativePrompt = input("Enter a description of what you would not like to Draw: ").strip()

inferenceSteps = 25  # Default of 50.
numImages = 4  # Should set to 1 for 1 Image.

# Start generating.
slug = slugify(userPrompt)
print(f"Using {accelerator.device} to generate image for Prompt: {userPrompt} ...")

# Stage 1: text prompt -> image embeddings.
priorOutput = prior(
    prompt=userPrompt,
    negative_prompt=negativePrompt,
    height=1024,
    width=1024,
    guidance_scale=4.0,
    num_images_per_prompt=numImages,
    num_inference_steps=inferenceSteps,
)
# Stage 2: embeddings -> PIL images.
imagesGenerated = decoder(
    image_embeddings=priorOutput.image_embeddings,
    prompt=userPrompt,
    negative_prompt=negativePrompt,
    guidance_scale=0.0,
    output_type="pil",
    num_inference_steps=inferenceSteps,
)

# exist_ok avoids the check-then-create race of exists() + makedirs().
os.makedirs(outputDir, exist_ok=True)
for idx, image in enumerate(imagesGenerated.images):
    image.save(os.path.join(outputDir, f"SDC-{slug}-{idx}.png"))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Generate images with Stable Diffusion 2.1 using the DPM-Solver++ scheduler.

Prompts the user for a description, then runs the pipeline ``numImages`` times
and saves each result under ``Output/`` as ``SDv2-<slug>-<n>.png``.
"""
import os

import torch  # backs the diffusers pipeline; kept imported for clarity
from accelerate import Accelerator
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
from slugify import slugify

modelId = "stabilityai/stable-diffusion-2-1"
outputDir = "Output"

# Use the DPMSolverMultistepScheduler (DPM-Solver++) scheduler here instead:
# it reaches good quality in fewer inference steps.
pipe = StableDiffusionPipeline.from_pretrained(modelId)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

# Let Accelerate choose the best available device (CPU/GPU/MPS).
accelerator = Accelerator()
pipe = pipe.to(accelerator.device)

# User data. strip() so whitespace-only input still falls back to the default.
userPrompt = input("Enter a description of what you would like to Draw: ").strip()
userPrompt = userPrompt or "A potato"
inferenceSteps = 25  # Default of 50.
numImages = 4  # Should set to 1 for 1 Image.

# Start generating.
slug = slugify(userPrompt)
print(f"Using {accelerator.device} to generate image for Prompt: {userPrompt} ...")

# exist_ok avoids the check-then-create race of exists() + makedirs().
os.makedirs(outputDir, exist_ok=True)
for idx in range(numImages):
    imagesGenerated = pipe(prompt=userPrompt, num_inference_steps=inferenceSteps)
    # Bug fix: the original discarded the batch index and named every image in
    # a call with the outer idx only, so a multi-image batch would overwrite
    # the same file. Keep the original name for the first image (the default
    # single-image case) and add a sub-index for any extras.
    for sub, image in enumerate(imagesGenerated.images):
        name = f"SDv2-{slug}-{idx}.png" if sub == 0 else f"SDv2-{slug}-{idx}-{sub}.png"
        image.save(os.path.join(outputDir, name))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment