Skip to content

Instantly share code, notes, and snippets.

@maxim04
Last active January 13, 2023 09:28
Show Gist options
  • Save maxim04/41ddac4e1c33405b0dba05889d8cef21 to your computer and use it in GitHub Desktop.
import argparse
import os
import torch
import uuid
from torch import autocast
from diffusers import StableDiffusionPipeline, DDIMScheduler
# Command-line interface for the generation script.  `type=` and defaults are
# declared here so downstream code never sees None for the numeric knobs
# (previously, omitting any flag crashed later with int(None)/float(None)).
argParser = argparse.ArgumentParser(description="Generate images with a Stable Diffusion pipeline")
argParser.add_argument("-p", "--prompt", required=True, help="prompt")
argParser.add_argument("-np", "--negprompt", default=None, help="negative prompt")
argParser.add_argument("-n", "--numsamples", type=int, default=1, help="num samples")
argParser.add_argument("-g", "--gscale", type=float, default=7.5, help="guidance scale")
argParser.add_argument("-i", "--infsteps", type=int, default=50, help="inference steps")
args = argParser.parse_args()
# Resolve the model weights directory from the environment and fail fast with
# a clear message — from_pretrained(None) would otherwise raise a confusing
# error deep inside diffusers.
model_path = os.getenv('WEIGHTS_DIR')
if not model_path:
    raise SystemExit("WEIGHTS_DIR environment variable is not set")
# DDIM scheduler with the standard Stable Diffusion beta schedule.
scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
# NOTE(review): safety_checker=None disables the NSFW filter deliberately;
# float16 halves GPU memory.  Requires a CUDA device to be present.
pipe = StableDiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16).to("cuda")
# Optional torch.Generator for reproducible sampling; None -> nondeterministic.
g_cuda = None
# Pull generation parameters off the parsed CLI args.  Guarded conversions:
# if a flag was omitted (value is None) fall back to standard SD defaults
# instead of crashing on int(None)/float(None).
prompt = args.prompt
negative_prompt = args.negprompt
num_samples = int(args.numsamples) if args.numsamples is not None else 1
guidance_scale = float(args.gscale) if args.gscale is not None else 7.5
num_inference_steps = int(args.infsteps) if args.infsteps is not None else 50
# Fixed Stable Diffusion v1 native resolution.
height = 512
width = 512
# Make sure the output directory exists before generating — img.save() below
# fails on a fresh checkout otherwise.
os.makedirs("generated", exist_ok=True)
# autocast + inference_mode: fp16 compute with no autograd bookkeeping.
with autocast("cuda"), torch.inference_mode():
    images = pipe(
        prompt,
        height=height,
        width=width,
        negative_prompt=negative_prompt,
        num_images_per_prompt=num_samples,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=g_cuda,
    ).images
# uuid1-based filenames avoid collisions across runs; the previous enumerate
# index was unused, and .format() stringifies the UUID itself.
for img in images:
    img.save("generated/img-{}.png".format(uuid.uuid1()))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment