Skip to content

Instantly share code, notes, and snippets.

View MarkinHaus's full-sized avatar

Markin Hausmanns MarkinHaus

View GitHub Profile
"""
LiteLLM Custom Provider for your LLM Gateway
Installation:
1. Place this file in your project directory
2. Update litellm's openai_compatible_providers.json OR
3. Use the CustomLLM class approach for full control
Usage:
import litellm
@MarkinHaus
MarkinHaus / minicli.py
Created September 3, 2024 22:03
ToolBoxV2 - Network - CLInterface
import asyncio
import datetime
import inspect
import os
import threading
# psutil is an optional dependency: try to import it and record availability
# in IS_PSUTIL so callers can feature-gate process/system metrics.
try:
    import psutil
    IS_PSUTIL = True
except ImportError:
    # Fix: the original set only `psutil = None` here, leaving IS_PSUTIL
    # unassigned — any later `if IS_PSUTIL:` would raise NameError when
    # psutil is not installed. Define it explicitly in both branches.
    psutil = None
    IS_PSUTIL = False
@MarkinHaus
MarkinHaus / st_runner.py
Created March 5, 2024 22:20
streamlit runner for content gen
import streamlit as st
from content_gen import PipelineManager
# from toolboxv2.utils.toolbox import ProxyApp
def main():
manager = PipelineManager()
st.sidebar.title("Generation Tasks")
task = st.sidebar.selectbox("Select a task", ["Classic Generation", "Tiny Generation", "Text-to-Image Generation",
@MarkinHaus
MarkinHaus / content_gen.py
Created March 5, 2024 22:19
Generation with Transformer and HF
# cli_functions.py
# pip install git+https://github.com/huggingface/accelerate
import os
from diffusers import (StableDiffusionPipeline, AutoencoderTiny, AutoPipelineForText2Image, AutoPipelineForImage2Image, \
StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, DiffusionPipeline, LCMScheduler, \
AutoPipelineForInpainting, StableDiffusionControlNetPipeline, ControlNetModel,
AnimateDiffPipeline,
MotionAdapter, StableVideoDiffusionPipeline)
from diffusers.utils import load_image, make_image_grid, export_to_gif, export_to_video