Skip to content

Instantly share code, notes, and snippets.

@Trojaner
Last active June 7, 2024 17:43
Show Gist options
  • Save Trojaner/b8a5404d4c15632cb492cae7e3f5ffba to your computer and use it in GitHub Desktop.
kara-audio (GPL-3)
import argparse
import hashlib
import os
import platform
import signal
import site
import subprocess
import sys
from pathlib import Path
# Pinned PyTorch stack versions that this installer installs/upgrades to.
TORCH_VERSION = "2.2.1"
TORCHVISION_VERSION = "0.17.1"
TORCHAUDIO_VERSION = "2.2.1"
# Paths resolved relative to the current working directory — the script is
# expected to be launched from the project root.
script_dir = os.getcwd()
conda_env_path = os.path.join(script_dir, "installer_files", "env")  # portable conda env created by the installer
app_model_path = os.path.join(script_dir, "model")  # NOTE(review): not referenced in this file — presumably used by the app; confirm
def signal_handler(signum, frame):
    """Terminate the installer cleanly when the user presses Ctrl+C."""
    sys.exit(0)


# Route SIGINT through the handler above so a Ctrl+C exits with status 0.
signal.signal(signal.SIGINT, signal_handler)
def is_linux():
    """Return True when the interpreter reports a Linux platform."""
    return sys.platform[:5] == "linux"
def is_windows():
    """Return True when the interpreter reports a Windows platform."""
    return sys.platform[:3] == "win"
def is_macos():
    """Return True when the interpreter reports a macOS (darwin) platform."""
    return sys.platform[:6] == "darwin"
def is_x86_64():
    """Return True when the machine architecture is x86-64."""
    return "x86_64" == platform.machine()
def cpu_has_avx2():
    """Best-effort AVX2 detection; assume support when detection fails.

    Fixes: the original used a bare ``except:`` (which also swallows
    SystemExit/KeyboardInterrupt) and would KeyError on a missing 'flags'
    entry; both now fall into the deliberate optimistic default.
    """
    try:
        import cpuinfo  # third-party py-cpuinfo, installed by install_webui()
        info = cpuinfo.get_cpu_info()
        return 'avx2' in info.get('flags', ())
    except Exception:
        # cpuinfo missing or probing failed — optimistically assume capable.
        return True
def cpu_has_amx():
    """Best-effort AMX detection; assume support when detection fails.

    Fixes: the original used a bare ``except:`` (which also swallows
    SystemExit/KeyboardInterrupt) and would KeyError on a missing 'flags'
    entry; both now fall into the deliberate optimistic default.
    """
    try:
        import cpuinfo  # third-party py-cpuinfo, installed by install_webui()
        info = cpuinfo.get_cpu_info()
        return 'amx' in info.get('flags', ())
    except Exception:
        # cpuinfo missing or probing failed — optimistically assume capable.
        return True
def torch_version():
    """Return the installed torch version string (e.g. "2.2.1+cu121").

    Prefers parsing torch/version.py from the conda env's site-packages so
    torch does not have to be imported; falls back to importing torch when
    the env's site-packages directory cannot be located.

    Fix: the original called open() without ever closing the file handle;
    a ``with`` block now closes it deterministically.
    """
    site_packages_path = None
    for sitedir in site.getsitepackages():
        if "site-packages" in sitedir and conda_env_path in sitedir:
            site_packages_path = sitedir
            break
    if site_packages_path:
        version_py = os.path.join(site_packages_path, 'torch', 'version.py')
        with open(version_py) as fh:
            torch_version_file = fh.read().splitlines()
        torver = [line for line in torch_version_file if line.startswith('__version__')][0].split('__version__ = ')[1].strip("'")
    else:
        from torch import __version__ as torver
    return torver
def update_pytorch():
    """Upgrade torch/torchvision/torchaudio to the pinned versions, using the
    wheel index that matches the currently-installed torch backend."""
    print_big_message("Checking for PyTorch updates")

    torver = torch_version()
    # Backend is inferred from the local version tag, e.g. "2.1.0+cu118".
    is_cuda = '+cu' in torver
    is_cuda118 = '+cu118' in torver  # 2.1.0+cu118
    is_intel = '+cxx11' in torver  # 2.0.1a0+cxx11.abi
    is_cpu = '+cpu' in torver  # 2.0.1+cpu

    cmd = f"python -m pip install --upgrade torch=={TORCH_VERSION} torchvision=={TORCHVISION_VERSION} torchaudio=={TORCHAUDIO_VERSION} "
    # cu118 must be tested before the generic '+cu' flag.
    if is_cuda118:
        cmd += "--index-url https://download.pytorch.org/whl/cu118"
    elif is_cuda:
        cmd += "--index-url https://download.pytorch.org/whl/cu121"
    elif is_cpu:
        cmd += "--index-url https://download.pytorch.org/whl/cpu"
    elif is_intel:
        # Intel builds pin their own torch versions; the +xpu wheel is Linux-only.
        if is_linux():
            cmd = "python -m pip install --upgrade torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"
        else:
            cmd = "python -m pip install --upgrade torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"

    run_cmd(f"{cmd}", assert_success=True, environment=True)
# ABUS
# conda search -c conda-forge cudnn
def update_cudnn():
    """Install/refresh cuDNN and onnxruntime to match the torch backend.

    Bug fix: a '+cu118' version string also contains '+cu', so the generic
    CUDA branch must come AFTER the cu118-specific one. The original order
    made the cu118 branch unreachable and installed nvidia-cudnn-cu12 for
    CUDA 11.8 torch builds.
    """
    print_big_message("Checking for cuDNN updates")
    torver = torch_version()
    is_cuda = '+cu' in torver
    is_cuda118 = '+cu118' in torver  # 2.1.0+cu118
    # Remove both onnxruntime flavors first so the correct one is reinstalled.
    run_cmd(f"python -m pip uninstall --yes onnxruntime", environment=True)
    run_cmd(f"python -m pip uninstall --yes onnxruntime-gpu", environment=True)
    if is_cuda118:
        run_cmd(f"python -m pip install nvidia-cudnn-cu11", assert_success=True, environment=True)
        run_cmd(f"python -m pip install --upgrade onnxruntime-gpu", environment=True)
        set_cudnn_env()
    elif is_cuda:
        run_cmd(f"python -m pip install nvidia-cudnn-cu12", assert_success=True, environment=True)
        run_cmd(f"python -m pip install --upgrade onnxruntime-gpu", environment=True)
        set_cudnn_env()
    else:
        run_cmd(f"python -m pip install --upgrade onnxruntime", environment=True)
    # nomkl avoids MKL conflicts with the pip-installed wheels.
    run_cmd(f'conda install --yes nomkl --channel conda-forge', environment=True)
def set_cudnn_env():
    """Persist LD_LIBRARY_PATH in the conda env so it points at the
    pip-installed nvidia-cudnn library directory."""
    import os
    import nvidia.cudnn.lib

    cudnn_lib_dir = os.path.dirname(nvidia.cudnn.lib.__file__)
    run_cmd(f"conda env config vars set LD_LIBRARY_PATH='{cudnn_lib_dir}'", environment=True)
def is_installed():
    """Heuristic install check: torch is present in the env's site-packages,
    or (when site-packages cannot be located) the conda env directory exists."""
    matches = (d for d in site.getsitepackages()
               if "site-packages" in d and conda_env_path in d)
    sp_dir = next(matches, None)
    if sp_dir is None:
        return os.path.isdir(conda_env_path)
    return os.path.isfile(os.path.join(sp_dir, 'torch', '__init__.py'))
def check_env():
    """Abort unless running inside a non-base conda environment.

    Fix: CONDA_DEFAULT_ENV may be unset, and the original indexing raised
    KeyError instead of printing the guidance message; use .get() with a
    "base" default so either case exits with the friendly hint.
    """
    # If we have access to conda, we are probably in an environment
    conda_exist = run_cmd("conda", environment=True, capture_output=True).returncode == 0
    if not conda_exist:
        print("Conda is not installed. Exiting...")
        sys.exit(1)
    # Ensure this is a new environment and not the base environment
    if os.environ.get("CONDA_DEFAULT_ENV", "base") == "base":
        print("Create an environment for this project and activate it. Exiting...")
        sys.exit(1)
def clear_cache():
    """Intentionally a no-op: conda/pip cache clearing is disabled here."""
    print("clear_cache?? no...")
def print_big_message(message):
    """Print *message* framed by asterisk banners, prefixing each line with '* '."""
    banner = "*******************************************************************"
    print("\n\n" + banner)
    for line in message.strip().split('\n'):
        print("*", line)
    print(banner + "\n\n")
def calculate_file_hash(file_path):
    """Return the SHA-256 hex digest of *file_path* (relative to script_dir),
    or '' when it does not name a regular file."""
    full_path = os.path.join(script_dir, file_path)
    if not os.path.isfile(full_path):
        return ''
    with open(full_path, 'rb') as fh:
        return hashlib.sha256(fh.read()).hexdigest()
def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None):
    """Execute *cmd* through the shell and return the CompletedProcess.

    environment=True prefixes the command with activation of the project's
    conda env; assert_success=True terminates the installer on a non-zero
    exit status.
    """
    if environment:
        conda_root = os.path.join(script_dir, "installer_files", "conda")
        if is_windows():
            conda_bat_path = os.path.join(conda_root, "condabin", "conda.bat")
            cmd = f'"{conda_bat_path}" activate "{conda_env_path}" >nul && {cmd}'
        else:
            conda_sh_path = os.path.join(conda_root, "etc", "profile.d", "conda.sh")
            cmd = f'. "{conda_sh_path}" && conda activate "{conda_env_path}" && {cmd}'

    # shell=True is required here: conda activation is itself shell syntax.
    result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env)

    if assert_success and result.returncode != 0:
        print(f"Command '{cmd}' failed with exit status code '{str(result.returncode)}'.\n\nExiting now.\nTry running the start/update script again.")
        sys.exit(1)
    return result
def generate_alphabetic_sequence(index):
    """Convert a zero-based index to spreadsheet-style column letters
    (bijective base-26): 0 -> 'A', 25 -> 'Z', 26 -> 'AA', 701 -> 'ZZ'."""
    letters = []
    while index >= 0:
        index, remainder = divmod(index, 26)
        letters.append(chr(ord('A') + remainder))
        index -= 1
    return ''.join(reversed(letters))
def get_user_choice(question, options_dict):
    """Show *question* and the lettered options, then prompt until the user
    enters one of options_dict's keys (case-insensitive). Returns the key."""
    print()
    print(question)
    print()
    for key, value in options_dict.items():
        print(f"{key}) {value}")
    print()
    while True:
        choice = input("Input> ").upper()
        if choice in options_dict:
            return choice
        print("Invalid choice. Please try again.")
def install_webui():
    """First-time install: choose a GPU backend, install PyTorch for it, then
    the project requirements and cuDNN/onnxruntime.

    Fix: an invalid GPU_CHOICE environment value previously crashed with a
    bare KeyError at the gpu_choice_to_name lookup; it is now validated and
    exits with a clear message.
    """
    gpu_choice_to_name = {
        "A": "NVIDIA",
        "B": "INTEL",
        "C": "CPU"
    }
    # Ask the user for the GPU vendor
    if "GPU_CHOICE" in os.environ:
        choice = os.environ["GPU_CHOICE"].upper()
        if choice not in gpu_choice_to_name:
            print(f"Invalid GPU_CHOICE environment variable value \"{choice}\". Exiting...")
            sys.exit(1)
        print_big_message(f"Selected GPU choice \"{choice}\" based on the GPU_CHOICE environment variable.")
    else:
        choice = get_user_choice(
            "What is your GPU?",
            {
                'A': 'NVIDIA GTX, RTX, Tesla',
                'B': 'Intel Arc (IPEX)',
                'C': 'CPU (I want to run models in CPU mode)'
            },
        )
    selected_gpu = gpu_choice_to_name[choice]
    # Find the Pytorch installation command
    install_pytorch = f"python -m pip install torch=={TORCH_VERSION} torchvision=={TORCHVISION_VERSION} torchaudio=={TORCHAUDIO_VERSION} "
    if selected_gpu == "NVIDIA":
        print("PyTorch CUDA 12.1")
        install_pytorch += "--index-url https://download.pytorch.org/whl/cu121"
    elif selected_gpu == "INTEL":
        # Intel builds pin their own torch versions; the +xpu wheel is Linux-only.
        if is_linux():
            install_pytorch = "python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"
        else:
            install_pytorch = "python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"
    # Install Git and then Pytorch
    print_big_message("Installing PyTorch.")
    run_cmd(f"conda install -y -k ninja git && {install_pytorch} && python -m pip install py-cpuinfo==9.0.0", assert_success=True, environment=True)
    if selected_gpu == "INTEL":
        # Install oneAPI dependencies via conda
        print_big_message("Installing Intel oneAPI runtime libraries.")
        run_cmd("conda install -y -c intel dpcpp-cpp-rt=2024.0 mkl-dpcpp=2024.0")
        # Install libuv required by Intel-patched torch
        run_cmd("conda install -y libuv")
    # Install the webui requirements
    update_requirements(initial_installation=True)
    # cudnn & onnxruntime
    update_cudnn()
    clear_cache()
# ABUS
def update_requirements(initial_installation=False, pull=True):
    """Install/upgrade everything in requirements.txt for the detected torch
    backend.

    initial_installation=True skips the PyTorch upgrade (torch was just
    installed). *pull* is kept for backward compatibility; it is unused here.

    Fixes: ``rstrip(".git")`` strips any trailing '.', 'g', 'i', 't'
    characters (e.g. "logit.git" -> "lo"); the exact ".git" suffix is now
    removed instead. The requirements file handle is also closed via ``with``
    (the original leaked it).
    """
    # Update PyTorch
    if not initial_installation:
        update_pytorch()
    # Detect the PyTorch version
    torver = torch_version()
    is_cuda = '+cu' in torver
    is_cuda118 = '+cu118' in torver  # 2.1.0+cu118
    is_rocm = '+rocm' in torver  # 2.0.1+rocm5.4.2
    requirements_file = "requirements.txt"
    print_big_message(f"Installing webui requirements from file: {requirements_file}")
    print(f"TORCH: {torver}\n")
    # Prepare the requirements file
    with open(requirements_file) as fh:
        textgen_requirements = fh.read().splitlines()
    if is_cuda118:
        # Retarget cu121/cu122 wheels to cu118 for CUDA 11.8 torch builds.
        textgen_requirements = [req.replace('+cu121', '+cu118').replace('+cu122', '+cu118') for req in textgen_requirements]
    with open('temp_requirements.txt', 'w') as file:
        file.write('\n'.join(textgen_requirements))
    # Workaround for git+ packages not updating properly.
    git_requirements = [req for req in textgen_requirements if req.startswith("git+")]
    for req in git_requirements:
        url = req.replace("git+", "")
        package_name = url.split("/")[-1].split("@")[0]
        if package_name.endswith(".git"):
            package_name = package_name[:-len(".git")]
        run_cmd(f"python -m pip uninstall -y {package_name}", environment=True)
        print(f"Uninstalled {package_name}")
    # Install/update the project requirements
    run_cmd("python -m pip install -r temp_requirements.txt --upgrade", assert_success=True, environment=True)
    os.remove('temp_requirements.txt')
    # Check for '+cu' or '+rocm' in version string to determine if torch uses CUDA or ROCm. Check for pytorch-cuda as well for backwards compatibility
    if not any((is_cuda, is_rocm)) and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1:
        clear_cache()
        return
# ABUS - start web ui
def launch_webui():
    """Start the kara-audio application inside the project's conda env."""
    print("Start the program...")
    run_cmd(f"python app/abus_app_kara.py", environment=True)
if __name__ == "__main__":
    # Verifies we are in a conda environment
    check_env()

    arg_parser = argparse.ArgumentParser(add_help=False)
    arg_parser.add_argument('--update-wizard', action='store_true', help='Launch a menu with update options.')
    cli_args, _unknown = arg_parser.parse_known_args()

    # First run: perform the full install, then return to the project root.
    if not is_installed():
        install_webui()
        os.chdir(script_dir)

    # Respect an explicit opt-out of auto-launching after installation.
    opt_out_values = ("no", "n", "false", "0", "f", "off")
    if os.environ.get("LAUNCH_AFTER_INSTALL", "").lower() in opt_out_values:
        print_big_message("Will now exit due to LAUNCH_AFTER_INSTALL.")
        sys.exit()

    # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist
    conda_path_bin = os.path.join(conda_env_path, "bin")
    if not os.path.exists(conda_path_bin):
        os.mkdir(conda_path_bin)

    # Launch the webui
    launch_webui()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment