Skip to content

Instantly share code, notes, and snippets.

@johndpope
Last active March 30, 2023 09:16
Show Gist options
  • Save johndpope/512bfb17c62b64a0ea624f9923c02988 to your computer and use it in GitHub Desktop.
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/oem/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "/home/oem/miniconda3/etc/profile.d/conda.sh" ]; then
. "/home/oem/miniconda3/etc/profile.d/conda.sh"
else
export PATH="/home/oem/miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
# <<< conda initialize <<<

# Activate the default working environment on every new shell.
conda activate torch2

# nvm (Node Version Manager) setup.
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion

# CUDA 11.8 toolchain on PATH / dynamic-linker search path.
# Expansions are quoted so paths containing spaces do not word-split.
export LD_LIBRARY_PATH="/usr/local/cuda-11.8/lib64"
# NOTE(review): 'include' holds headers, not shared libraries — adding it to
# LD_LIBRARY_PATH is harmless but almost certainly unnecessary; kept as-is.
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda-11.8/include"
export PATH="/usr/local/cuda-11.8/bin:$PATH"
export LD_LIBRARY_PATH="$HOME/.miniconda3/envs/torch2/lib:$LD_LIBRARY_PATH"

# llvmlite/numba expect LLVM_CONFIG to be the path of the llvm-config
# *executable*; the original value '/usr/bin/' (a directory) cannot be run.
export LLVM_CONFIG=/usr/bin/llvm-config
# cudnn-local-repo-ubuntu2204-8.7.0.84
# Needs cuda stuff
# One-time setup of the 'torch2' conda environment (CUDA 11.8 nightly PyTorch).
# -y answers conda's confirmation prompts so the steps can run unattended;
# without it, 'conda create'/'conda install' block waiting for input.
conda create -y -n torch2 python=3.9
conda activate torch2
conda install -y pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch-nightly -c nvidia
conda install -y ffmpeg
# dlib-bin is a prebuilt wheel — avoids a long local dlib compile.
pip install dlib-bin
pip install -U --pre xformers
pip install open_clip_torch
pip install taming-transformers-rom1504
pip install -r requirements.txt
# specific to my extensions ?
pip install k-diffusion pymongo dynamicprompts decord
# Reinstall gradio so the webui picks up a clean, current version.
pip uninstall gradio -y
pip install gradio
#!/usr/bin/env bash
# Author: Oleh Pshenychnyi
# Date: 13.02.2021
#
# Kill all processes matching a provided pattern.
#
# Usage:
#
# >> bash killer.sh celery
#
# or better to alias this script in your .bashrc/.zshrc
# so you can use it like:
#
# >> killer npm
# >> killer celery
# >> killer fuckingJava

victim_name=${1}
if [ "$victim_name" == "" ]
then
echo "Nope! Gimme a victim name."
# NOTE(review): exits 0 even on misuse (kept for compatibility);
# consider 'exit 1' so callers can detect the error.
exit
fi

# Quote the pattern (and guard with --) so a name containing spaces, globs,
# or a leading '-' is passed to grep as a single literal argument.
output="$(ps ax | grep -- "${victim_name}" | awk '{print $1,$3}')"

# at this point output looks like this:
# 254214 S
# 254215 S
# 254216 S
# 259206 S+
# 259207 S+

# Temporarily switch the internal field separator to newline so each
# "PID STATE" pair becomes one array element.
_IFS=$IFS
IFS=$'\n'
pid_state_array=($output)
IFS=$_IFS

# pids to be killed
victim_pids=()
for pid_state in "${pid_state_array[@]}"; do
pid_state=($pid_state)
# Skip this script itself ($$) and foreground children (state "S+"),
# which includes the grep spawned by the pipeline above.
if [ "${pid_state[0]}" != "$$" ] && [ "${pid_state[1]}" != "S+" ]
then
victim_pids+=("${pid_state[0]}")
fi
done

if [ "${#victim_pids[@]}" == 0 ]
then
echo "Nothing found for '${victim_name}'."
exit
fi

echo "Got them: ${victim_pids[@]}";
# SIGKILL cannot be caught; errors (e.g. already-exited pids) are silenced
# on purpose. The original wrapped this in a useless echo "$(...)" subshell.
kill -9 "${victim_pids[@]}" >/dev/null 2>&1
echo ".. and smashed!"
# start.sh — free GPU memory, print environment diagnostics, launch the webui.

printf '%s\n' "killing existing python programs to free up vram"
./killer.sh python

# Show GPU state after the cleanup.
nvidia-smi

# Report streaming-multiprocessor count, then installed library versions.
python3 -c 'import torch; print(f"torch.cuda.get_device_properties(0).multi_processor_count:{torch.cuda.get_device_properties(0).multi_processor_count}")'
python3 -c 'import torch;import gradio;import torchvision;print(f"gradio {gradio.__version__},\ntorch {torch.__version__},\ntorchvision {torchvision.__version__}, \ncuda {torch.version.cuda}, \ncudnn {torch.backends.cudnn.version()}")'

printf '%s\n' "|===============================+======================+======================|"

# Launch Stable Diffusion webui with SDP attention and channels-last memory format.
python webui.py --opt-sdp-attention --opt-channelslast --api --listen --share
@johndpope
Copy link
Author

johndpope commented Mar 30, 2023

starting start.sh

Thu Mar 30 19:43:37 2023

+-----------------------------------------------------------------------------+
| NVIDIA-SMI 525.60.13    Driver Version: 525.60.13    CUDA Version: 12.0     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  NVIDIA GeForce ...  On   | 00000000:01:00.0  On |                  N/A |
|  0%   46C    P8    34W / 370W |  22117MiB / 24576MiB |     13%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+
                                                                               
+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
|    0   N/A  N/A      1745      G   /usr/lib/xorg/Xorg                202MiB |
|    0   N/A  N/A      1941      G   /usr/bin/gnome-shell               23MiB |
|    0   N/A  N/A      5286      G   ...261059061127987253,131072       71MiB |
|    0   N/A  N/A     50598      C   python                          21794MiB |
+-----------------------------------------------------------------------------+

will print out versions

|===============================+======================+======================|
torch.cuda.get_device_properties(0).multi_processor_count:82
gradio 3.23.0,
torch 2.0.0+cu117,
torchvision 0.15.1+cu117,  
cuda 11.7, 
cudnn 8500
|===============================+======================+======================|

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment