@jochemstoel
Created February 9, 2023 09:06

fast-DreamBooth 20 jan.ipynb
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/jochemstoel/139b47f4ea6510bd0667961355fcd38f/fast-dreambooth-20-jan.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "qEsNHTtVlbkV"
},
"source": [
"# **fast-DreamBooth colab From https://github.com/TheLastBen/fast-stable-diffusion, if you face any issues, feel free to discuss them.** \n",
"Keep your notebook updated for best experience. [Support](https://ko-fi.com/thelastben)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "A4Bae3VP6UsE",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "77c48612-2ff6-4a9f-e698-4db2a6607f98"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Mounted at /content/gdrive\n"
]
}
],
"source": [
"from google.colab import drive\n",
"drive.mount('/content/gdrive')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "QyvcqeiL65Tj",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "2596cdf6-0985-4efd-f36c-4f344f61b578"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[1;32mInstalling dependencies...\n",
"\u001b[1;32mDone, proceed\n"
]
}
],
"source": [
"#@markdown # Dependencies\n",
"\n",
"from IPython.utils import capture\n",
"import time\n",
"\n",
"print('\u001b[1;32mInstalling dependencies...')\n",
"with capture.capture_output() as cap:\n",
" %cd /content/\n",
" !pip install -q --no-deps accelerate==0.12.0\n",
" !wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/db.txt\n",
" !dpkg -i *.deb\n",
" !tar -C / --zstd -xf db_deps.tar.zst\n",
" !rm *.deb | rm *.zst | rm *.txt\n",
" !git clone --depth 1 --branch updt https://github.com/TheLastBen/diffusers\n",
"\n",
"print('\u001b[1;32mDone, proceed')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "R3SsbIlxw66N"
},
"source": [
"# Model Download"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "O3KHGKqyeJp9"
},
"outputs": [],
"source": [
"import os\n",
"import time\n",
"from IPython.display import clear_output\n",
"import wget\n",
"\n",
"#@markdown - Skip this cell if you are loading a previous session that contains a trained model.\n",
"\n",
"#@markdown ---\n",
"\n",
"Model_Version = \"1.5\" #@param [ \"1.5\", \"V2.1-512px\", \"V2.1-768px\"]\n",
"\n",
"#@markdown - Choose which version to finetune.\n",
"\n",
"with capture.capture_output() as cap: \n",
" %cd /content/\n",
"\n",
"#@markdown ---\n",
"Custom_Model_Version=\"1.5\" #@param [ \"1.5\", \"V2.1-512px\", \"V2.1-768px\"]\n",
"\n",
"Path_to_HuggingFace= \"\" #@param {type:\"string\"}\n",
"\n",
"#@markdown - Load and finetune a model from Hugging Face, must specify if v2, use the format \"profile/model\" like : runwayml/stable-diffusion-v1-5\n",
"#@markdown - If the custom model is private or requires a token, create token.txt containing the token in \"Fast-Dreambooth\" folder in your gdrive.\n",
"\n",
"CKPT_Path = \"\" #@param {type:\"string\"}\n",
"\n",
"CKPT_Link = \"\" #@param {type:\"string\"}\n",
"\n",
"if os.path.exists('/content/gdrive/MyDrive/Fast-Dreambooth/token.txt'):\n",
" with open(\"/content/gdrive/MyDrive/Fast-Dreambooth/token.txt\") as f:\n",
" token = f.read()\n",
" authe=f'https://USER:{token}@'\n",
"else:\n",
" authe=\"https://\"\n",
"\n",
"def downloadmodel():\n",
"\n",
" if os.path.exists('/content/stable-diffusion-v1-5'):\n",
" !rm -r /content/stable-diffusion-v1-5\n",
" clear_output()\n",
"\n",
" %cd /content/\n",
" clear_output()\n",
" !mkdir /content/stable-diffusion-v1-5\n",
" %cd /content/stable-diffusion-v1-5\n",
" !git init\n",
" !git lfs install --system --skip-repo\n",
" !git remote add -f origin \"https://huggingface.co/runwayml/stable-diffusion-v1-5\"\n",
" !git config core.sparsecheckout true\n",
" !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nmodel_index.json\\n!*.safetensors\" > .git/info/sparse-checkout\n",
" !git pull origin main\n",
" if os.path.exists('/content/stable-diffusion-v1-5/unet/diffusion_pytorch_model.bin'):\n",
" !git clone \"https://huggingface.co/stabilityai/sd-vae-ft-mse\"\n",
" !mv /content/stable-diffusion-v1-5/sd-vae-ft-mse /content/stable-diffusion-v1-5/vae\n",
" !rm -r /content/stable-diffusion-v1-5/.git\n",
" %cd /content/stable-diffusion-v1-5\n",
" !rm model_index.json\n",
" time.sleep(1) \n",
" wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')\n",
" !sed -i 's@\"clip_sample\": false@@g' /content/stable-diffusion-v1-5/scheduler/scheduler_config.json\n",
" !sed -i 's@\"trained_betas\": null,@\"trained_betas\": null@g' /content/stable-diffusion-v1-5/scheduler/scheduler_config.json\n",
" !sed -i 's@\"sample_size\": 256,@\"sample_size\": 512,@g' /content/stable-diffusion-v1-5/vae/config.json \n",
" %cd /content/\n",
" clear_output()\n",
" print('\u001b[1;32mDONE !')\n",
" else:\n",
" while not os.path.exists('/content/stable-diffusion-v1-5/unet/diffusion_pytorch_model.bin'):\n",
" print('\u001b[1;31mSomething went wrong')\n",
" time.sleep(5)\n",
"\n",
"def newdownloadmodel():\n",
"\n",
" %cd /content/\n",
" clear_output()\n",
" !mkdir /content/stable-diffusion-v2-768\n",
" %cd /content/stable-diffusion-v2-768\n",
" !git init\n",
" !git lfs install --system --skip-repo\n",
" !git remote add -f origin \"https://huggingface.co/stabilityai/stable-diffusion-2-1\"\n",
" !git config core.sparsecheckout true\n",
" !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nvae\\nfeature_extractor\\nmodel_index.json\\n!*.safetensors\" > .git/info/sparse-checkout\n",
" !git pull origin main\n",
" !rm -r /content/stable-diffusion-v2-768/.git\n",
" clear_output()\n",
" print('\u001b[1;32mDONE !')\n",
"\n",
"\n",
"def newdownloadmodelb():\n",
"\n",
" %cd /content/\n",
" clear_output()\n",
" !mkdir /content/stable-diffusion-v2-512\n",
" %cd /content/stable-diffusion-v2-512\n",
" !git init\n",
" !git lfs install --system --skip-repo\n",
" !git remote add -f origin \"https://huggingface.co/stabilityai/stable-diffusion-2-1-base\"\n",
" !git config core.sparsecheckout true\n",
" !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nvae\\nfeature_extractor\\nmodel_index.json\\n!*.safetensors\" > .git/info/sparse-checkout\n",
" !git pull origin main\n",
" !rm -r /content/stable-diffusion-v2-512/.git\n",
" clear_output()\n",
" print('\u001b[1;32mDONE !')\n",
"\n",
"\n",
"if Path_to_HuggingFace != \"\":\n",
" if Custom_Model_Version=='V2.1-512px' or Custom_Model_Version=='V2.1-768px':\n",
" if os.path.exists('/content/stable-diffusion-custom'):\n",
" !rm -r /content/stable-diffusion-custom\n",
" clear_output()\n",
" %cd /content/\n",
" clear_output()\n",
" !mkdir /content/stable-diffusion-custom\n",
" %cd /content/stable-diffusion-custom\n",
" !git init\n",
" !git lfs install --system --skip-repo\n",
" !git remote add -f origin \"{authe}huggingface.co/{Path_to_HuggingFace}\"\n",
" !git config core.sparsecheckout true\n",
" !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nvae\\nfeature_extractor\\nmodel_index.json\\n!*.safetensors\" > .git/info/sparse-checkout\n",
" !git pull origin main\n",
" if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n",
" !rm -r /content/stable-diffusion-custom/.git\n",
" %cd /content/ \n",
" MODEL_NAME=\"/content/stable-diffusion-custom\"\n",
" clear_output()\n",
" print('\u001b[1;32mDONE !')\n",
" else:\n",
" while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n",
" print('\u001b[1;31mCheck the link you provided')\n",
" time.sleep(5)\n",
" else:\n",
" if os.path.exists('/content/stable-diffusion-custom'):\n",
" !rm -r /content/stable-diffusion-custom\n",
" clear_output()\n",
" %cd /content/\n",
" clear_output()\n",
" !mkdir /content/stable-diffusion-custom\n",
" %cd /content/stable-diffusion-custom\n",
" !git init\n",
" !git lfs install --system --skip-repo\n",
" !git remote add -f origin \"{authe}huggingface.co/{Path_to_HuggingFace}\"\n",
" !git config core.sparsecheckout true\n",
" !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nvae\\nmodel_index.json\\n!*.safetensors\" > .git/info/sparse-checkout\n",
" !git pull origin main\n",
" if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n",
" !rm -r /content/stable-diffusion-custom/.git\n",
" !rm model_index.json\n",
" time.sleep(1)\n",
" wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')\n",
" %cd /content/ \n",
" MODEL_NAME=\"/content/stable-diffusion-custom\"\n",
" clear_output()\n",
" print('\u001b[1;32mDONE !')\n",
" else:\n",
" while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n",
" print('\u001b[1;31mCheck the link you provided')\n",
" time.sleep(5)\n",
"\n",
"elif CKPT_Path !=\"\":\n",
" %cd /content\n",
" clear_output() \n",
" if os.path.exists(str(CKPT_Path)):\n",
" if Custom_Model_Version=='1.5':\n",
" !wget -q -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n",
" !unzip -o -q refmdlz\n",
" !rm -f refmdlz\n",
" !wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n",
" clear_output()\n",
" !python /content/convertodiff.py \"$CKPT_Path\" /content/stable-diffusion-custom --v1\n",
" !rm -r /content/refmdl\n",
" elif Custom_Model_Version=='V2.1-512px':\n",
" !wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n",
" clear_output()\n",
" !python /content/convertodiff.py \"$CKPT_Path\" /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base\n",
" elif Custom_Model_Version=='V2.1-768px':\n",
" !wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n",
" clear_output()\n",
" !python /content/convertodiff.py \"$CKPT_Path\" /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1\n",
" !rm /content/convertodiff.py\n",
" if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n",
" clear_output()\n",
" MODEL_NAME=\"/content/stable-diffusion-custom\"\n",
" print('\u001b[1;32mDONE !')\n",
" else:\n",
" !rm -r /content/stable-diffusion-custom\n",
" while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n",
" print('\u001b[1;31mConversion error')\n",
" time.sleep(5)\n",
" else:\n",
" while not os.path.exists(str(CKPT_Path)):\n",
" print('\u001b[1;31mWrong path, use the colab file explorer to copy the path')\n",
" time.sleep(5)\n",
"\n",
"elif CKPT_Link !=\"\":\n",
" %cd /content\n",
" clear_output()\n",
" !gdown --fuzzy -O model.ckpt $CKPT_Link\n",
" clear_output() \n",
" if os.path.exists('/content/model.ckpt'):\n",
" if os.path.getsize(\"/content/model.ckpt\") > 1810671599:\n",
" if Custom_Model_Version=='1.5':\n",
" !wget -q -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n",
" !unzip -o -q refmdlz\n",
" !rm -f refmdlz\n",
" !wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n",
" clear_output()\n",
" !python /content/convertodiff.py /content/model.ckpt /content/stable-diffusion-custom --v1\n",
" !rm -r /content/refmdl\n",
" elif Custom_Model_Version=='V2.1-512px':\n",
" !wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n",
" clear_output()\n",
" !python /content/convertodiff.py /content/model.ckpt /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base\n",
" elif Custom_Model_Version=='V2.1-768px':\n",
" !wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n",
" clear_output()\n",
" !python /content/convertodiff.py /content/model.ckpt /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1\n",
" !rm /content/convertodiff.py\n",
" if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n",
" clear_output()\n",
" MODEL_NAME=\"/content/stable-diffusion-custom\"\n",
" print('\u001b[1;32mDONE !')\n",
" else:\n",
" !rm -r /content/stable-diffusion-custom\n",
" !rm /content/model.ckpt\n",
" while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n",
" print('\u001b[1;31mConversion error')\n",
" time.sleep(5)\n",
" else:\n",
" while os.path.getsize('/content/model.ckpt') < 1810671599:\n",
" print('\u001b[1;31mWrong link, check that the link is valid')\n",
" time.sleep(5)\n",
"\n",
"else:\n",
" if Model_Version==\"1.5\":\n",
" if not os.path.exists('/content/stable-diffusion-v1-5'):\n",
" downloadmodel()\n",
" MODEL_NAME=\"/content/stable-diffusion-v1-5\"\n",
" else:\n",
" MODEL_NAME=\"/content/stable-diffusion-v1-5\"\n",
" print(\"\u001b[1;32mThe v1.5 model already exists, using this model.\")\n",
" elif Model_Version==\"V2.1-512px\":\n",
" if not os.path.exists('/content/stable-diffusion-v2-512'):\n",
" newdownloadmodelb()\n",
" MODEL_NAME=\"/content/stable-diffusion-v2-512\"\n",
" else:\n",
" MODEL_NAME=\"/content/stable-diffusion-v2-512\"\n",
" print(\"\u001b[1;32mThe v2-512px model already exists, using this model.\")\n",
" elif Model_Version==\"V2.1-768px\":\n",
" if not os.path.exists('/content/stable-diffusion-v2-768'):\n",
" newdownloadmodel()\n",
" MODEL_NAME=\"/content/stable-diffusion-v2-768\"\n",
" else:\n",
" MODEL_NAME=\"/content/stable-diffusion-v2-768\"\n",
" print(\"\u001b[1;32mThe v2-768px model already exists, using this model.\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0tN76Cj5P3RL"
},
"source": [
"# Dreambooth"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "A1B299g-_VJo",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "d025ce77-a207-422b-e6d8-d754f8a2b940"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[1;32mSession loaded.\n"
]
}
],
"source": [
"import os\n",
"from IPython.display import clear_output\n",
"from IPython.utils import capture\n",
"from os import listdir\n",
"from os.path import isfile\n",
"import wget\n",
"import time\n",
"\n",
"#@markdown #Create/Load a Session\n",
"\n",
"try:\n",
" MODEL_NAME\n",
" pass\n",
"except:\n",
" MODEL_NAME=\"\"\n",
" \n",
"PT=\"\"\n",
"\n",
"Session_Name = \"waveya\" #@param{type: 'string'}\n",
"while Session_Name==\"\":\n",
" print('\u001b[1;31mInput the Session Name:') \n",
" Session_Name=input('')\n",
"Session_Name=Session_Name.replace(\" \",\"_\")\n",
"\n",
"#@markdown - Enter the session name, it if it exists, it will load it, otherwise it'll create an new session.\n",
"\n",
"Session_Link_optional = \"\" #@param{type: 'string'}\n",
"\n",
"#@markdown - Import a session from another gdrive, the shared gdrive link must point to the specific session's folder that contains the trained CKPT, remove any intermediary CKPT if any.\n",
"\n",
"WORKSPACE='/content/gdrive/MyDrive/Fast-Dreambooth'\n",
"\n",
"if Session_Link_optional !=\"\":\n",
" print('\u001b[1;32mDownloading session...')\n",
" with capture.capture_output() as cap:\n",
" %cd /content\n",
" if not os.path.exists(str(WORKSPACE+'/Sessions')):\n",
" %mkdir -p $WORKSPACE'/Sessions'\n",
" time.sleep(1)\n",
" %cd $WORKSPACE'/Sessions'\n",
" !gdown --folder --remaining-ok -O $Session_Name $Session_Link_optional\n",
" %cd $Session_Name\n",
" !rm -r instance_images\n",
" !unzip instance_images.zip\n",
" !rm -r concept_images\n",
" !unzip concept_images.zip\n",
" !rm -r captions\n",
" !unzip captions.zip\n",
" %cd /content\n",
"\n",
"\n",
"INSTANCE_NAME=Session_Name\n",
"OUTPUT_DIR=\"/content/models/\"+Session_Name\n",
"SESSION_DIR=WORKSPACE+'/Sessions/'+Session_Name\n",
"INSTANCE_DIR=SESSION_DIR+'/instance_images'\n",
"CONCEPT_DIR=SESSION_DIR+'/concept_images'\n",
"CAPTIONS_DIR=SESSION_DIR+'/captions'\n",
"MDLPTH=str(SESSION_DIR+\"/\"+Session_Name+'.ckpt')\n",
"\n",
"Model_Version = \"1.5\" #@param [ \"1.5\", \"V2.1-512px\", \"V2.1-768px\"]\n",
"#@markdown - Ignore this if you're not loading a previous session that contains a trained model\n",
"\n",
"\n",
"if os.path.exists(str(SESSION_DIR)):\n",
" mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(\".\")[-1]==\"ckpt\"]\n",
" if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls): \n",
" \n",
" def f(n):\n",
" k=0\n",
" for i in mdls:\n",
" if k==n:\n",
" !mv \"$SESSION_DIR/$i\" $MDLPTH\n",
" k=k+1\n",
"\n",
" k=0\n",
" print('\u001b[1;33mNo final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\\n\u001b[1;34m')\n",
"\n",
" for i in mdls:\n",
" print(str(k)+'- '+i)\n",
" k=k+1\n",
" n=input()\n",
" while int(n)>k-1:\n",
" n=input()\n",
" if n!=\"000\":\n",
" f(int(n))\n",
" print('\u001b[1;32mUsing the model '+ mdls[int(n)]+\" ...\")\n",
" time.sleep(2)\n",
" else:\n",
" print('\u001b[1;32mSkipping the intermediary checkpoints.')\n",
" del n\n",
"\n",
"with capture.capture_output() as cap:\n",
" %cd /content\n",
" resume=False\n",
"\n",
"if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):\n",
" print('\u001b[1;32mLoading session with no previous model, using the original model or the custom downloaded model')\n",
" if MODEL_NAME==\"\":\n",
" print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n",
" else:\n",
" print('\u001b[1;32mSession Loaded, proceed to uploading instance images')\n",
"\n",
"elif os.path.exists(MDLPTH):\n",
" print('\u001b[1;32mSession found, loading the trained model ...')\n",
" if Model_Version=='1.5':\n",
" !wget -q -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n",
" !unzip -o -q refmdlz\n",
" !rm -f refmdlz\n",
" !wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n",
" clear_output()\n",
" print('\u001b[1;32mSession found, loading the trained model ...')\n",
" !python /content/convertodiff.py \"$MDLPTH\" \"$OUTPUT_DIR\" --v1\n",
" !rm -r /content/refmdl\n",
" elif Model_Version=='V2.1-512px':\n",
" !wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n",
" clear_output()\n",
" print('\u001b[1;32mSession found, loading the trained model ...')\n",
" !python /content/convertodiff.py \"$MDLPTH\" \"$OUTPUT_DIR\" --v2 --reference_model stabilityai/stable-diffusion-2-1-base\n",
" elif Model_Version=='V2.1-768px':\n",
" !wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n",
" clear_output()\n",
" print('\u001b[1;32mSession found, loading the trained model ...')\n",
" !python /content/convertodiff.py \"$MDLPTH\" \"$OUTPUT_DIR\" --v2 --reference_model stabilityai/stable-diffusion-2-1\n",
" !rm /content/convertodiff.py \n",
" if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n",
" resume=True\n",
" clear_output()\n",
" print('\u001b[1;32mSession loaded.')\n",
" else: \n",
" if not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n",
" print('\u001b[1;31mConversion error, if the error persists, remove the CKPT file from the current session folder')\n",
"\n",
"elif not os.path.exists(str(SESSION_DIR)):\n",
" %mkdir -p \"$INSTANCE_DIR\"\n",
" print('\u001b[1;32mCreating session...')\n",
" if MODEL_NAME==\"\":\n",
" print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n",
" else:\n",
" print('\u001b[1;32mSession created, proceed to uploading instance images')\n",
"\n",
" #@markdown\n",
"\n",
" #@markdown # The most importent step is to rename the instance pictures of each subject to a unique unknown identifier, example :\n",
" #@markdown - If you have 30 pictures of yourself, simply select them all and rename only one to the chosen identifier for example : phtmejhn, the files would be : phtmejhn (1).jpg, phtmejhn (2).png ....etc then upload them, do the same for other people or objects with a different identifier, and that's it.\n",
" #@markdown - Check out this example : https://i.imgur.com/d2lD3rz.jpeg"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "LC4ukG60fgMy"
},
"outputs": [],
"source": [
"import shutil\n",
"from google.colab import files\n",
"import time\n",
"from PIL import Image\n",
"from tqdm import tqdm\n",
"import ipywidgets as widgets\n",
"from io import BytesIO\n",
"import wget\n",
"\n",
"with capture.capture_output() as cap:\n",
" %cd /content\n",
" if not os.path.exists(\"/content/smart_crop.py\"):\n",
" wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/smart_crop.py')\n",
" from smart_crop import *\n",
"\n",
"#@markdown #Instance Images\n",
"#@markdown ----\n",
"\n",
"#@markdown\n",
"#@markdown - Run the cell to upload the instance pictures.\n",
"#@markdown - You can add `external captions` in txt files by simply giving each txt file the same name as the instance image, for example dikgur (1).jpg and dikgur (1).txt, and upload them here, to use the external captions, check the box \"external_captions\" in the training cell. `All the images must have one same extension` jpg or png or....etc\n",
"\n",
"Remove_existing_instance_images= True #@param{type: 'boolean'}\n",
"#@markdown - Uncheck the box to keep the existing instance images.\n",
"\n",
"if os.path.exists(CAPTIONS_DIR+\"off\"):\n",
" !mv $CAPTIONS_DIR\"off\" $CAPTIONS_DIR\n",
" time.sleep(3)\n",
"\n",
"if Remove_existing_instance_images:\n",
" if os.path.exists(str(INSTANCE_DIR)):\n",
" !rm -r \"$INSTANCE_DIR\"\n",
" if os.path.exists(str(CAPTIONS_DIR)):\n",
" !rm -r \"$CAPTIONS_DIR\"\n",
"\n",
"if not os.path.exists(str(INSTANCE_DIR)):\n",
" %mkdir -p \"$INSTANCE_DIR\"\n",
"if not os.path.exists(str(CAPTIONS_DIR)):\n",
" %mkdir -p \"$CAPTIONS_DIR\"\n",
"\n",
"if os.path.exists(INSTANCE_DIR+\"/.ipynb_checkpoints\"):\n",
" %rm -r $INSTANCE_DIR\"/.ipynb_checkpoints\"\n",
"\n",
"\n",
"IMAGES_FOLDER_OPTIONAL=\"\" #@param{type: 'string'}\n",
"\n",
"#@markdown - If you prefer to specify directly the folder of the pictures instead of uploading, this will add the pictures to the existing (if any) instance images. Leave EMPTY to upload.\n",
"\n",
"Smart_Crop_images= True #@param{type: 'boolean'}\n",
"Crop_size = 768 #@param [\"512\", \"576\", \"640\", \"704\", \"768\", \"832\", \"896\", \"960\", \"1024\"] {type:\"raw\"}\n",
"\n",
"#@markdown - Smart crop the images without manual intervention.\n",
"\n",
"while IMAGES_FOLDER_OPTIONAL !=\"\" and not os.path.exists(str(IMAGES_FOLDER_OPTIONAL)):\n",
" print('\u001b[1;31mThe image folder specified does not exist, use the colab file explorer to copy the path :')\n",
" IMAGES_FOLDER_OPTIONAL=input('')\n",
"\n",
"if IMAGES_FOLDER_OPTIONAL!=\"\":\n",
" with capture.capture_output() as cap:\n",
" !cp $IMAGES_FOLDER_OPTIONAL/*.txt $CAPTIONS_DIR\n",
" if Smart_Crop_images:\n",
" for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n",
" extension = filename.split(\".\")[-1]\n",
" identifier=filename.split(\".\")[0]\n",
" new_path_with_file = os.path.join(INSTANCE_DIR, filename)\n",
" file = Image.open(IMAGES_FOLDER_OPTIONAL+\"/\"+filename)\n",
" width, height = file.size\n",
" if file.size !=(Crop_size, Crop_size):\n",
" image=crop_image(file, Crop_size)\n",
" if extension.upper() == \"JPG\" or \"jpg\":\n",
" image[0].save(new_path_with_file, format=\"JPEG\", quality = 100)\n",
" else:\n",
" image[0].save(new_path_with_file, format=extension.upper())\n",
" else:\n",
" !cp \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$INSTANCE_DIR\"\n",
"\n",
" else:\n",
" for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n",
" %cp -r \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$INSTANCE_DIR\"\n",
"\n",
" print('\\n\u001b[1;32mDone, proceed to the next cell')\n",
"\n",
"\n",
"elif IMAGES_FOLDER_OPTIONAL ==\"\":\n",
" up=\"\"\n",
" uploaded = files.upload()\n",
" for filename in uploaded.keys():\n",
" if filename.split(\".\")[-1]==\"txt\":\n",
" shutil.move(filename, CAPTIONS_DIR)\n",
" up=[filename for filename in uploaded.keys() if filename.split(\".\")[-1]!=\"txt\"]\n",
" if Smart_Crop_images:\n",
" for filename in tqdm(up, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n",
" shutil.move(filename, INSTANCE_DIR)\n",
" extension = filename.split(\".\")[-1]\n",
" identifier=filename.split(\".\")[0]\n",
" new_path_with_file = os.path.join(INSTANCE_DIR, filename)\n",
" file = Image.open(new_path_with_file)\n",
" width, height = file.size\n",
" if file.size !=(Crop_size, Crop_size):\n",
" image=crop_image(file, Crop_size)\n",
" if extension.upper() == \"JPG\" or \"jpg\":\n",
" image[0].save(new_path_with_file, format=\"JPEG\", quality = 100)\n",
" else:\n",
" image[0].save(new_path_with_file, format=extension.upper())\n",
" clear_output()\n",
" else:\n",
" for filename in tqdm(uploaded.keys(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n",
" shutil.move(filename, INSTANCE_DIR)\n",
" clear_output()\n",
" print('\\n\u001b[1;32mDone, proceed to the next cell')\n",
"\n",
"with capture.capture_output() as cap:\n",
" %cd \"$INSTANCE_DIR\"\n",
" !find . -name \"* *\" -type f | rename 's/ /-/g'\n",
" %cd \"$CAPTIONS_DIR\"\n",
" !find . -name \"* *\" -type f | rename 's/ /-/g'\n",
" \n",
" %cd $SESSION_DIR\n",
" !rm instance_images.zip captions.zip\n",
" !zip -r instance_images instance_images\n",
" !zip -r captions captions\n",
" %cd /content"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Baw78R-w4T2j"
},
"outputs": [],
"source": [
"import ipywidgets as widgets\n",
"from io import BytesIO\n",
"#@markdown #Captions\n",
"\n",
"#@markdown - Open a tool to manually `create` captions or edit existing captions of the instance images.\n",
"\n",
"paths=\"\"\n",
"out=\"\"\n",
"widgets_l=\"\"\n",
"clear_output()\n",
"def Caption(path):\n",
" if path!=\"Select an instance image to caption\":\n",
" \n",
" name = os.path.splitext(os.path.basename(path))[0]\n",
" ext=os.path.splitext(os.path.basename(path))[-1][1:]\n",
" if ext==\"jpg\" or \"JPG\":\n",
" ext=\"JPEG\" \n",
"\n",
" if os.path.exists(CAPTIONS_DIR+\"/\"+name + '.txt'):\n",
" with open(CAPTIONS_DIR+\"/\"+name + '.txt', 'r') as f:\n",
" text = f.read()\n",
" else:\n",
" with open(CAPTIONS_DIR+\"/\"+name + '.txt', 'w') as f:\n",
" f.write(\"\")\n",
" with open(CAPTIONS_DIR+\"/\"+name + '.txt', 'r') as f:\n",
" text = f.read() \n",
"\n",
" img=Image.open(os.path.join(INSTANCE_DIR,path))\n",
" img=img.resize((420, 420))\n",
" image_bytes = BytesIO()\n",
" img.save(image_bytes, format=ext, qualiy=10)\n",
" image_bytes.seek(0)\n",
" image_data = image_bytes.read()\n",
" img= image_data \n",
" image = widgets.Image(\n",
" value=img,\n",
" width=420,\n",
" height=420\n",
" )\n",
" text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})\n",
" \n",
"\n",
" def update_text(text):\n",
" with open(CAPTIONS_DIR+\"/\"+name + '.txt', 'w') as f:\n",
" f.write(text)\n",
"\n",
" button = widgets.Button(description='Save', button_style='success')\n",
" button.on_click(lambda b: update_text(text_area.value))\n",
"\n",
" return widgets.VBox([widgets.HBox([image, text_area, button])])\n",
"\n",
"\n",
"paths = os.listdir(INSTANCE_DIR)\n",
"widgets_l = widgets.Select(options=[\"Select an instance image to caption\"]+paths, rows=25)\n",
"\n",
"\n",
"out = widgets.Output()\n",
"\n",
"def click(change):\n",
" with out:\n",
" out.clear_output()\n",
" display(Caption(change.new))\n",
"\n",
"widgets_l.observe(click, names='value')\n",
"display(widgets.HBox([widgets_l, out]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "LxEv3u8mQos3"
},
"outputs": [],
"source": [
"import shutil\n",
"from google.colab import files\n",
"from PIL import Image\n",
"from tqdm import tqdm\n",
"\n",
"#@markdown #Concept Images (Regularization)\n",
"#@markdown ----\n",
"\n",
"#@markdown\n",
"#@markdown - Run this `optional` cell to upload concept pictures. If you're traning on a specific face, skip this cell.\n",
"#@markdown - Training a model on a restricted number of instance images tends to indoctrinate it and limit its imagination, so concept images help re-opening its \"mind\" to diversity and greatly widen the range of possibilities of the output, concept images should contain anything related to the instance pictures, including objects, ideas, scenes, phenomenons, concepts (obviously), don't be afraid to slightly diverge from the trained style.\n",
"\n",
"Remove_existing_concept_images= True #@param{type: 'boolean'}\n",
"#@markdown - Uncheck the box to keep the existing concept images.\n",
"\n",
"\n",
"if Remove_existing_concept_images:\n",
" if os.path.exists(str(CONCEPT_DIR)):\n",
" !rm -r \"$CONCEPT_DIR\"\n",
"\n",
"if not os.path.exists(str(CONCEPT_DIR)):\n",
" %mkdir -p \"$CONCEPT_DIR\"\n",
"\n",
"IMAGES_FOLDER_OPTIONAL=\"\" #@param{type: 'string'}\n",
"\n",
"#@markdown - If you prefer to specify directly the folder of the pictures instead of uploading, this will add the pictures to the existing (if any) concept images. Leave EMPTY to upload.\n",
"\n",
"Smart_Crop_images= True\n",
"Crop_size = 512\n",
"\n",
"while IMAGES_FOLDER_OPTIONAL !=\"\" and not os.path.exists(str(IMAGES_FOLDER_OPTIONAL)):\n",
" print('\u001b[1;31mThe image folder specified does not exist, use the colab file explorer to copy the path :')\n",
" IMAGES_FOLDER_OPTIONAL=input('')\n",
"\n",
"if IMAGES_FOLDER_OPTIONAL!=\"\":\n",
" if Smart_Crop_images:\n",
" for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n",
" extension = filename.split(\".\")[-1]\n",
" identifier=filename.split(\".\")[0]\n",
" new_path_with_file = os.path.join(CONCEPT_DIR, filename)\n",
" file = Image.open(IMAGES_FOLDER_OPTIONAL+\"/\"+filename)\n",
" width, height = file.size\n",
" if file.size !=(Crop_size, Crop_size):\n",
" image=crop_image(file, Crop_size)\n",
" if extension.upper() == \"JPG\" or \"jpg\":\n",
" image[0].save(new_path_with_file, format=\"JPEG\", quality = 100)\n",
" else:\n",
" image[0].save(new_path_with_file, format=extension.upper())\n",
" else:\n",
" !cp \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$CONCEPT_DIR\"\n",
"\n",
" else:\n",
" for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n",
" %cp -r \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$CONCEPT_DIR\"\n",
"\n",
"elif IMAGES_FOLDER_OPTIONAL ==\"\":\n",
" uploaded = files.upload()\n",
" if Smart_Crop_images:\n",
" for filename in tqdm(uploaded.keys(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n",
" shutil.move(filename, CONCEPT_DIR)\n",
" extension = filename.split(\".\")[-1]\n",
" identifier=filename.split(\".\")[0]\n",
" new_path_with_file = os.path.join(CONCEPT_DIR, filename)\n",
" file = Image.open(new_path_with_file)\n",
" width, height = file.size\n",
" if file.size !=(Crop_size, Crop_size):\n",
" image=crop_image(file, Crop_size)\n",
" if extension.upper() == \"JPG\" or \"jpg\":\n",
" image[0].save(new_path_with_file, format=\"JPEG\", quality = 100)\n",
" else:\n",
" image[0].save(new_path_with_file, format=extension.upper())\n",
" clear_output()\n",
" else:\n",
" for filename in tqdm(uploaded.keys(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n",
" shutil.move(filename, CONCEPT_DIR)\n",
" clear_output()\n",
"\n",
"\n",
"print('\\n\u001b[1;32mAlmost done...')\n",
"with capture.capture_output() as cap:\n",
" i=0\n",
" for filename in os.listdir(CONCEPT_DIR):\n",
" extension = filename.split(\".\")[-1]\n",
" identifier=filename.split(\".\")[0]\n",
" new_path_with_file = os.path.join(CONCEPT_DIR, \"conceptimagedb\"+str(i)+\".\"+extension)\n",
" filepath=os.path.join(CONCEPT_DIR,filename)\n",
" !mv \"$filepath\" $new_path_with_file\n",
" i=i+1\n",
"\n",
" %cd $SESSION_DIR\n",
" !rm concept_images.zip\n",
" !zip -r concept_images concept_images\n",
" %cd /content\n",
"\n",
"print('\\n\u001b[1;32mDone, proceed to the training cell')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ZnmQYfZilzY6"
},
"source": [
"# Training"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "1-9QbkfAVYYU",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "ead812d2-c90f-4587-a672-36e66076af00"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[1;32mResuming Training...\u001b[0m\n",
"\u001b[1;33mTraining the UNet...\u001b[0m\n",
"\u001b[34m'########:'########:::::'###::::'####:'##::: ##:'####:'##::: ##::'######:::\n",
"... ##..:: ##.... ##:::'## ##:::. ##:: ###:: ##:. ##:: ###:: ##:'##... ##::\n",
"::: ##:::: ##:::: ##::'##:. ##::: ##:: ####: ##:: ##:: ####: ##: ##:::..:::\n",
"::: ##:::: ########::'##:::. ##:: ##:: ## ## ##:: ##:: ## ## ##: ##::'####:\n",
"::: ##:::: ##.. ##::: #########:: ##:: ##. ####:: ##:: ##. ####: ##::: ##::\n",
"::: ##:::: ##::. ##:: ##.... ##:: ##:: ##:. ###:: ##:: ##:. ###: ##::: ##::\n",
"::: ##:::: ##:::. ##: ##:::: ##:'####: ##::. ##:'####: ##::. ##:. ######:::\n",
":::..:::::..:::::..::..:::::..::....::..::::..::....::..::::..:::......::::\n",
"\u001b[0m\n",
"Progress:|███ | 12% 1249/10000 [30:29<3:32:26, 1.46s/it, loss=0.155, lr=4.39e-6] \u001b[1;32mSAVING CHECKPOINT...\n",
"Done, resuming training ...\u001b[0m\n",
"Progress:|██████ | 22% 2249/10000 [54:57<3:08:04, 1.46s/it, loss=0.2, lr=3.9e-6] \u001b[1;32mSAVING CHECKPOINT...\n",
"Done, resuming training ...\u001b[0m\n",
"Progress:|████████ | 32% 3249/10000 [1:19:24<2:43:27, 1.45s/it, loss=0.0309, lr=3.41e-6] \u001b[1;32mSAVING CHECKPOINT...\n",
"Done, resuming training ...\u001b[0m\n",
"Progress:|██████████ | 42% 4249/10000 [1:43:52<2:19:25, 1.45s/it, loss=0.00711, lr=2.92e-6] \u001b[1;32mSAVING CHECKPOINT...\n",
"Done, resuming training ...\u001b[0m\n",
"Progress:|█████████████ | 52% 5249/10000 [2:08:19<1:55:03, 1.45s/it, loss=0.0245, lr=2.43e-6] \u001b[1;32mSAVING CHECKPOINT...\n",
"Done, resuming training ...\u001b[0m\n",
"Progress:|████████████████ | 62% 6249/10000 [2:32:46<1:30:44, 1.45s/it, loss=0.298, lr=1.94e-6] \u001b[1;32mSAVING CHECKPOINT...\n",
"Done, resuming training ...\u001b[0m\n",
"Progress:|██████████████████ | 72% 7249/10000 [2:57:15<1:06:45, 1.46s/it, loss=0.00783, lr=1.45e-6] \u001b[1;32mSAVING CHECKPOINT...\n",
"Done, resuming training ...\u001b[0m\n",
"Progress:|████████████████████ | 82% 8249/10000 [3:21:43<42:24, 1.45s/it, loss=0.0026, lr=9.58e-7] \u001b[1;32mSAVING CHECKPOINT...\n",
"Done, resuming training ...\u001b[0m\n",
"Progress:|███████████████████████ | 91% 9088/10000 [3:42:22<22:06, 1.45s/it, loss=0.0403, lr=5.47e-7] \u001b[0;32mari \u001b[0m"
]
}
],
"source": [
"#@markdown ---\n",
"#@markdown #Start DreamBooth\n",
"#@markdown ---\n",
"import os\n",
"from subprocess import getoutput\n",
"from IPython.display import clear_output\n",
"from google.colab import runtime\n",
"import time\n",
"import random\n",
"\n",
"if os.path.exists(INSTANCE_DIR+\"/.ipynb_checkpoints\"):\n",
" %rm -r $INSTANCE_DIR\"/.ipynb_checkpoints\"\n",
"\n",
"if os.path.exists(CONCEPT_DIR+\"/.ipynb_checkpoints\"):\n",
" %rm -r $CONCEPT_DIR\"/.ipynb_checkpoints\"\n",
"\n",
"if os.path.exists(CAPTIONS_DIR+\"/.ipynb_checkpoints\"):\n",
" %rm -r $CAPTIONS_DIR\"/.ipynb_checkpoints\"\n",
"\n",
"if os.path.exists(CAPTIONS_DIR+\"off\"):\n",
" !mv $CAPTIONS_DIR\"off\" $CAPTIONS_DIR\n",
"\n",
"Resume_Training = True #@param {type:\"boolean\"}\n",
"\n",
"if resume and not Resume_Training:\n",
" print('\u001b[1;31mOverwrite your previously trained model ?, answering \"yes\" will train a new model, answering \"no\" will resume the training of the previous model?  yes or no ?\u001b[0m')\n",
" while True:\n",
" ansres=input('')\n",
" if ansres=='no':\n",
" Resume_Training = True\n",
" break\n",
" elif ansres=='yes':\n",
" Resume_Training = False\n",
" resume= False\n",
" break\n",
"\n",
"while not Resume_Training and MODEL_NAME==\"\":\n",
" print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n",
" time.sleep(5)\n",
"\n",
"#@markdown - If you're not satisfied with the result, check this box, run again the cell and it will continue training the current model.\n",
"\n",
"MODELT_NAME=MODEL_NAME\n",
"\n",
"UNet_Training_Steps=10000 #@param{type: 'number'}\n",
"UNet_Learning_Rate = 5e-6 #@param [\"2e-5\",\"1e-5\",\"9e-6\",\"8e-6\",\"7e-6\",\"6e-6\",\"5e-6\", \"4e-6\", \"3e-6\", \"2e-6\"] {type:\"raw\"}\n",
"untlr=UNet_Learning_Rate\n",
"\n",
"#@markdown - These default settings are for a dataset of 10 pictures which is enough for training a face, start with 650 or lower, test the model, if not enough, resume training for 150 steps, keep testing until you get the desired output, `set it to 0 to train only the text_encoder`.\n",
"\n",
"Text_Encoder_Training_Steps=0 #@param{type: 'number'}\n",
"\n",
"#@markdown - 200-450 steps is enough for a small dataset, keep this number small to avoid overfitting, set to 0 to disable, `set it to 0 before resuming training if it is already trained`.\n",
"\n",
"Text_Encoder_Concept_Training_Steps=0 #@param{type: 'number'}\n",
"\n",
"#@markdown - Suitable for training a style/concept as it acts as heavy regularization, set it to 1500 steps for 200 concept images (you can go higher), set to 0 to disable, set both the settings above to 0 to fintune only the text_encoder on the concept, `set it to 0 before resuming training if it is already trained`.\n",
"\n",
"Text_Encoder_Learning_Rate = 1e-6 #@param [\"2e-6\", \"1e-6\",\"8e-7\",\"6e-7\",\"5e-7\",\"4e-7\"] {type:\"raw\"}\n",
"txlr=Text_Encoder_Learning_Rate\n",
"\n",
"#@markdown - Learning rate for both text_encoder and concept_text_encoder, keep it low to avoid overfitting (1e-6 is higher than 4e-7)\n",
"\n",
"trnonltxt=\"\"\n",
"if UNet_Training_Steps==0:\n",
" trnonltxt=\"--train_only_text_encoder\"\n",
"\n",
"Seed=''\n",
"\n",
"External_Captions = False #@param {type:\"boolean\"}\n",
"#@markdown - Get the captions from a text file for each instance image.\n",
"extrnlcptn=\"\"\n",
"if External_Captions:\n",
" extrnlcptn=\"--external_captions\"\n",
"\n",
"\n",
"Style_Training = False #@param {type:\"boolean\"}\n",
"\n",
"#@markdown - Further reduce overfitting, suitable when training a style or a general theme, don't check the box at the beginning, check it after training for at least 1000 steps. (Has no effect when using External Captions)\n",
"\n",
"Style=\"\"\n",
"if Style_Training:\n",
" Style=\"--Style\"\n",
"\n",
"Resolution = \"768\" #@param [\"512\", \"576\", \"640\", \"704\", \"768\", \"832\", \"896\", \"960\", \"1024\"]\n",
"Res=int(Resolution)\n",
"\n",
"#@markdown - Higher resolution = Higher quality, make sure the instance images are cropped to this selected size (or larger).\n",
"\n",
"fp16 = True\n",
"\n",
"if Seed =='' or Seed=='0':\n",
" Seed=random.randint(1, 999999)\n",
"else:\n",
" Seed=int(Seed)\n",
"\n",
"GC=\"--gradient_checkpointing\"\n",
"\n",
"if fp16:\n",
" prec=\"fp16\"\n",
"else:\n",
" prec=\"no\"\n",
"\n",
"s = getoutput('nvidia-smi')\n",
"if 'A100' in s:\n",
" GC=\"\"\n",
"\n",
"precision=prec\n",
"\n",
"resuming=\"\"\n",
"if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n",
" MODELT_NAME=OUTPUT_DIR\n",
" print('\u001b[1;32mResuming Training...\u001b[0m')\n",
" resuming=\"Yes\"\n",
"elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n",
" print('\u001b[1;31mPrevious model not found, training a new model...\u001b[0m')\n",
" MODELT_NAME=MODEL_NAME\n",
" while MODEL_NAME==\"\":\n",
" print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n",
" time.sleep(5)\n",
"\n",
"Enable_text_encoder_training= True\n",
"Enable_Text_Encoder_Concept_Training= True\n",
"\n",
"if Text_Encoder_Training_Steps==0 or External_Captions:\n",
" Enable_text_encoder_training= False\n",
"else:\n",
" stptxt=Text_Encoder_Training_Steps\n",
"\n",
"if Text_Encoder_Concept_Training_Steps==0:\n",
" Enable_Text_Encoder_Concept_Training= False\n",
"else:\n",
" stptxtc=Text_Encoder_Concept_Training_Steps\n",
"\n",
"#@markdown ---------------------------\n",
"Save_Checkpoint_Every_n_Steps = True #@param {type:\"boolean\"}\n",
"Save_Checkpoint_Every=1000 #@param{type: 'number'}\n",
"if Save_Checkpoint_Every==None:\n",
" Save_Checkpoint_Every=1\n",
"#@markdown - Minimum 200 steps between each save.\n",
"stp=0\n",
"Start_saving_from_the_step=1250 #@param{type: 'number'}\n",
"if Start_saving_from_the_step==None:\n",
" Start_saving_from_the_step=0\n",
"if (Start_saving_from_the_step < 200):\n",
" Start_saving_from_the_step=Save_Checkpoint_Every\n",
"stpsv=Start_saving_from_the_step\n",
"if Save_Checkpoint_Every_n_Steps:\n",
" stp=Save_Checkpoint_Every\n",
"#@markdown - Start saving intermediary checkpoints from this step.\n",
"\n",
"Disconnect_after_training=False #@param {type:\"boolean\"}\n",
"\n",
"#@markdown - Auto-disconnect from google colab after the training to avoid wasting compute units.\n",
"\n",
"def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):\n",
" \n",
" !accelerate launch /content/diffusers/examples/dreambooth/train_dreambooth.py \\\n",
" $trnonltxt \\\n",
" --image_captions_filename \\\n",
" --train_text_encoder \\\n",
" --dump_only_text_encoder \\\n",
" --pretrained_model_name_or_path=\"$MODELT_NAME\" \\\n",
" --instance_data_dir=\"$INSTANCE_DIR\" \\\n",
" --output_dir=\"$OUTPUT_DIR\" \\\n",
" --instance_prompt=\"$PT\" \\\n",
" --seed=$Seed \\\n",
" --resolution=512 \\\n",
" --mixed_precision=$precision \\\n",
" --train_batch_size=1 \\\n",
" --gradient_accumulation_steps=1 $GC \\\n",
" --use_8bit_adam \\\n",
" --learning_rate=$txlr \\\n",
" --lr_scheduler=\"polynomial\" \\\n",
" --lr_warmup_steps=0 \\\n",
" --max_train_steps=$Training_Steps\n",
"\n",
"def train_only_unet(stpsv, stp, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, Res, precision, Training_Steps):\n",
" clear_output()\n",
" if resuming==\"Yes\":\n",
" print('\u001b[1;32mResuming Training...\u001b[0m')\n",
" print('\u001b[1;33mTraining the UNet...\u001b[0m')\n",
" !accelerate launch /content/diffusers/examples/dreambooth/train_dreambooth.py \\\n",
" $Style \\\n",
" $extrnlcptn \\\n",
" --stop_text_encoder_training=$Text_Encoder_Training_Steps \\\n",
" --image_captions_filename \\\n",
" --train_only_unet \\\n",
" --save_starting_step=$stpsv \\\n",
" --save_n_steps=$stp \\\n",
" --Session_dir=$SESSION_DIR \\\n",
" --pretrained_model_name_or_path=\"$MODELT_NAME\" \\\n",
" --instance_data_dir=\"$INSTANCE_DIR\" \\\n",
" --output_dir=\"$OUTPUT_DIR\" \\\n",
" --captions_dir=\"$CAPTIONS_DIR\" \\\n",
" --instance_prompt=\"$PT\" \\\n",
" --seed=$Seed \\\n",
" --resolution=$Res \\\n",
" --mixed_precision=$precision \\\n",
" --train_batch_size=1 \\\n",
" --gradient_accumulation_steps=1 $GC \\\n",
" --use_8bit_adam \\\n",
" --learning_rate=$untlr \\\n",
" --lr_scheduler=\"polynomial\" \\\n",
" --lr_warmup_steps=0 \\\n",
" --max_train_steps=$Training_Steps\n",
"\n",
"\n",
"if Enable_text_encoder_training :\n",
" print('\u001b[1;33mTraining the text encoder...\u001b[0m')\n",
" if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):\n",
" %rm -r $OUTPUT_DIR\"/text_encoder_trained\"\n",
" dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)\n",
"\n",
"if Enable_Text_Encoder_Concept_Training:\n",
" if os.path.exists(CONCEPT_DIR):\n",
" if os.listdir(CONCEPT_DIR)!=[]:\n",
" clear_output()\n",
" if resuming==\"Yes\":\n",
" print('\u001b[1;32mResuming Training...\u001b[0m')\n",
" print('\u001b[1;33mTraining the text encoder on the concept...\u001b[0m')\n",
" dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)\n",
" else:\n",
" clear_output()\n",
" if resuming==\"Yes\":\n",
" print('\u001b[1;32mResuming Training...\u001b[0m')\n",
" print('\u001b[1;31mNo concept images found, skipping concept training...')\n",
" Text_Encoder_Concept_Training_Steps=0\n",
" time.sleep(8)\n",
" else:\n",
" clear_output()\n",
" if resuming==\"Yes\":\n",
" print('\u001b[1;32mResuming Training...\u001b[0m')\n",
" print('\u001b[1;31mNo concept images found, skipping concept training...')\n",
" Text_Encoder_Concept_Training_Steps=0\n",
" time.sleep(8)\n",
"\n",
"if UNet_Training_Steps!=0:\n",
" train_only_unet(stpsv, stp, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, Res, precision, Training_Steps=UNet_Training_Steps)\n",
"\n",
"if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and External_Captions :\n",
" print('\u001b[1;32mNothing to do')\n",
"else:\n",
" if os.path.exists('/content/models/'+INSTANCE_NAME+'/unet/diffusion_pytorch_model.bin'):\n",
" prc=\"--fp16\" if precision==\"fp16\" else \"\"\n",
" !python /content/diffusers/scripts/convertosdv2.py $prc $OUTPUT_DIR $SESSION_DIR/$Session_Name\".ckpt\"\n",
" clear_output()\n",
" if os.path.exists(SESSION_DIR+\"/\"+INSTANCE_NAME+'.ckpt'):\n",
" clear_output()\n",
" print(\"\u001b[1;32mDONE, the CKPT model is in your Gdrive in the sessions folder\")\n",
" if Disconnect_after_training :\n",
" time.sleep(20)\n",
" runtime.unassign()\n",
" else:\n",
" print(\"\u001b[1;31mSomething went wrong\")\n",
" else:\n",
" print(\"\u001b[1;31mSomething went wrong\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ehi1KKs-l-ZS"
},
"source": [
"# Test The Trained Model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "iAZGngFcI8hq",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "b1f6d813-27ef-4633-a3d9-080ff166426d"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"LatentDiffusion: Running in eps-prediction mode\n",
"DiffusionWrapper has 859.52 M params.\n",
"Loading weights [993f2358] from /content/gdrive/MyDrive/Fast-Dreambooth/Sessions/waveya/waveya_step_8250.ckpt\n",
"Applying xformers cross attention optimization.\n",
"Textual inversion embeddings loaded(0): \n",
"Model loaded in 30.3s (0.9s create model, 28.8s load weights).\n",
"Running on public URL: https://7d5b4da6be238ace.gradio.app\n",
"\u001b[32m✔ Connected\n",
"100% 77/77 [00:41<00:00, 1.87it/s]\n",
"100% 77/77 [00:33<00:00, 2.28it/s]\n",
"100% 77/77 [00:33<00:00, 2.27it/s]\n",
"100% 77/77 [00:33<00:00, 2.28it/s]\n",
"100% 77/77 [00:33<00:00, 2.27it/s]\n",
"100% 77/77 [00:33<00:00, 2.29it/s]\n",
" 77% 59/77 [00:25<00:08, 2.24it/s]"
]
}
],
"source": [
"import os\n",
"import time\n",
"import sys\n",
"import fileinput\n",
"from IPython.display import clear_output\n",
"from subprocess import getoutput\n",
"from IPython.utils import capture\n",
"\n",
"\n",
"Model_Version = \"1.5\" #@param [\"1.5\", \"V2.1-512\", \"V2.1-768\"]\n",
"#@markdown - Important! Choose the correct version and resolution of the model\n",
"\n",
"Previous_Session=\"\" #@param{type: 'string'}\n",
"\n",
"#@markdown - Leave empty if you want to use the current trained model.\n",
"\n",
"Use_Custom_Path = True #@param {type:\"boolean\"}\n",
"\n",
"try:\n",
" INSTANCE_NAME\n",
" INSTANCET=INSTANCE_NAME\n",
"except:\n",
" pass\n",
"#@markdown - if checked, an input box will ask the full path to a desired model.\n",
"\n",
"if Previous_Session!=\"\":\n",
" INSTANCET=Previous_Session\n",
" INSTANCET=INSTANCET.replace(\" \",\"_\")\n",
"\n",
"if Use_Custom_Path:\n",
" try:\n",
" INSTANCET\n",
" del INSTANCET\n",
" except:\n",
" pass\n",
"\n",
"try:\n",
" INSTANCET\n",
" if Previous_Session!=\"\":\n",
" path_to_trained_model='/content/gdrive/MyDrive/Fast-Dreambooth/Sessions/'+Previous_Session+\"/\"+Previous_Session+'.ckpt'\n",
" else:\n",
" path_to_trained_model=SESSION_DIR+\"/\"+INSTANCET+'.ckpt'\n",
"except:\n",
" print('\u001b[1;31mIt seems that you did not perform training during this session \u001b[1;32mor you chose to use a custom path,\\nprovide the full path to the model (including the name of the model):\\n')\n",
" path_to_trained_model=input()\n",
" \n",
"while not os.path.exists(path_to_trained_model):\n",
" print(\"\u001b[1;31mThe model doesn't exist on you Gdrive, use the file explorer to get the path : \")\n",
" path_to_trained_model=input()\n",
" \n",
"fgitclone = \"git clone --depth 1\"\n",
"\n",
"with capture.capture_output() as cap:\n",
" if not os.path.exists('/content/gdrive/MyDrive/Fast-Dreambooth'):\n",
" !mkdir -p /content/gdrive/MyDrive/Fast-Dreambooth\n",
" time.sleep(2)\n",
" %mkdir -p /content/gdrive/MyDrive/Fast-Dreambooth/sd_db\n",
" %cd /content/gdrive/MyDrive/Fast-Dreambooth/sd_db\n",
" !$fgitclone --branch main https://github.com/Stability-AI/stablediffusion\n",
" !$fgitclone --branch Colabdb https://github.com/TheLastBen/stable-diffusion-webui\n",
" !mkdir -p /content/gdrive/MyDrive/Fast-Dreambooth/sd_db/stable-diffusion-webui/cache/huggingface\n",
" !ln -s /content/gdrive/MyDrive/Fast-Dreambooth/sd_db/stable-diffusion-webui/cache/huggingface /root/.cache/\n",
" %cd /content/gdrive/MyDrive/Fast-Dreambooth/sd_db/stable-diffusion-webui\n",
"\n",
"clear_output()\n",
"print('\u001b[1;32m')\n",
"!git pull\n",
"\n",
"with capture.capture_output() as cap:\n",
" if not os.path.exists('/content/gdrive/MyDrive/Fast-Dreambooth/sd_db/stablediffusion/src/k-diffusion/k_diffusion'):\n",
" !mkdir /content/gdrive/MyDrive/Fast-Dreambooth/sd_db/stablediffusion/src\n",
" %cd /content/gdrive/MyDrive/Fast-Dreambooth/sd_db/stablediffusion/src\n",
" !$fgitclone https://github.com/TheLastBen/taming-transformers\n",
" !$fgitclone https://github.com/salesforce/BLIP blip\n",
" !$fgitclone https://github.com/sczhou/CodeFormer codeformer\n",
" !$fgitclone --branch master https://github.com/crowsonkb/k-diffusion\n",
"\n",
"with capture.capture_output() as cap:\n",
" if not os.path.exists('/tools/node/bin/lt'):\n",
" !npm install -g localtunnel\n",
"\n",
"Use_localtunnel = False #@param {type:\"boolean\"}\n",
"\n",
"User = \"\" #@param {type:\"string\"}\n",
"Password= \"\" #@param {type:\"string\"}\n",
"#@markdown - Add credentials to your Gradio interface (optional).\n",
"\n",
"auth=f\"--gradio-auth {User}:{Password}\"\n",
"if User ==\"\" or Password==\"\":\n",
" auth=\"\"\n",
"\n",
"share=''\n",
"if not Use_localtunnel:\n",
" share='--share'\n",
" !wget -q -O /usr/local/lib/python3.8/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/blocks.py\n",
"\n",
"else:\n",
" with capture.capture_output() as cap:\n",
" share=''\n",
" %cd /content\n",
" !nohup lt --port 7860 > srv.txt 2>&1 &\n",
" time.sleep(2)\n",
" !grep -o 'https[^ ]*' /content/srv.txt >srvr.txt\n",
" time.sleep(2)\n",
" srv= getoutput('cat /content/srvr.txt')\n",
"\n",
" for line in fileinput.input('/usr/local/lib/python3.8/dist-packages/gradio/blocks.py', inplace=True):\n",
" if line.strip().startswith('self.server_name ='):\n",
" line = f' self.server_name = \"{srv[8:]}\"\\n'\n",
" if line.strip().startswith('self.protocol = \"https\"'):\n",
" line = ' self.protocol = \"https\"\\n'\n",
" if line.strip().startswith('if self.local_url.startswith(\"https\") or self.is_colab'):\n",
" line = ''\n",
" if line.strip().startswith('else \"http\"'):\n",
" line = ''\n",
" sys.stdout.write(line)\n",
"\n",
" !rm /content/srv.txt\n",
" !rm /content/srvr.txt\n",
" %cd /content/gdrive/MyDrive/Fast-Dreambooth/sd_db/stable-diffusion-webui\n",
"\n",
"if Model_Version == \"V2.1-768\":\n",
" configf=\"--config /content/gdrive/MyDrive/Fast-Dreambooth/sd_db/stablediffusion/configs/stable-diffusion/v2-inference-v.yaml\"\n",
"elif Model_Version == \"V2.1-512\":\n",
" configf=\"--config /content/gdrive/MyDrive/Fast-Dreambooth/sd_db/stablediffusion/configs/stable-diffusion/v2-inference.yaml\"\n",
"else:\n",
" configf=\"\"\n",
"\n",
"clear_output()\n",
"\n",
"if os.path.isfile(path_to_trained_model):\n",
" !python /content/gdrive/MyDrive/Fast-Dreambooth/sd_db/stable-diffusion-webui/webui.py $share --ckpt \"$path_to_trained_model\" $configf $auth\n",
"else:\n",
" !python /content/gdrive/MyDrive/Fast-Dreambooth/sd_db/stable-diffusion-webui/webui.py $share --ckpt-dir \"$path_to_trained_model\" $configf $auth"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "d_mQ23XsOc5R"
},
"source": [
"# Upload The Trained Model to Hugging Face "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "NTqUIuhROdH4"
},
"outputs": [],
"source": [
"from slugify import slugify\n",
"from huggingface_hub import HfApi, HfFolder, CommitOperationAdd\n",
"from huggingface_hub import create_repo\n",
"from IPython.display import display_markdown\n",
"from IPython.display import clear_output\n",
"from IPython.utils import capture\n",
"from google.colab import files\n",
"import shutil\n",
"import time\n",
"import os\n",
"\n",
"Upload_sample_images = False #@param {type:\"boolean\"}\n",
"#@markdown - Upload showcase images of your trained model\n",
"\n",
"Name_of_your_concept = \"\" #@param {type:\"string\"}\n",
"if(Name_of_your_concept == \"\"):\n",
" Name_of_your_concept = Session_Name\n",
"Name_of_your_concept=Name_of_your_concept.replace(\" \",\"-\") \n",
" \n",
"Save_concept_to = \"My_Profile\" #@param [\"Public_Library\", \"My_Profile\"]\n",
"\n",
"#@markdown - [Create a write access token](https://huggingface.co/settings/tokens) , go to \"New token\" -> Role : Write. A regular read token won't work here.\n",
"hf_token_write = \"\" #@param {type:\"string\"}\n",
"if hf_token_write ==\"\":\n",
" print('\u001b[1;32mYour Hugging Face write access token : ')\n",
" hf_token_write=input()\n",
"\n",
"hf_token = hf_token_write\n",
"\n",
"api = HfApi()\n",
"your_username = api.whoami(token=hf_token)[\"name\"]\n",
"\n",
"if(Save_concept_to == \"Public_Library\"):\n",
" repo_id = f\"sd-dreambooth-library/{slugify(Name_of_your_concept)}\"\n",
" #Join the Concepts Library organization if you aren't part of it already\n",
" !curl -X POST -H 'Authorization: Bearer '$hf_token -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX\n",
"else:\n",
" repo_id = f\"{your_username}/{slugify(Name_of_your_concept)}\"\n",
"output_dir = f'/content/models/'+INSTANCE_NAME\n",
"\n",
"def bar(prg):\n",
" br=\"\u001b[1;33mUploading to HuggingFace : \" '\u001b[0m|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ \"%\"\n",
" return br\n",
"\n",
"print(\"\u001b[1;32mLoading...\")\n",
"\n",
"NM=\"False\"\n",
"if os.path.getsize(OUTPUT_DIR+\"/text_encoder/pytorch_model.bin\") > 670901463:\n",
" NM=\"True\"\n",
"\n",
"with capture.capture_output() as cap:\n",
" if NM==\"False\":\n",
" %cd $OUTPUT_DIR\n",
" !rm -r safety_checker feature_extractor .git\n",
" !rm model_index.json\n",
" !git init\n",
" !git lfs install --system --skip-repo\n",
" !git remote add -f origin \"https://USER:{hf_token}@huggingface.co/runwayml/stable-diffusion-v1-5\"\n",
" !git config core.sparsecheckout true\n",
" !echo -e \"feature_extractor\\nsafety_checker\\nmodel_index.json\" > .git/info/sparse-checkout\n",
" !git pull origin main\n",
" !rm -r .git\n",
" %cd /content\n",
" else:\n",
" %cd $OUTPUT_DIR\n",
" !rm -r feature_extractor .git\n",
" !git init\n",
" !git lfs install --system --skip-repo\n",
" !git remote add -f origin \"https://USER:{hf_token}@huggingface.co/stabilityai/stable-diffusion-2-1\"\n",
" !git config core.sparsecheckout true\n",
" !echo -e \"feature_extractor\" > .git/info/sparse-checkout\n",
" !git pull origin main\n",
" !rm -r .git\n",
" %cd /content\n",
"\n",
"\n",
"image_string = \"\"\n",
"\n",
"if os.path.exists('/content/sample_images'):\n",
" !rm -r /content/sample_images\n",
"Samples=\"/content/sample_images\"\n",
"!mkdir $Samples\n",
"clear_output()\n",
"\n",
"if Upload_sample_images:\n",
"\n",
" print(\"\u001b[1;32mUpload Sample images of the model\")\n",
" uploaded = files.upload()\n",
" for filename in uploaded.keys():\n",
" shutil.move(filename, Samples)\n",
" %cd $Samples\n",
" !find . -name \"* *\" -type f | rename 's/ /_/g'\n",
" %cd /content\n",
" clear_output()\n",
"\n",
" print(bar(1))\n",
"\n",
" images_upload = os.listdir(Samples)\n",
" instance_prompt_list = []\n",
" for i, image in enumerate(images_upload):\n",
" image_string = f'''\n",
" {image_string}![{i}](https://huggingface.co/{repo_id}/resolve/main/sample_images/{image})\n",
" '''\n",
" \n",
"readme_text = f'''---\n",
"license: creativeml-openrail-m\n",
"tags:\n",
"- text-to-image\n",
"- stable-diffusion\n",
"---\n",
"### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)[\"name\"]} with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook\n",
"\n",
"\n",
"Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)\n",
"\n",
"Sample pictures of this concept:\n",
"{image_string}\n",
"'''\n",
"#Save the readme to a file\n",
"readme_file = open(\"README.md\", \"w\")\n",
"readme_file.write(readme_text)\n",
"readme_file.close()\n",
"\n",
"operations = [\n",
" CommitOperationAdd(path_in_repo=\"README.md\", path_or_fileobj=\"README.md\"),\n",
" CommitOperationAdd(path_in_repo=f\"{Session_Name}.ckpt\",path_or_fileobj=MDLPTH)\n",
"\n",
"]\n",
"create_repo(repo_id,private=True, token=hf_token)\n",
"\n",
"api.create_commit(\n",
" repo_id=repo_id,\n",
" operations=operations,\n",
" commit_message=f\"Upload the concept {Name_of_your_concept} embeds and token\",\n",
" token=hf_token\n",
")\n",
"\n",
"api.upload_folder(\n",
" folder_path=OUTPUT_DIR+\"/feature_extractor\",\n",
" path_in_repo=\"feature_extractor\",\n",
" repo_id=repo_id,\n",
" token=hf_token\n",
")\n",
"\n",
"clear_output()\n",
"print(bar(4))\n",
"\n",
"if NM==\"False\":\n",
" api.upload_folder(\n",
" folder_path=OUTPUT_DIR+\"/safety_checker\",\n",
" path_in_repo=\"safety_checker\",\n",
" repo_id=repo_id,\n",
" token=hf_token\n",
" )\n",
"\n",
"clear_output()\n",
"print(bar(8))\n",
"\n",
"\n",
"api.upload_folder(\n",
" folder_path=OUTPUT_DIR+\"/scheduler\",\n",
" path_in_repo=\"scheduler\",\n",
" repo_id=repo_id,\n",
" token=hf_token\n",
")\n",
"\n",
"clear_output()\n",
"print(bar(9))\n",
"\n",
"api.upload_folder(\n",
" folder_path=OUTPUT_DIR+\"/text_encoder\",\n",
" path_in_repo=\"text_encoder\",\n",
" repo_id=repo_id,\n",
" token=hf_token\n",
")\n",
"\n",
"clear_output()\n",
"print(bar(12))\n",
"\n",
"api.upload_folder(\n",
" folder_path=OUTPUT_DIR+\"/tokenizer\",\n",
" path_in_repo=\"tokenizer\",\n",
" repo_id=repo_id,\n",
" token=hf_token\n",
")\n",
"\n",
"clear_output()\n",
"print(bar(13))\n",
"\n",
"api.upload_folder(\n",
" folder_path=OUTPUT_DIR+\"/unet\",\n",
" path_in_repo=\"unet\",\n",
" repo_id=repo_id,\n",
" token=hf_token\n",
")\n",
"\n",
"clear_output()\n",
"print(bar(21))\n",
"\n",
"api.upload_folder(\n",
" folder_path=OUTPUT_DIR+\"/vae\",\n",
" path_in_repo=\"vae\",\n",
" repo_id=repo_id,\n",
" token=hf_token\n",
")\n",
"\n",
"clear_output()\n",
"print(bar(23))\n",
"\n",
"api.upload_file(\n",
" path_or_fileobj=OUTPUT_DIR+\"/model_index.json\",\n",
" path_in_repo=\"model_index.json\",\n",
" repo_id=repo_id,\n",
" token=hf_token\n",
")\n",
"\n",
"clear_output()\n",
"print(bar(24))\n",
"\n",
"api.upload_folder(\n",
" folder_path=Samples,\n",
" path_in_repo=\"sample_images\",\n",
" repo_id=repo_id,\n",
" token=hf_token\n",
")\n",
"\n",
"clear_output()\n",
"print(bar(25))\n",
"\n",
"display_markdown(f'''## Your concept was saved successfully. [Click here to access it](https://huggingface.co/{repo_id})\n",
"''', raw=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "iVqNi8IDzA1Z"
},
"outputs": [],
"source": [
"#@markdown #Free Gdrive Space\n",
"\n",
"#@markdown Display the list of sessions from your gdrive and choose which ones to remove.\n",
"\n",
"import ipywidgets as widgets\n",
"\n",
"Sessions=os.listdir(\"/content/gdrive/MyDrive/Fast-Dreambooth/Sessions\")\n",
"\n",
"s = widgets.Select(\n",
" options=Sessions,\n",
" rows=5,\n",
" description='',\n",
" disabled=False\n",
")\n",
"\n",
"out=widgets.Output()\n",
"\n",
"d = widgets.Button(\n",
" description='Remove',\n",
" disabled=False,\n",
" button_style='warning',\n",
" tooltip='Removet the selected session',\n",
" icon='warning'\n",
")\n",
"\n",
"def rem(d):\n",
" with out:\n",
" if s.value is not None:\n",
" clear_output()\n",
" print(\"\u001b[1;33mTHE SESSION \u001b[1;31m\"+s.value+\" \u001b[1;33mHAS BEEN REMOVED FROM YOUR GDRIVE\")\n",
" !rm -r '/content/gdrive/MyDrive/Fast-Dreambooth/Sessions/{s.value}'\n",
" s.options=os.listdir(\"/content/gdrive/MyDrive/Fast-Dreambooth/Sessions\") \n",
" else:\n",
" d.close()\n",
" s.close()\n",
" clear_output()\n",
" print(\"\u001b[1;32mNOTHING TO REMOVE\")\n",
"\n",
"d.on_click(rem)\n",
"if s.value is not None:\n",
" display(s,d,out)\n",
"else:\n",
" print(\"\u001b[1;32mNOTHING TO REMOVE\")"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"provenance": [],
"include_colab_link": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}