Created
November 15, 2021 18:58
-
-
Save josephrocca/78b327f78531a2cb3b28e0f1b9a92d57 to your computer and use it in GitHub Desktop.
ICT - Image Completion Transformer.ipynb
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "ICT - Image Completion Transformer.ipynb",
      "provenance": [],
      "collapsed_sections": [],
      "machine_shape": "hm",
      "mount_file_id": "1lENO-Yd2mOAl1tXd_Xz6869ze-YsAYcX",
      "authorship_tag": "ABX9TyMLoG6iathC1KK3FnHrrhBn",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "cells": [
{
  "cell_type": "markdown",
  "metadata": {
    "id": "view-in-github",
    "colab_type": "text"
  },
  "source": [
    "<a href=\"https://colab.research.google.com/gist/josephrocca/78b327f78531a2cb3b28e0f1b9a92d57/ict-image-completion-transformer.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
  ]
},
{
  "cell_type": "code",
  "metadata": {
    "id": "i7I7oOJJ9pG9"
  },
  "source": [
    "# Important: Ensure it's using a GPU runtime with the menu: Runtime > Change runtime type..."
  ],
  "execution_count": null,
  "outputs": []
},
{
  "cell_type": "code",
  "metadata": {
    "id": "VVt5DhvbdByH"
  },
  "source": [
    "!git clone https://github.com/raywzy/ICT\n",
    "%cd ICT"
  ],
  "execution_count": null,
  "outputs": []
},
{
  "cell_type": "code",
  "metadata": {
    "id": "ADzwzJH6f29Q"
  },
  "source": [
    "!sed -i '/scipy/d' ./requirements.txt # doesn't actually use scipy so we delete it (but it's still imported by some scripts - so we'll just use colab's version to save the strangely-long compile time of v1.0.1)\n",
    "!sed -i '/numpy/d' ./requirements.txt # to prevent colab dependency problems, use colab's version of numpy (hopefully it works)\n",
    "!sed -i '/matplotlib/d' ./requirements.txt # dependency problems again\n",
    "# %pip (rather than !pip) guarantees the install targets this kernel's environment\n",
    "%pip install -r requirements.txt"
  ],
  "execution_count": null,
  "outputs": []
},
{
  "cell_type": "code",
  "metadata": {
    "id": "lHQrsOMTfrAe"
  },
  "source": [
    "!wget -O ckpts_ICT.zip https://www.dropbox.com/s/cqjgcj0serkbdxd/ckpts_ICT.zip?dl=1\n",
    "!unzip ckpts_ICT.zip"
  ],
  "execution_count": null,
  "outputs": []
},
{
  "cell_type": "code",
  "metadata": {
    "id": "zg-npfrY7bc4"
  },
  "source": [
    "# ImageNet config.yml has two GPUs specified, so we switch it to just one:\n",
    "!sed -i 's/GPU: \\[0,1\\]/GPU: \\[0\\]/g' /content/ICT/ckpts_ICT/Upsample/ImageNet/config.yml\n",
    "# NOTE(review): with a single GPU the upsampler is presumably no longer wrapped in DataParallel,\n",
    "# but the ImageNet checkpoint's keys carry a 'module.' prefix (see the load_state_dict error in\n",
    "# the ImageNet run's output below), so strip that prefix from the checkpoint once — TODO confirm\n",
    "# the checkpoint filename glob matches what models.py actually loads:\n",
    "!python -c \"import torch, glob; [torch.save({k: ({kk.replace('module.', '', 1): vv for kk, vv in v.items()} if isinstance(v, dict) else v) for k, v in torch.load(p, map_location='cpu').items()}, p) for p in glob.glob('/content/ICT/ckpts_ICT/Upsample/ImageNet/InpaintingModel*.pth')]\""
  ],
  "execution_count": 9,
  "outputs": []
},
{
  "cell_type": "code",
  "metadata": {
    "id": "bZWF1r6KiZBx"
  },
  "source": [
    "# -p makes this idempotent: re-running the notebook no longer fails on existing directories\n",
    "!mkdir -p input_images\n",
    "!mkdir -p mask_images\n",
    "!mkdir -p output_images"
  ],
  "execution_count": 4,
  "outputs": []
},
{
  "cell_type": "code",
  "metadata": {
    "id": "waYkidTGi4Xy"
  },
  "source": [
    "!wget https://i.imgur.com/SgdJJOq.png -O ./input_images/obama.png\n",
    "!wget https://i.imgur.com/cUlpeD7.png -O ./mask_images/obama.png"
  ],
  "execution_count": null,
  "outputs": []
},
{
  "cell_type": "code",
  "metadata": {
    "id": "bh4l0Fp4dPbN",
    "colab": {
      "base_uri": "https://localhost:8080/"
    },
    "outputId": "ff950935-bdce-4c74-e43a-089aeec93bda"
  },
  "source": [
    "# ImageNet\n",
    "# NOTE(review): the captured output below shows Stage 1 failing with an ImportError\n",
    "# ('ImageDataset' missing from datas.dataset in the upstream repo), so no appearance priors\n",
    "# are produced and the Stage 2 result cannot be trusted until the repo is fixed.\n",
    "!python run.py --input_image /content/ICT/input_images --input_mask /content/ICT/mask_images --sample_num 1 --save_place /content/ICT/output_images --ImageNet --visualize_all"
  ],
  "execution_count": 10,
  "outputs": [
    {
      "output_type": "stream",
      "name": "stdout",
      "text": [
        "Traceback (most recent call last):\n",
        "  File \"inference.py\", line 10, in <module>\n",
        "    from datas.dataset import ImageDataset\n",
        "ImportError: cannot import name 'ImageDataset' from 'datas.dataset' (/content/ICT/Transformer/datas/dataset.py)\n",
        "Finish the Stage 1 - Appearance Priors Reconstruction using Transformer\n",
        "*******remove IN*******\n",
        "Loading InpaintingModel generator...\n",
        "Traceback (most recent call last):\n",
        "  File \"test.py\", line 4, in <module>\n",
        "    main(mode=2)\n",
        "  File \"/content/ICT/Guided_Upsample/main.py\", line 49, in main\n",
        "    model.load()\n",
        "  File \"/content/ICT/Guided_Upsample/src/Guided_Upsampler.py\", line 48, in load\n",
        "    self.inpaint_model.load()\n",
        "  File \"/content/ICT/Guided_Upsample/src/models.py\", line 29, in load\n",
        "    self.generator.load_state_dict(data['generator'])\n",
        "  File \"/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\", line 1483, in load_state_dict\n",
        "    self.__class__.__name__, \"\\n\\t\".join(error_msgs)))\n",
        "RuntimeError: Error(s) in loading state_dict for InpaintGenerator_5:\n",
        "\tMissing key(s) in state_dict: \"encoder.1.weight\", \"encoder.1.bias\", \"encoder.3.weight\", \"encoder.3.bias\", \"encoder.5.weight\", \"encoder.5.bias\", \"middle.0.conv_block.1.weight\", \"middle.0.conv_block.1.bias\", \"middle.0.conv_block.4.weight\", \"middle.0.conv_block.4.bias\", \"middle.1.conv_block.1.weight\", \"middle.1.conv_block.1.bias\", \"middle.1.conv_block.4.weight\", \"middle.1.conv_block.4.bias\", \"middle.2.conv_block.1.weight\", \"middle.2.conv_block.1.bias\", \"middle.2.conv_block.4.weight\", \"middle.2.conv_block.4.bias\", \"middle.3.conv_block.1.weight\", \"middle.3.conv_block.1.bias\", \"middle.3.conv_block.4.weight\", \"middle.3.conv_block.4.bias\", \"middle.4.conv_block.1.weight\", \"middle.4.conv_block.1.bias\", \"middle.4.conv_block.4.weight\", \"middle.4.conv_block.4.bias\", \"middle.5.conv_block.1.weight\", \"middle.5.conv_block.1.bias\", \"middle.5.conv_block.4.weight\", \"middle.5.conv_block.4.bias\", \"middle.6.conv_block.1.weight\", \"middle.6.conv_block.1.bias\", \"middle.6.conv_block.4.weight\", \"middle.6.conv_block.4.bias\", \"middle.7.conv_block.1.weight\", \"middle.7.conv_block.1.bias\", \"middle.7.conv_block.4.weight\", \"middle.7.conv_block.4.bias\", \"decoder.0.weight\", \"decoder.0.bias\", \"decoder.2.weight\", \"decoder.2.bias\", \"decoder.5.weight\", \"decoder.5.bias\". \n",
        "\tUnexpected key(s) in state_dict: \"module.encoder.1.weight\", \"module.encoder.1.bias\", \"module.encoder.3.weight\", \"module.encoder.3.bias\", \"module.encoder.5.weight\", \"module.encoder.5.bias\", \"module.middle.0.conv_block.1.weight\", \"module.middle.0.conv_block.1.bias\", \"module.middle.0.conv_block.4.weight\", \"module.middle.0.conv_block.4.bias\", \"module.middle.1.conv_block.1.weight\", \"module.middle.1.conv_block.1.bias\", \"module.middle.1.conv_block.4.weight\", \"module.middle.1.conv_block.4.bias\", \"module.middle.2.conv_block.1.weight\", \"module.middle.2.conv_block.1.bias\", \"module.middle.2.conv_block.4.weight\", \"module.middle.2.conv_block.4.bias\", \"module.middle.3.conv_block.1.weight\", \"module.middle.3.conv_block.1.bias\", \"module.middle.3.conv_block.4.weight\", \"module.middle.3.conv_block.4.bias\", \"module.middle.4.conv_block.1.weight\", \"module.middle.4.conv_block.1.bias\", \"module.middle.4.conv_block.4.weight\", \"module.middle.4.conv_block.4.bias\", \"module.middle.5.conv_block.1.weight\", \"module.middle.5.conv_block.1.bias\", \"module.middle.5.conv_block.4.weight\", \"module.middle.5.conv_block.4.bias\", \"module.middle.6.conv_block.1.weight\", \"module.middle.6.conv_block.1.bias\", \"module.middle.6.conv_block.4.weight\", \"module.middle.6.conv_block.4.bias\", \"module.middle.7.conv_block.1.weight\", \"module.middle.7.conv_block.1.bias\", \"module.middle.7.conv_block.4.weight\", \"module.middle.7.conv_block.4.bias\", \"module.decoder.0.weight\", \"module.decoder.0.bias\", \"module.decoder.2.weight\", \"module.decoder.2.bias\", \"module.decoder.5.weight\", \"module.decoder.5.bias\". \n",
        "Finish the Stage 2 - Guided Upsampling\n",
        "Please check the results ...\n"
      ]
    }
  ]
},
{
  "cell_type": "code",
  "metadata": {
    "colab": {
      "base_uri": "https://localhost:8080/"
    },
    "id": "s71UtygW38hJ",
    "outputId": "df80b0ad-b05a-4350-beca-61ca5084c2f2"
  },
  "source": [
    "# FFHQ\n",
    "# NOTE(review): the FileNotFoundError in the output below (missing AP/condition_1/obama.png)\n",
    "# is a cascade of the same Stage 1 ImportError — the transformer never wrote its appearance\n",
    "# priors, so the guided upsampler has nothing to load.\n",
    "!python run.py --input_image /content/ICT/input_images --input_mask /content/ICT/mask_images --sample_num 1 --save_place /content/ICT/output_images --FFHQ --visualize_all"
  ],
  "execution_count": 11,
  "outputs": [
    {
      "output_type": "stream",
      "name": "stdout",
      "text": [
        "Traceback (most recent call last):\n",
        "  File \"inference.py\", line 10, in <module>\n",
        "    from datas.dataset import ImageDataset\n",
        "ImportError: cannot import name 'ImageDataset' from 'datas.dataset' (/content/ICT/Transformer/datas/dataset.py)\n",
        "Finish the Stage 1 - Appearance Priors Reconstruction using Transformer\n",
        "*******remove IN*******\n",
        "Loading InpaintingModel generator...\n",
        "\n",
        "start testing...\n",
        "\n",
        "/usr/local/lib/python3.7/dist-packages/PIL/Image.py:960: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images\n",
        "  \"Palette images with Transparency expressed in bytes should be \"\n",
        "loading error: /content/ICT/input_images/obama.png\n",
        "Traceback (most recent call last):\n",
        "  File \"/content/ICT/Guided_Upsample/src/dataset_my.py\", line 63, in __getitem__\n",
        "    item = self.load_item(index)\n",
        "  File \"/content/ICT/Guided_Upsample/src/dataset_my.py\", line 95, in load_item\n",
        "    prior = self.load_prior(img, index)\n",
        "  File \"/content/ICT/Guided_Upsample/src/dataset_my.py\", line 133, in load_prior\n",
        "    edge = Image.open(self.edge_data[index]).convert(\"RGB\")\n",
        "  File \"/usr/local/lib/python3.7/dist-packages/PIL/Image.py\", line 2843, in open\n",
        "    fp = builtins.open(filename, \"rb\")\n",
        "FileNotFoundError: [Errno 2] No such file or directory: '/content/ICT/output_images/AP/condition_1/obama.png'\n",
        "\n",
        "During handling of the above exception, another exception occurred:\n",
        "\n",
        "Traceback (most recent call last):\n",
        "  File \"test.py\", line 4, in <module>\n",
        "    main(mode=2)\n",
        "  File \"/content/ICT/Guided_Upsample/main.py\", line 61, in main\n",
        "    model.test()\n",
        "  File \"/content/ICT/Guided_Upsample/src/Guided_Upsampler.py\", line 199, in test\n",
        "    for items in test_loader:\n",
        "  File \"/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py\", line 521, in __next__\n",
        "    data = self._next_data()\n",
        "  File \"/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py\", line 561, in _next_data\n",
        "    data = self._dataset_fetcher.fetch(index)  # may raise StopIteration\n",
        "  File \"/usr/local/lib/python3.7/dist-packages/torch/utils/data/_utils/fetch.py\", line 49, in fetch\n",
        "    data = [self.dataset[idx] for idx in possibly_batched_index]\n",
        "  File \"/usr/local/lib/python3.7/dist-packages/torch/utils/data/_utils/fetch.py\", line 49, in <listcomp>\n",
        "    data = [self.dataset[idx] for idx in possibly_batched_index]\n",
        "  File \"/content/ICT/Guided_Upsample/src/dataset_my.py\", line 66, in __getitem__\n",
        "    item = self.load_item(0)\n",
        "  File \"/content/ICT/Guided_Upsample/src/dataset_my.py\", line 95, in load_item\n",
        "    prior = self.load_prior(img, index)\n",
        "  File \"/content/ICT/Guided_Upsample/src/dataset_my.py\", line 133, in load_prior\n",
        "    edge = Image.open(self.edge_data[index]).convert(\"RGB\")\n",
        "  File \"/usr/local/lib/python3.7/dist-packages/PIL/Image.py\", line 2843, in open\n",
        "    fp = builtins.open(filename, \"rb\")\n",
        "FileNotFoundError: [Errno 2] No such file or directory: '/content/ICT/output_images/AP/condition_1/obama.png'\n",
        "Finish the Stage 2 - Guided Upsampling\n",
        "Please check the results ...\n"
      ]
    }
  ]
}
]
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.