@adamamer20
Created April 25, 2024 06:39
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"c:\\ProgramData\\miniforge3\\envs\\finetuning\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
}
],
"source": [
"from functools import partial\n",
"\n",
"import timm\n",
"from fastai import test_utils\n",
"from fastai.vision.all import (\n",
" ImageDataLoaders,\n",
" Resize,\n",
" URLs,\n",
" error_rate,\n",
" get_image_files,\n",
" untar_data,\n",
" vision_learner,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"```text\n",
"=== Software === \n",
"python : 3.11.9\n",
"fastai : 2.7.14\n",
"fastcore : 1.5.29\n",
"fastprogress : 1.0.3\n",
"torch : 2.2.1+cpu\n",
"torch cuda : None / is **Not available** \n",
"\n",
"=== Hardware === \n",
"No GPUs available \n",
"\n",
"=== Environment === \n",
"platform : Windows-10-10.0.22631-SP0\n",
"conda env : Unknown\n",
"python : c:\\ProgramData\\miniforge3\\envs\\finetuning\\python.exe\n",
"sys.path : c:\\Users\\adiad\\OneDrive - Università Commerciale Luigi Bocconi\\Documenti\\Projects\\restaurants\n",
"c:\\ProgramData\\miniforge3\\envs\\finetuning\\python311.zip\n",
"c:\\ProgramData\\miniforge3\\envs\\finetuning\\DLLs\n",
"c:\\ProgramData\\miniforge3\\envs\\finetuning\\Lib\n",
"c:\\ProgramData\\miniforge3\\envs\\finetuning\n",
"\n",
"C:\\Users\\adiad\\AppData\\Roaming\\Python\\Python311\\site-packages\n",
"c:\\ProgramData\\miniforge3\\envs\\finetuning\\Lib\\site-packages\n",
"c:\\ProgramData\\miniforge3\\envs\\finetuning\\Lib\\site-packages\\win32\n",
"c:\\ProgramData\\miniforge3\\envs\\finetuning\\Lib\\site-packages\\win32\\lib\n",
"c:\\ProgramData\\miniforge3\\envs\\finetuning\\Lib\\site-packages\\Pythonwin\n",
"no supported gpus found on this system\n",
"```\n",
"\n",
"Please make sure to include opening/closing ``` when you paste into forums/github to make the reports appear formatted as code sections.\n",
"\n"
]
}
],
"source": [
"test_utils.show_install(1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Create standard dataset with files"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"path = untar_data(URLs.PETS)\n",
"\n",
"files = get_image_files(path / \"images\")\n",
"\n",
"\n",
"def label_func(f):\n",
" return f[0].isupper()\n",
"\n",
"\n",
"dls = ImageDataLoaders.from_name_func(path, files, label_func, item_tfms=Resize(224))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Download pretrained model and cut the last two layers. StopIteration is returned because the model does not have pooling layers and thus \"automatic\" cut is not possible. The passed \"cut\" parameter had no effect"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"ename": "StopIteration",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mStopIteration\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[4], line 5\u001b[0m\n\u001b[0;32m 1\u001b[0m timm_model \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhf_hub:anonauthors/food101-timm-vit_base_patch16_224.orig_in21k_ft_in1k\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 3\u001b[0m model \u001b[38;5;241m=\u001b[39m partial(timm\u001b[38;5;241m.\u001b[39mcreate_model, timm_model, pretrained\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m----> 5\u001b[0m learn \u001b[38;5;241m=\u001b[39m \u001b[43mvision_learner\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdls\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmetrics\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43merror_rate\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcut\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m2\u001b[39;49m\u001b[43m)\u001b[49m\n",
"File \u001b[1;32mc:\\ProgramData\\miniforge3\\envs\\finetuning\\Lib\\site-packages\\fastai\\vision\\learner.py:236\u001b[0m, in \u001b[0;36mvision_learner\u001b[1;34m(dls, arch, normalize, n_out, pretrained, weights, loss_func, opt_func, lr, splitter, cbs, metrics, path, model_dir, wd, wd_bn_bias, train_bn, moms, cut, init, custom_head, concat_pool, pool, lin_ftrs, ps, first_bn, bn_final, lin_first, y_range, **kwargs)\u001b[0m\n\u001b[0;32m 234\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m 235\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m normalize: _add_norm(dls, meta, pretrained, n_in)\n\u001b[1;32m--> 236\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mcreate_vision_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43march\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_out\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpretrained\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpretrained\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweights\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mweights\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_args\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 238\u001b[0m splitter \u001b[38;5;241m=\u001b[39m ifnone(splitter, meta[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msplit\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[0;32m 239\u001b[0m learn \u001b[38;5;241m=\u001b[39m Learner(dls\u001b[38;5;241m=\u001b[39mdls, model\u001b[38;5;241m=\u001b[39mmodel, loss_func\u001b[38;5;241m=\u001b[39mloss_func, opt_func\u001b[38;5;241m=\u001b[39mopt_func, lr\u001b[38;5;241m=\u001b[39mlr, splitter\u001b[38;5;241m=\u001b[39msplitter, cbs\u001b[38;5;241m=\u001b[39mcbs,\n\u001b[0;32m 240\u001b[0m metrics\u001b[38;5;241m=\u001b[39mmetrics, path\u001b[38;5;241m=\u001b[39mpath, model_dir\u001b[38;5;241m=\u001b[39mmodel_dir, wd\u001b[38;5;241m=\u001b[39mwd, wd_bn_bias\u001b[38;5;241m=\u001b[39mwd_bn_bias, train_bn\u001b[38;5;241m=\u001b[39mtrain_bn, moms\u001b[38;5;241m=\u001b[39mmoms)\n",
"File \u001b[1;32mc:\\ProgramData\\miniforge3\\envs\\finetuning\\Lib\\site-packages\\fastai\\vision\\learner.py:173\u001b[0m, in \u001b[0;36mcreate_vision_model\u001b[1;34m(arch, n_out, pretrained, weights, cut, n_in, init, custom_head, concat_pool, pool, lin_ftrs, ps, first_bn, bn_final, lin_first, y_range)\u001b[0m\n\u001b[0;32m 171\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m 172\u001b[0m model \u001b[38;5;241m=\u001b[39m arch(pretrained\u001b[38;5;241m=\u001b[39mpretrained)\n\u001b[1;32m--> 173\u001b[0m body \u001b[38;5;241m=\u001b[39m \u001b[43mcreate_body\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_in\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpretrained\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mifnone\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcut\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmeta\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mcut\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 174\u001b[0m nf \u001b[38;5;241m=\u001b[39m num_features_model(nn\u001b[38;5;241m.\u001b[39mSequential(\u001b[38;5;241m*\u001b[39mbody\u001b[38;5;241m.\u001b[39mchildren())) \u001b[38;5;28;01mif\u001b[39;00m custom_head \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m 175\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m add_head(body, nf, n_out, init\u001b[38;5;241m=\u001b[39minit, head\u001b[38;5;241m=\u001b[39mcustom_head, concat_pool\u001b[38;5;241m=\u001b[39mconcat_pool, pool\u001b[38;5;241m=\u001b[39mpool,\n\u001b[0;32m 176\u001b[0m lin_ftrs\u001b[38;5;241m=\u001b[39mlin_ftrs, ps\u001b[38;5;241m=\u001b[39mps, first_bn\u001b[38;5;241m=\u001b[39mfirst_bn, bn_final\u001b[38;5;241m=\u001b[39mbn_final, lin_first\u001b[38;5;241m=\u001b[39mlin_first, y_range\u001b[38;5;241m=\u001b[39my_range)\n",
"File \u001b[1;32mc:\\ProgramData\\miniforge3\\envs\\finetuning\\Lib\\site-packages\\fastai\\vision\\learner.py:84\u001b[0m, in \u001b[0;36mcreate_body\u001b[1;34m(model, n_in, pretrained, cut)\u001b[0m\n\u001b[0;32m 82\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m cut \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 83\u001b[0m ll \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(\u001b[38;5;28menumerate\u001b[39m(model\u001b[38;5;241m.\u001b[39mchildren()))\n\u001b[1;32m---> 84\u001b[0m cut \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mi\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mi\u001b[49m\u001b[43m,\u001b[49m\u001b[43mo\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mreversed\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mll\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mhas_pool_type\u001b[49m\u001b[43m(\u001b[49m\u001b[43mo\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 85\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m cut_model(model, cut)\n",
"\u001b[1;31mStopIteration\u001b[0m: "
]
}
],
"source": [
"timm_model = \"hf_hub:anonauthors/food101-timm-vit_base_patch16_224.orig_in21k_ft_in1k\"\n",
"\n",
"model = partial(timm.create_model, timm_model, pretrained=True)\n",
"\n",
"learn = vision_learner(dls, model, metrics=error_rate, cut=-2)"
]
}
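,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Possible workaround (a sketch, not verified in this run): pass the timm model name as a string to `vision_learner` instead of wrapping `timm.create_model` in a `partial`. fastai then takes its timm code path (`create_timm_model`), which removes the classifier through timm's own API instead of searching the model for a pooling layer, so the `StopIteration` above should not be triggered. This assumes the `hf_hub:...` identifier is resolvable by `timm.create_model` and that no explicit `cut` is needed on this path."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch of the string-based timm path (assumption: fastai dispatches str archs to create_timm_model)\n",
"learn = vision_learner(\n",
" dls,\n",
" timm_model, # pass the model name, not a partial, so fastai uses its timm integration\n",
" metrics=error_rate,\n",
")\n",
"# learn.fine_tune(1) # uncomment to actually fine-tune on the Pets data"
]
}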
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}