Resnet50_sample.ipynb
@nogawanogawa · Created February 11, 2021 13:08

{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Resnet50_sample.ipynb",
"provenance": [],
"collapsed_sections": [],
"authorship_tag": "ABX9TyNTO/QPgEr48ZI6eyFDgaHh",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/nogawanogawa/280e8fab098e8e5dc7b9d7638b9cd39a/resnet50_sample.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"metadata": {
"id": "6CNDnDWZEWew"
},
"source": [
"!wget https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz\n",
"!tar zxvf images.tar.gz"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "6OvBj1iBEYjL"
},
"source": [
"import os\n",
"import glob\n",
"import re \n",
"import pandas as pd\n",
"from PIL import Image\n",
"from torch.utils.data import Dataset\n",
"import pandas as pd\n",
"import os\n",
"import torch\n",
"import torchvision.transforms as transforms\n",
"from torchvision.models import resnet50\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"\n",
"from sklearn.metrics import classification_report\n",
"from sklearn import preprocessing\n",
"import datetime\n"
],
"execution_count": 1,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "0KgnpwqNIbVd"
},
"source": [
"class MyDataSet(Dataset):\n",
" def __init__(self):\n",
" \n",
" l = glob.glob('images/*.jpg')\n",
" self.train_df = pd.DataFrame()\n",
" self.images = []\n",
" self.labels = []\n",
" self.le = preprocessing.LabelEncoder()\n",
"\n",
" for path in l:\n",
" self.images.append(path)\n",
" self.labels.append(re.split('[/_.]', path)[1])\n",
"\n",
" self.le.fit(self.labels)\n",
" self.labels_id = self.le.transform(self.labels)\n",
" self.transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])\n",
"\n",
" def __len__(self):\n",
" return len(self.images)\n",
" \n",
" def __getitem__(self, idx):\n",
" image = Image.open(self.images[idx])\n",
" image = image.convert('RGB')\n",
" label = self.labels_id[idx]\n",
" return self.transform(image), int(label)\n",
"\n",
"dataset = MyDataSet()\n",
"\n",
"n_samples = len(dataset)\n",
"train_size = int(len(dataset) * 0.7)\n",
"val_size = n_samples - train_size\n",
"\n",
"train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])\n",
"\n",
"train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=16, shuffle=True)\n",
"val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=16, shuffle=True)\n"
],
"execution_count": 2,
"outputs": []
},
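{
"cell_type": "markdown",
"metadata": {},
"source": [
"A side note, not in the original notebook: torchvision's pretrained ResNet-50 weights were trained on ImageNet-normalized inputs, so a `transforms.Normalize` step is commonly appended to the pipeline. A minimal sketch follows; the results recorded below were produced without it."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch: the same resize/to-tensor pipeline as above, plus the standard\n",
"# ImageNet mean/std normalization expected by pretrained torchvision models.\n",
"# (Not used for the recorded results; swapping it in would require retraining.)\n",
"normalized_transform = transforms.Compose([\n",
"    transforms.Resize((224, 224)),\n",
"    transforms.ToTensor(),\n",
"    transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
"                         std=[0.229, 0.224, 0.225]),\n",
"])"
],
"execution_count": null,
"outputs": []
},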
{
"cell_type": "code",
"metadata": {
"id": "ZlttsB5aI-Uz"
},
"source": [
"model = resnet50(pretrained=True)\n",
"model.fc = nn.Linear(2048, 35)"
],
"execution_count": 3,
"outputs": []
},
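{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: the cell above fine-tunes all layers. A cheaper, common variant is to freeze the pretrained backbone and train only the new head. The sketch below is not part of the original notebook; it also reads `model.fc.in_features` instead of hard-coding 2048."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch only (assumes the same 35-class setup as above); not used for the\n",
"# recorded results.\n",
"for param in model.parameters():\n",
"    param.requires_grad = False\n",
"\n",
"# A freshly created nn.Linear has requires_grad=True by default, so only\n",
"# the replacement head is updated during training.\n",
"model.fc = nn.Linear(model.fc.in_features, 35)"
],
"execution_count": null,
"outputs": []
},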
{
"cell_type": "code",
"metadata": {
"id": "BENWkuCFSaXC"
},
"source": [
"device=torch.device('cuda')\n",
"model.cuda()"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "tuDtAND6S0PF"
},
"source": [
"criterion = nn.CrossEntropyLoss()\n",
"optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)"
],
"execution_count": 5,
"outputs": []
},
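{
"cell_type": "markdown",
"metadata": {},
"source": [
"Not part of the original notebook: with plain SGD at a fixed learning rate, a decay schedule is a frequent companion. A minimal sketch using `StepLR` with hypothetical settings:"
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch: decay the learning rate 10x every 20 epochs (hypothetical\n",
"# hyperparameters; the recorded run kept lr fixed at 0.01).\n",
"scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)\n",
"# scheduler.step() would then be called once per epoch, after train(epoch)."
],
"execution_count": null,
"outputs": []
},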
{
"cell_type": "code",
"metadata": {
"id": "OLhm0E4vVQyk",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "94dbcd23-5fcb-4129-f2ae-e46306c41798"
},
"source": [
"def train(epoch):\n",
" total_loss = 0\n",
" total_size = 0\n",
" model.train()\n",
" for batch_idx, (data, target) in enumerate(train_loader):\n",
" data, target = data.to(device), target.to(device)\n",
" optimizer.zero_grad()\n",
" output = model(data)\n",
" loss = criterion(output, target)\n",
" total_loss += loss.item()\n",
" total_size += data.size(0)\n",
" loss.backward()\n",
" optimizer.step()\n",
" if batch_idx % 1000 == 0:\n",
" now = datetime.datetime.now()\n",
" print('[{}] Train Epoch: {} [{}/{} ({:.0f}%)]\\tAverage loss: {:.6f}'.format(\n",
" now,\n",
" epoch, batch_idx * len(data), len(train_loader.dataset),\n",
" 100. * batch_idx / len(train_loader), total_loss / total_size))\n",
"\n",
"for epoch in range(50):\n",
" train(epoch)\n"
],
"execution_count": 7,
"outputs": [
{
"output_type": "stream",
"text": [
"[2021-02-11 12:16:51.962752] Train Epoch: 0 [0/5173 (0%)]\tAverage loss: 0.088912\n",
"[2021-02-11 12:17:46.724005] Train Epoch: 1 [0/5173 (0%)]\tAverage loss: 0.062986\n",
"[2021-02-11 12:18:41.411175] Train Epoch: 2 [0/5173 (0%)]\tAverage loss: 0.043171\n",
"[2021-02-11 12:19:35.992185] Train Epoch: 3 [0/5173 (0%)]\tAverage loss: 0.054072\n",
"[2021-02-11 12:20:30.475818] Train Epoch: 4 [0/5173 (0%)]\tAverage loss: 0.018975\n",
"[2021-02-11 12:21:24.792461] Train Epoch: 5 [0/5173 (0%)]\tAverage loss: 0.044188\n",
"[2021-02-11 12:22:19.138908] Train Epoch: 6 [0/5173 (0%)]\tAverage loss: 0.004164\n",
"[2021-02-11 12:23:13.449962] Train Epoch: 7 [0/5173 (0%)]\tAverage loss: 0.004598\n",
"[2021-02-11 12:24:07.757432] Train Epoch: 8 [0/5173 (0%)]\tAverage loss: 0.016730\n",
"[2021-02-11 12:25:02.124637] Train Epoch: 9 [0/5173 (0%)]\tAverage loss: 0.001853\n",
"[2021-02-11 12:25:56.408605] Train Epoch: 10 [0/5173 (0%)]\tAverage loss: 0.000478\n",
"[2021-02-11 12:26:50.860898] Train Epoch: 11 [0/5173 (0%)]\tAverage loss: 0.006190\n",
"[2021-02-11 12:27:45.156743] Train Epoch: 12 [0/5173 (0%)]\tAverage loss: 0.001362\n",
"[2021-02-11 12:28:39.369146] Train Epoch: 13 [0/5173 (0%)]\tAverage loss: 0.004290\n",
"[2021-02-11 12:29:33.491757] Train Epoch: 14 [0/5173 (0%)]\tAverage loss: 0.001080\n",
"[2021-02-11 12:30:27.692780] Train Epoch: 15 [0/5173 (0%)]\tAverage loss: 0.001071\n",
"[2021-02-11 12:31:21.914283] Train Epoch: 16 [0/5173 (0%)]\tAverage loss: 0.000048\n",
"[2021-02-11 12:32:16.084903] Train Epoch: 17 [0/5173 (0%)]\tAverage loss: 0.000206\n",
"[2021-02-11 12:33:10.221429] Train Epoch: 18 [0/5173 (0%)]\tAverage loss: 0.000048\n",
"[2021-02-11 12:34:04.370542] Train Epoch: 19 [0/5173 (0%)]\tAverage loss: 0.004290\n",
"[2021-02-11 12:34:58.607997] Train Epoch: 20 [0/5173 (0%)]\tAverage loss: 0.003593\n",
"[2021-02-11 12:35:52.772419] Train Epoch: 21 [0/5173 (0%)]\tAverage loss: 0.001346\n",
"[2021-02-11 12:36:46.922835] Train Epoch: 22 [0/5173 (0%)]\tAverage loss: 0.003374\n",
"[2021-02-11 12:37:41.096551] Train Epoch: 23 [0/5173 (0%)]\tAverage loss: 0.000158\n",
"[2021-02-11 12:38:35.179317] Train Epoch: 24 [0/5173 (0%)]\tAverage loss: 0.000677\n",
"[2021-02-11 12:39:29.258060] Train Epoch: 25 [0/5173 (0%)]\tAverage loss: 0.000173\n",
"[2021-02-11 12:40:23.444145] Train Epoch: 26 [0/5173 (0%)]\tAverage loss: 0.000118\n",
"[2021-02-11 12:41:17.526034] Train Epoch: 27 [0/5173 (0%)]\tAverage loss: 0.001065\n",
"[2021-02-11 12:42:11.674821] Train Epoch: 28 [0/5173 (0%)]\tAverage loss: 0.000034\n",
"[2021-02-11 12:43:05.819970] Train Epoch: 29 [0/5173 (0%)]\tAverage loss: 0.000056\n",
"[2021-02-11 12:43:59.922013] Train Epoch: 30 [0/5173 (0%)]\tAverage loss: 0.000014\n",
"[2021-02-11 12:44:54.088834] Train Epoch: 31 [0/5173 (0%)]\tAverage loss: 0.000306\n",
"[2021-02-11 12:45:48.299070] Train Epoch: 32 [0/5173 (0%)]\tAverage loss: 0.000002\n",
"[2021-02-11 12:46:42.376260] Train Epoch: 33 [0/5173 (0%)]\tAverage loss: 0.000029\n",
"[2021-02-11 12:47:36.531758] Train Epoch: 34 [0/5173 (0%)]\tAverage loss: 0.000010\n",
"[2021-02-11 12:48:30.697968] Train Epoch: 35 [0/5173 (0%)]\tAverage loss: 0.000055\n",
"[2021-02-11 12:49:24.870694] Train Epoch: 36 [0/5173 (0%)]\tAverage loss: 0.000108\n",
"[2021-02-11 12:50:19.053750] Train Epoch: 37 [0/5173 (0%)]\tAverage loss: 0.000062\n",
"[2021-02-11 12:51:13.183125] Train Epoch: 38 [0/5173 (0%)]\tAverage loss: 0.000022\n",
"[2021-02-11 12:52:07.219052] Train Epoch: 39 [0/5173 (0%)]\tAverage loss: 0.000010\n",
"[2021-02-11 12:53:01.388245] Train Epoch: 40 [0/5173 (0%)]\tAverage loss: 0.003121\n",
"[2021-02-11 12:53:55.422218] Train Epoch: 41 [0/5173 (0%)]\tAverage loss: 0.000953\n",
"[2021-02-11 12:54:49.611462] Train Epoch: 42 [0/5173 (0%)]\tAverage loss: 0.000021\n",
"[2021-02-11 12:55:43.951275] Train Epoch: 43 [0/5173 (0%)]\tAverage loss: 0.000002\n",
"[2021-02-11 12:56:38.034987] Train Epoch: 44 [0/5173 (0%)]\tAverage loss: 0.000006\n",
"[2021-02-11 12:57:32.162134] Train Epoch: 45 [0/5173 (0%)]\tAverage loss: 0.000004\n",
"[2021-02-11 12:58:26.251923] Train Epoch: 46 [0/5173 (0%)]\tAverage loss: 0.000007\n",
"[2021-02-11 12:59:20.364063] Train Epoch: 47 [0/5173 (0%)]\tAverage loss: 0.000009\n",
"[2021-02-11 13:00:14.427916] Train Epoch: 48 [0/5173 (0%)]\tAverage loss: 0.000002\n",
"[2021-02-11 13:01:08.546507] Train Epoch: 49 [0/5173 (0%)]\tAverage loss: 0.003455\n"
],
"name": "stdout"
}
]
},
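{
"cell_type": "markdown",
"metadata": {},
"source": [
"Not part of the original notebook: the fine-tuned weights vanish with the Colab runtime, so checkpointing after training is a common step. A minimal sketch; `resnet50_pets.pth` is a hypothetical file name."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch: persist the fine-tuned weights ('resnet50_pets.pth' is a\n",
"# hypothetical path, not from the original notebook).\n",
"torch.save(model.state_dict(), 'resnet50_pets.pth')\n",
"\n",
"# Restoring in a fresh session: rebuild the same architecture, then load.\n",
"restored = resnet50(pretrained=False)\n",
"restored.fc = nn.Linear(2048, 35)\n",
"restored.load_state_dict(torch.load('resnet50_pets.pth'))\n",
"restored.to(device)"
],
"execution_count": null,
"outputs": []
},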
{
"cell_type": "code",
"metadata": {
"id": "nqQOVSmLVVDO",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "3ed38823-2662-438b-a2c5-038ba4a5d2a1"
},
"source": [
"pred = []\n",
"Y = []\n",
"for i, (data, target) in enumerate(val_loader):\n",
" with torch.no_grad():\n",
" data, target = data.to(device), target.to(device)\n",
" output = model(data)\n",
" pred += [int(l.argmax()) for l in output]\n",
" Y += [int(l) for l in target]\n",
"\n",
"print(classification_report(Y, pred))"
],
"execution_count": 8,
"outputs": [
{
"output_type": "stream",
"text": [
" precision recall f1-score support\n",
"\n",
" 0 0.90 0.88 0.89 72\n",
" 1 0.79 0.76 0.77 54\n",
" 2 0.73 0.87 0.80 54\n",
" 3 0.82 0.95 0.88 73\n",
" 4 0.93 0.82 0.87 65\n",
" 5 0.88 0.86 0.87 59\n",
" 6 0.79 0.90 0.84 59\n",
" 7 0.91 0.87 0.89 67\n",
" 8 0.75 0.77 0.76 53\n",
" 9 0.89 0.84 0.86 56\n",
" 10 0.92 0.75 0.83 65\n",
" 11 0.87 0.79 0.83 66\n",
" 12 0.61 0.76 0.68 113\n",
" 13 0.79 0.70 0.74 69\n",
" 14 0.72 0.67 0.69 75\n",
" 15 0.73 0.78 0.75 55\n",
" 16 0.77 0.82 0.79 56\n",
" 17 0.90 0.85 0.88 120\n",
" 18 0.92 0.84 0.88 56\n",
" 19 0.83 0.87 0.85 52\n",
" 20 0.86 0.86 0.86 49\n",
" 21 0.91 0.91 0.91 55\n",
" 22 0.79 0.94 0.86 53\n",
" 23 0.81 0.85 0.83 54\n",
" 24 0.80 0.77 0.78 66\n",
" 25 0.94 0.92 0.93 71\n",
" 26 0.96 0.94 0.95 47\n",
" 27 0.92 0.90 0.91 52\n",
" 28 0.77 0.89 0.83 46\n",
" 29 0.88 0.91 0.90 58\n",
" 30 0.86 0.86 0.86 72\n",
" 31 0.93 0.93 0.93 70\n",
" 32 0.68 0.56 0.61 54\n",
" 33 0.87 0.77 0.82 70\n",
" 34 0.90 0.85 0.87 61\n",
"\n",
" accuracy 0.83 2217\n",
" macro avg 0.84 0.83 0.83 2217\n",
"weighted avg 0.84 0.83 0.83 2217\n",
"\n"
],
"name": "stdout"
}
]
},
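{
"cell_type": "markdown",
"metadata": {},
"source": [
"The report above indexes classes by their encoded ids. A side note, not in the original notebook: the label strings can be recovered from the dataset's fitted `LabelEncoder`, for example to pass as `target_names`."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch: encoded ids 0..34 correspond, in order, to the entries of the\n",
"# fitted LabelEncoder's classes_ array (not run in the original notebook).\n",
"class_names = list(dataset.le.classes_)\n",
"print(classification_report(Y, pred, target_names=class_names))"
],
"execution_count": null,
"outputs": []
}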
]
}