@INF800
Created February 20, 2021 11:22
Crime-det.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Crime-det.ipynb",
"provenance": [],
"authorship_tag": "ABX9TyPa9uaCZD2GuwOOR75RKQOv",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/rakesh4real/c1068da04149e8d9aec1d7191ffec76a/crime-det.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 35
},
"id": "3N9iqEoJw3C8",
"outputId": "d425d4c8-29db-4592-cda2-fb8d28add04c"
},
"source": [
"\"\"\"\n",
"setup kaggle\n",
"\"\"\"\n",
"\n",
"import os, shutil\n",
"\n",
"os.makedirs('.kaggle', exist_ok=True)\n",
"shutil.move('kaggle.json', '.kaggle/kaggle.json')"
],
"execution_count": 1,
"outputs": [
{
"output_type": "execute_result",
"data": {
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "string"
},
"text/plain": [
"'.kaggle/kaggle.json'"
]
},
"metadata": {
"tags": []
},
"execution_count": 1
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "7iTBNgztywmj"
},
"source": [
"os.environ['KAGGLE_CONFIG_DIR'] = '.kaggle/'"
],
"execution_count": 2,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "WLJT5_yBy1w-",
"outputId": "9cd91dab-3c23-4bef-e42e-0ee2a6506ffe"
},
"source": [
"!kaggle datasets download -d mission-ai/crimeucfdataset"
],
"execution_count": 3,
"outputs": [
{
"output_type": "stream",
"text": [
"Warning: Your Kaggle API key is readable by other users on this system! To fix this, you can run 'chmod 600 .kaggle/kaggle.json'\n",
"Downloading crimeucfdataset.zip to /content\n",
"100% 32.9G/32.9G [14:11<00:00, 33.0MB/s]\n",
"100% 32.9G/32.9G [14:11<00:00, 41.5MB/s]\n"
],
"name": "stdout"
}
]
},
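{
"cell_type": "markdown",
"metadata": {},
"source": [
"The download above warns that the API key is readable by other users. A minimal fix (assuming `kaggle.json` sits at `.kaggle/kaggle.json` as set up earlier):"
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# restrict the key to the owner, as the Kaggle CLI warning suggests\n",
"os.chmod('.kaggle/kaggle.json', 0o600)"
],
"execution_count": null,
"outputs": []
},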
{
"cell_type": "code",
"metadata": {
"id": "7GgWJ7EOD6vZ"
},
"source": [
"#!rm /content/crimeucfdataset.zip"
],
"execution_count": 43,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "cMiIiLeZzeNq"
},
"source": [
"\"\"\"\n",
"due to storage constraints,\n",
"unzip only a few videos\n",
"\"\"\"\n",
"\n",
"# 32 gb!!!\n",
"# !unzip -q \"/content/crimeucfdataset.zip\""
],
"execution_count": 4,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "g-GJRMlK-ofw",
"outputId": "b0762493-d763-41c4-b083-c22aa3941551"
},
"source": [
"for CAT in [\"Abuse\", \"Arrest\", \"Arson\", \"Assault\"]:\n",
"\n",
" DIR_NAME = \"Anomaly-Videos-Part-1\"\n",
" os.makedirs(f\"Anomaly_Dataset/Anomaly_Videos/{DIR_NAME}/{CAT}/\", exist_ok=True)\n",
"\n",
" for i in range(1, 10):\n",
" I = f\"{i}\".zfill(3)\n",
" !unzip -j \"crimeucfdataset.zip\" \"Anomaly_Dataset/Anomaly_Videos/{DIR_NAME}/{CAT}/{CAT}{I}_x264.mp4\" -d \"Anomaly_Dataset/Anomaly_Videos/{DIR_NAME}/{CAT}/\""
],
"execution_count": 35,
"outputs": [
{
"output_type": "stream",
"text": [
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Abuse/Abuse001_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Abuse/Abuse002_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Abuse/Abuse003_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Abuse/Abuse004_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Abuse/Abuse005_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Abuse/Abuse006_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Abuse/Abuse007_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Abuse/Abuse008_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Abuse/Abuse009_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arrest/Arrest001_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arrest/Arrest002_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arrest/Arrest003_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arrest/Arrest004_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arrest/Arrest005_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arrest/Arrest006_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arrest/Arrest007_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arrest/Arrest008_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arrest/Arrest009_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arson/Arson001_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arson/Arson002_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arson/Arson003_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arson/Arson004_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arson/Arson005_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arson/Arson006_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arson/Arson007_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arson/Arson008_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Arson/Arson009_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Assault/Assault001_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Assault/Assault002_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Assault/Assault003_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Assault/Assault004_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Assault/Assault005_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Assault/Assault006_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Assault/Assault007_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Assault/Assault008_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Assault/Assault009_x264.mp4 \n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "9SHVtpzcAPEQ",
"outputId": "705ba244-2891-4cd6-f94d-93933c89ea73"
},
"source": [
"for CAT in [\"Burglary\", \"Explosion\", \"Fighting\"]:\n",
"\n",
" DIR_NAME = \"Anomaly-Videos-Part-2\"\n",
" os.makedirs(f\"Anomaly_Dataset/Anomaly_Videos/{DIR_NAME}/{CAT}/\", exist_ok=True)\n",
"\n",
" for i in range(1, 10):\n",
" I = f\"{i}\".zfill(3)\n",
" !unzip -j \"crimeucfdataset.zip\" \"Anomaly_Dataset/Anomaly_Videos/{DIR_NAME}/{CAT}/{CAT}{I}_x264.mp4\" -d \"Anomaly_Dataset/Anomaly_Videos/{DIR_NAME}/{CAT}/\""
],
"execution_count": 37,
"outputs": [
{
"output_type": "stream",
"text": [
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Burglary/Burglary001_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Burglary/Burglary002_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Burglary/Burglary003_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Burglary/Burglary004_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Burglary/Burglary005_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Burglary/Burglary006_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Burglary/Burglary007_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Burglary/Burglary008_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Burglary/Burglary009_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Explosion/Explosion001_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Explosion/Explosion002_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Explosion/Explosion003_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Explosion/Explosion004_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Explosion/Explosion005_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Explosion/Explosion006_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Explosion/Explosion007_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Explosion/Explosion008_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Explosion/Explosion009_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Fighting/Fighting001_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Fighting/Fighting002_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Fighting/Fighting003_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Fighting/Fighting004_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Fighting/Fighting005_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Fighting/Fighting006_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Fighting/Fighting007_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Fighting/Fighting008_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2/Fighting/Fighting009_x264.mp4 \n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "NYqyQqPyAoza",
"outputId": "fa3418dd-7f01-40b9-e3e2-dcac23867caf"
},
"source": [
"DIR_NAME = \"Normal-Videos-Part-1\"\n",
"os.makedirs(f\"Anomaly_Dataset/Anomaly_Videos/{DIR_NAME}/\", exist_ok=True)\n",
"\n",
"CAT=\"Normal_Videos_\"\n",
"for i in range(25, 40):\n",
" I = f\"{i}\".zfill(3)\n",
" !unzip -j \"crimeucfdataset.zip\" \"Anomaly_Dataset/Anomaly_Videos/{DIR_NAME}/{CAT}{I}_x264.mp4\" -d \"Anomaly_Dataset/Anomaly_Videos/{DIR_NAME}/\""
],
"execution_count": 41,
"outputs": [
{
"output_type": "stream",
"text": [
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_025_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_026_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_027_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_028_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_029_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_030_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_031_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_032_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_033_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
" inflating: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_034_x264.mp4 \n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_035_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_036_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_037_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_038_x264.mp4\n",
"Archive: crimeucfdataset.zip\n",
"caution: filename not matched: Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1/Normal_Videos_039_x264.mp4\n"
],
"name": "stdout"
}
]
},
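{
"cell_type": "markdown",
"metadata": {},
"source": [
"Several archive members above were not matched, so the counts per class differ. A quick check of what actually landed on disk (a sketch over the paths created above):"
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"import glob\n",
"\n",
"# count the .mp4 files that were actually extracted\n",
"extracted = glob.glob('Anomaly_Dataset/**/*.mp4', recursive=True)\n",
"print(len(extracted), 'videos extracted')"
],
"execution_count": null,
"outputs": []
},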
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "lWFdDKX_0BH4",
"outputId": "34b742cb-b279-45a8-836b-63a7b8fe2de4"
},
"source": [
"!sudo apt-get install -y python-dev pkg-config\n",
"!sudo apt-get install -y \\\n",
" libavformat-dev libavcodec-dev libavdevice-dev \\\n",
" libavutil-dev libswscale-dev libswresample-dev libavfilter-dev\n",
"!pip install av\n",
"\n",
"import av\n",
"import glob\n",
"import os\n",
"import time\n",
"import tqdm\n",
"import datetime\n",
"import argparse"
],
"execution_count": 8,
"outputs": [
{
"output_type": "stream",
"text": [
"Reading package lists... Done\n",
"Building dependency tree \n",
"Reading state information... Done\n",
"pkg-config is already the newest version (0.29.1-0ubuntu2).\n",
"python-dev is already the newest version (2.7.15~rc1-1).\n",
"0 upgraded, 0 newly installed, 0 to remove and 10 not upgraded.\n",
"Reading package lists... Done\n",
"Building dependency tree \n",
"Reading state information... Done\n",
"libavcodec-dev is already the newest version (7:3.4.8-0ubuntu0.2).\n",
"libavcodec-dev set to manually installed.\n",
"libavformat-dev is already the newest version (7:3.4.8-0ubuntu0.2).\n",
"libavformat-dev set to manually installed.\n",
"libavutil-dev is already the newest version (7:3.4.8-0ubuntu0.2).\n",
"libavutil-dev set to manually installed.\n",
"libswresample-dev is already the newest version (7:3.4.8-0ubuntu0.2).\n",
"libswresample-dev set to manually installed.\n",
"libswscale-dev is already the newest version (7:3.4.8-0ubuntu0.2).\n",
"libswscale-dev set to manually installed.\n",
"The following additional packages will be installed:\n",
" libpostproc-dev\n",
"The following NEW packages will be installed:\n",
" libavdevice-dev libavfilter-dev libpostproc-dev\n",
"0 upgraded, 3 newly installed, 0 to remove and 10 not upgraded.\n",
"Need to get 1,154 kB of archives.\n",
"After this operation, 5,444 kB of additional disk space will be used.\n",
"Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 libpostproc-dev amd64 7:3.4.8-0ubuntu0.2 [51.0 kB]\n",
"Get:2 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 libavfilter-dev amd64 7:3.4.8-0ubuntu0.2 [1,016 kB]\n",
"Get:3 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 libavdevice-dev amd64 7:3.4.8-0ubuntu0.2 [87.2 kB]\n",
"Fetched 1,154 kB in 2s (765 kB/s)\n",
"debconf: unable to initialize frontend: Dialog\n",
"debconf: (No usable dialog-like program is installed, so the dialog based frontend cannot be used. at /usr/share/perl5/Debconf/FrontEnd/Dialog.pm line 76, <> line 3.)\n",
"debconf: falling back to frontend: Readline\n",
"debconf: unable to initialize frontend: Readline\n",
"debconf: (This frontend requires a controlling tty.)\n",
"debconf: falling back to frontend: Teletype\n",
"dpkg-preconfigure: unable to re-open stdin: \n",
"Selecting previously unselected package libpostproc-dev:amd64.\n",
"(Reading database ... 146442 files and directories currently installed.)\n",
"Preparing to unpack .../libpostproc-dev_7%3a3.4.8-0ubuntu0.2_amd64.deb ...\n",
"Unpacking libpostproc-dev:amd64 (7:3.4.8-0ubuntu0.2) ...\n",
"Selecting previously unselected package libavfilter-dev:amd64.\n",
"Preparing to unpack .../libavfilter-dev_7%3a3.4.8-0ubuntu0.2_amd64.deb ...\n",
"Unpacking libavfilter-dev:amd64 (7:3.4.8-0ubuntu0.2) ...\n",
"Selecting previously unselected package libavdevice-dev:amd64.\n",
"Preparing to unpack .../libavdevice-dev_7%3a3.4.8-0ubuntu0.2_amd64.deb ...\n",
"Unpacking libavdevice-dev:amd64 (7:3.4.8-0ubuntu0.2) ...\n",
"Setting up libpostproc-dev:amd64 (7:3.4.8-0ubuntu0.2) ...\n",
"Setting up libavfilter-dev:amd64 (7:3.4.8-0ubuntu0.2) ...\n",
"Setting up libavdevice-dev:amd64 (7:3.4.8-0ubuntu0.2) ...\n",
"Collecting av\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/41/b7/4b1095af7f8e87c0f54fc0a3de9472d09583eaf2e904a60f0817819fff11/av-8.0.3-cp36-cp36m-manylinux2010_x86_64.whl (37.2MB)\n",
"\u001b[K |████████████████████████████████| 37.2MB 87kB/s \n",
"\u001b[?25hInstalling collected packages: av\n",
"Successfully installed av-8.0.3\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "haCjcmzc1fZj"
},
"source": [
"def video_to_frame(path,out_path):\n",
" vidcap = cv2.VideoCapture(path)\n",
" success,image = vidcap.read()\n",
" count = 0\n",
" while success:\n",
" cv2.imwrite(os.path.join(out_path,\"{}.jpg\".format(count)), image)\n",
" success,image = vidcap.read()\n",
" count += 1\n",
"\n",
"\n",
"def extract_frames(video_path):\n",
" frames = []\n",
" video = av.open(video_path)\n",
" for frame in video.decode(0):\n",
" yield frame.to_image()"
],
"execution_count": 10,
"outputs": []
},
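{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of `extract_frames` on a single clip before the full extraction loops (a sketch; the path assumes the unzip step above succeeded for `Abuse001`):"
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"from itertools import islice\n",
"\n",
"test_clip = 'Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1/Abuse/Abuse001_x264.mp4'\n",
"for n, frame in enumerate(islice(extract_frames(test_clip), 3)):\n",
"    print(n, frame.size)  # PIL (width, height) of each decoded frame"
],
"execution_count": null,
"outputs": []
},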
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "28LgedoB1jkz",
"outputId": "b36c9b58-c591-4a77-9002-65832be249f2"
},
"source": [
"#Anomaly videos part1\n",
"from tqdm import tqdm\n",
"\n",
"path = '/content/Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-1'\n",
"result = '/content/Dataset'\n",
"os.makedirs(result, exist_ok=True)\n",
"\n",
"for i in tqdm(os.listdir(path)):\n",
" p1 = os.path.join(path,i)\n",
" r1 = os.path.join(result,i)\n",
" if os.path.exists(r1):\n",
" continue\n",
" os.makedirs(r1,exist_ok = True)\n",
" for j in os.listdir(p1):\n",
" vid_path = os.path.join(p1,j)\n",
" r2 = os.path.join(r1,j[:-4])\n",
" os.makedirs(r2,exist_ok = True)\n",
" for j, frame in enumerate((extract_frames(vid_path))):\n",
" frame.save(os.path.join(r2, f\"{j}.jpg\"))"
],
"execution_count": 42,
"outputs": [
{
"output_type": "stream",
"text": [
"\n",
" 0%| | 0/4 [00:00<?, ?it/s]\u001b[Astream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"\n",
" 25%|██▌ | 1/4 [01:34<04:42, 94.28s/it]\u001b[Astream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"\n",
" 50%|█████ | 2/4 [03:32<03:23, 101.52s/it]\u001b[Astream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"\n",
" 75%|███████▌ | 3/4 [04:50<01:34, 94.39s/it] \u001b[Astream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"\n",
"100%|██████████| 4/4 [06:48<00:00, 102.19s/it]\n"
],
"name": "stderr"
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "4zepmHES5aeH",
"outputId": "d1422809-c8ca-4cd6-8460-81e8aacb6f50"
},
"source": [
"#Anomaly videos part2\n",
"from tqdm import tqdm\n",
"path = '/content/Anomaly_Dataset/Anomaly_Videos/Anomaly-Videos-Part-2'\n",
"result = '/content/Dataset'\n",
"os.makedirs(result, exist_ok=True)\n",
"\n",
"for i in tqdm(os.listdir(path)):\n",
" p1 = os.path.join(path,i)\n",
" r1 = os.path.join(result,i)\n",
" if os.path.exists(r1):\n",
" continue\n",
" os.makedirs(r1,exist_ok = True)\n",
" for j in os.listdir(p1):\n",
" vid_path = os.path.join(p1,j)\n",
" r2 = os.path.join(r1,j[:-4])\n",
" os.makedirs(r2,exist_ok = True)\n",
" for j, frame in enumerate((extract_frames(vid_path))):\n",
" frame.save(os.path.join(r2, f\"{j}.jpg\"))"
],
"execution_count": 44,
"outputs": [
{
"output_type": "stream",
"text": [
"\n",
" 0%| | 0/3 [00:00<?, ?it/s]\u001b[Astream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"\n",
" 33%|███▎ | 1/3 [01:29<02:59, 89.68s/it]\u001b[Astream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"\n",
" 67%|██████▋ | 2/3 [03:26<01:37, 97.68s/it]\u001b[Astream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"stream 0, timescale not set\n",
"\n",
"100%|██████████| 3/3 [05:49<00:00, 116.45s/it]\n"
],
"name": "stderr"
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "jSdh8MM_6Mtj",
"outputId": "515995ba-90fa-4583-ecde-00456fdce1f1"
},
"source": [
"#Normal class\n",
"from tqdm import tqdm\n",
"path = '/content/Anomaly_Dataset/Anomaly_Videos/Normal-Videos-Part-1'\n",
"result = '/content/Dataset/normal'\n",
"os.makedirs(result, exist_ok=True)\n",
"\n",
"for i in tqdm(os.listdir(path)):\n",
" p1 = os.path.join(path,i)\n",
" r1 = os.path.join(result,i[:-4])\n",
" if os.path.exists(r1):\n",
" continue\n",
" os.makedirs(r1,exist_ok = True)\n",
" for k, frame in enumerate((extract_frames(p1))):\n",
" frame.save(os.path.join(r1, f\"{k}.jpg\"))"
],
"execution_count": 45,
"outputs": [
{
"output_type": "stream",
"text": [
"\n",
" 0%| | 0/12 [00:00<?, ?it/s]\u001b[Astream 0, timescale not set\n",
"\n",
" 8%|▊ | 1/12 [00:01<00:18, 1.65s/it]\u001b[Astream 0, timescale not set\n",
"\n",
" 17%|█▋ | 2/12 [00:07<00:28, 2.90s/it]\u001b[Astream 0, timescale not set\n",
"\n",
" 25%|██▌ | 3/12 [00:12<00:31, 3.48s/it]\u001b[Astream 0, timescale not set\n",
"\n",
" 33%|███▎ | 4/12 [00:13<00:23, 2.92s/it]\u001b[Astream 0, timescale not set\n",
"\n",
" 42%|████▏ | 5/12 [00:23<00:34, 5.00s/it]\u001b[Astream 0, timescale not set\n",
"\n",
" 50%|█████ | 6/12 [00:33<00:37, 6.33s/it]\u001b[Astream 0, timescale not set\n",
"\n",
" 58%|█████▊ | 7/12 [00:35<00:25, 5.03s/it]\u001b[Astream 0, timescale not set\n",
"\n",
" 67%|██████▋ | 8/12 [00:38<00:18, 4.64s/it]\u001b[Astream 0, timescale not set\n",
"\n",
" 75%|███████▌ | 9/12 [00:42<00:12, 4.18s/it]\u001b[Astream 0, timescale not set\n",
"\n",
" 83%|████████▎ | 10/12 [00:46<00:08, 4.23s/it]\u001b[Astream 0, timescale not set\n",
"\n",
" 92%|█████████▏| 11/12 [00:50<00:04, 4.31s/it]\u001b[Astream 0, timescale not set\n",
"\n",
"100%|██████████| 12/12 [01:08<00:00, 5.69s/it]\n"
],
"name": "stderr"
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "s69Wyw-iFP9k",
"outputId": "2f7bf24d-9061-4a34-85c3-cab3af2f3ca9"
},
"source": [
"#!apt install tree -q\n",
"\n",
"!tree Dataset --filelimit=15"
],
"execution_count": 55,
"outputs": [
{
"output_type": "stream",
"text": [
"Dataset\n",
"├── Abuse\n",
"│   ├── Abuse001_x264 [2729 entries exceeds filelimit, not opening dir]\n",
"│   ├── Abuse002_x264 [865 entries exceeds filelimit, not opening dir]\n",
"│   ├── Abuse003_x264 [3699 entries exceeds filelimit, not opening dir]\n",
"│   ├── Abuse004_x264 [16794 entries exceeds filelimit, not opening dir]\n",
"│   ├── Abuse005_x264 [949 entries exceeds filelimit, not opening dir]\n",
"│   ├── Abuse006_x264 [4380 entries exceeds filelimit, not opening dir]\n",
"│   ├── Abuse007_x264 [1150 entries exceeds filelimit, not opening dir]\n",
"│   ├── Abuse008_x264 [8406 entries exceeds filelimit, not opening dir]\n",
"│   └── Abuse009_x264 [1001 entries exceeds filelimit, not opening dir]\n",
"├── Arrest\n",
"│   ├── Arrest001_x264 [2374 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arrest002_x264 [1790 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arrest003_x264 [3054 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arrest004_x264 [3603 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arrest005_x264 [3685 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arrest006_x264 [3090 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arrest007_x264 [3144 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arrest008_x264 [6388 entries exceeds filelimit, not opening dir]\n",
"│   └── Arrest009_x264 [1713 entries exceeds filelimit, not opening dir]\n",
"├── Arson\n",
"│   ├── Arson001_x264 [4187 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arson002_x264 [4439 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arson003_x264 [4013 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arson005_x264 [1174 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arson006_x264 [2815 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arson007_x264 [6252 entries exceeds filelimit, not opening dir]\n",
"│   ├── Arson008_x264 [1220 entries exceeds filelimit, not opening dir]\n",
"│   └── Arson009_x264 [743 entries exceeds filelimit, not opening dir]\n",
"├── Assault\n",
"│   ├── Assault001_x264 [2486 entries exceeds filelimit, not opening dir]\n",
"│   ├── Assault002_x264 [2523 entries exceeds filelimit, not opening dir]\n",
"│   ├── Assault003_x264 [4443 entries exceeds filelimit, not opening dir]\n",
"│   ├── Assault004_x264 [3223 entries exceeds filelimit, not opening dir]\n",
"│   ├── Assault005_x264 [1217 entries exceeds filelimit, not opening dir]\n",
"│   ├── Assault006_x264 [8096 entries exceeds filelimit, not opening dir]\n",
"│   ├── Assault007_x264 [1952 entries exceeds filelimit, not opening dir]\n",
"│   ├── Assault008_x264 [7536 entries exceeds filelimit, not opening dir]\n",
"│   └── Assault009_x264 [3599 entries exceeds filelimit, not opening dir]\n",
"├── Burglary\n",
"│   ├── Burglary001_x264 [3969 entries exceeds filelimit, not opening dir]\n",
"│   ├── Burglary002_x264 [3064 entries exceeds filelimit, not opening dir]\n",
"│   ├── Burglary003_x264 [1173 entries exceeds filelimit, not opening dir]\n",
"│   ├── Burglary004_x264 [1787 entries exceeds filelimit, not opening dir]\n",
"│   ├── Burglary005_x264 [7729 entries exceeds filelimit, not opening dir]\n",
"│   ├── Burglary006_x264 [8937 entries exceeds filelimit, not opening dir]\n",
"│   ├── Burglary007_x264 [1085 entries exceeds filelimit, not opening dir]\n",
"│   ├── Burglary008_x264 [4920 entries exceeds filelimit, not opening dir]\n",
"│   └── Burglary009_x264 [2557 entries exceeds filelimit, not opening dir]\n",
"├── Explosion\n",
"│   ├── Explosion001_x264 [635 entries exceeds filelimit, not opening dir]\n",
"│   ├── Explosion002_x264 [4013 entries exceeds filelimit, not opening dir]\n",
"│   ├── Explosion003_x264 [576 entries exceeds filelimit, not opening dir]\n",
"│   ├── Explosion004_x264 [1902 entries exceeds filelimit, not opening dir]\n",
"│   ├── Explosion005_x264 [693 entries exceeds filelimit, not opening dir]\n",
"│   ├── Explosion006_x264 [1920 entries exceeds filelimit, not opening dir]\n",
"│   ├── Explosion007_x264 [16289 entries exceeds filelimit, not opening dir]\n",
"│   ├── Explosion008_x264 [1748 entries exceeds filelimit, not opening dir]\n",
"│   └── Explosion009_x264 [1101 entries exceeds filelimit, not opening dir]\n",
"├── Fighting\n",
"│   ├── Fighting002_x264 [2688 entries exceeds filelimit, not opening dir]\n",
"│   ├── Fighting003_x264 [3102 entries exceeds filelimit, not opening dir]\n",
"│   ├── Fighting004_x264 [16777 entries exceeds filelimit, not opening dir]\n",
"│   ├── Fighting005_x264 [1784 entries exceeds filelimit, not opening dir]\n",
"│   ├── Fighting006_x264 [944 entries exceeds filelimit, not opening dir]\n",
"│   ├── Fighting007_x264 [3794 entries exceeds filelimit, not opening dir]\n",
"│   ├── Fighting008_x264 [13134 entries exceeds filelimit, not opening dir]\n",
"│   └── Fighting009_x264 [2132 entries exceeds filelimit, not opening dir]\n",
"└── normal\n",
" ├── Normal_Videos_003_x264 [2822 entries exceeds filelimit, not opening dir]\n",
" ├── Normal_Videos_006_x264 [450 entries exceeds filelimit, not opening dir]\n",
" ├── Normal_Videos_010_x264 [1053 entries exceeds filelimit, not opening dir]\n",
" ├── Normal_Videos_014_x264 [1499 entries exceeds filelimit, not opening dir]\n",
" ├── Normal_Videos_015_x264 [480 entries exceeds filelimit, not opening dir]\n",
" ├── Normal_Videos_018_x264 [1181 entries exceeds filelimit, not opening dir]\n",
" ├── Normal_Videos_019_x264 [2843 entries exceeds filelimit, not opening dir]\n",
" ├── Normal_Videos_024_x264 [1076 entries exceeds filelimit, not opening dir]\n",
" ├── Normal_Videos_025_x264 [602 entries exceeds filelimit, not opening dir]\n",
" ├── Normal_Videos_027_x264 [4922 entries exceeds filelimit, not opening dir]\n",
" ├── Normal_Videos_033_x264 [1680 entries exceeds filelimit, not opening dir]\n",
" └── Normal_Videos_034_x264 [1318 entries exceeds filelimit, not opening dir]\n",
"\n",
"81 directories, 0 files\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "mKhyNyvu6TN9"
},
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"path = '/content/Dataset'\n",
"res = '/content/crime16'\n",
"seq_length:int = 16\n",
"\n",
"def preprocess_data(seq_length,path,res):\n",
" \n",
" dir = os.listdir(path)\n",
" \n",
" # i: category\n",
" for i in tqdm(dir):\n",
" \n",
" p1 = os.path.join(path,i)\n",
" r1 = os.path.join(res,i)\n",
" os.makedirs(r1,exist_ok = True)\n",
" \n",
" # j: image name\n",
" for j in os.listdir(p1):\n",
"\n",
" p2 = os.path.join(p1,j)\n",
" r2 = os.path.join(r1,j)\n",
" \n",
"\n",
" l = 0\n",
" skip_length = int(len(os.listdir(p2))/seq_length)\n",
" \n",
" # k: seq id\n",
" for m in range(10):\n",
" \n",
" k = m\n",
" while(l!=seq_length):\n",
"\n",
" p3 = os.path.join(p2,str(k) + \".jpg\")\n",
" try:\n",
" img = cv2.imread(p3)\n",
" img = cv2.resize(img,(128,128))\n",
" except:\n",
" print(p3)\n",
" \n",
" if(k==0):\n",
" img1 = img\n",
" else:\n",
" img1 = np.append(img1,img,axis = 1)\n",
" k = k+skip_length\n",
" l = l+1 \n",
"\n",
" cv2.imwrite(r2 + str(m)+\".jpg\",img1)"
],
"execution_count": 68,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "T1CWPKzWGTkr",
"outputId": "723329e2-6dab-4e1b-e10c-4ba1658608c0"
},
"source": [
"preprocess_data(seq_length,path,res)"
],
"execution_count": 69,
"outputs": [
{
"output_type": "stream",
"text": [
"\n",
"\n",
" 0%| | 0/8 [00:00<?, ?it/s]\u001b[A\u001b[A\n",
"\n",
" 12%|█▎ | 1/8 [00:00<00:04, 1.61it/s]\u001b[A\u001b[A\n",
"\n",
" 25%|██▌ | 2/8 [00:01<00:03, 1.61it/s]\u001b[A\u001b[A\n",
"\n",
" 38%|███▊ | 3/8 [00:01<00:02, 1.70it/s]\u001b[A\u001b[A\n",
"\n",
" 50%|█████ | 4/8 [00:02<00:02, 1.71it/s]\u001b[A\u001b[A\n",
"\n",
" 62%|██████▎ | 5/8 [00:02<00:01, 1.67it/s]\u001b[A\u001b[A\n",
"\n",
" 75%|███████▌ | 6/8 [00:03<00:01, 1.65it/s]\u001b[A\u001b[A\n",
"\n",
" 88%|████████▊ | 7/8 [00:04<00:00, 1.69it/s]\u001b[A\u001b[A\n",
"\n",
"100%|██████████| 8/8 [00:05<00:00, 1.60it/s]\n"
],
"name": "stderr"
}
]
},
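{
"cell_type": "markdown",
"metadata": {},
"source": [
"Each output file is `seq_length` resized frames laid side by side in one image. A quick shape check on one strip (a sketch using the paths above):"
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"import glob\n",
"\n",
"strip = cv2.imread(sorted(glob.glob('/content/crime16/*/*.jpg'))[0])\n",
"print(strip.shape)  # expected (128, 2048, 3): 16 frames of 128x128 along the width"
],
"execution_count": null,
"outputs": []
},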
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "qydLhww3GXy8",
"outputId": "70790d75-d048-4580-8601-5612e25d0897"
},
"source": [
"!tree crime16 --filelimit=20"
],
"execution_count": 71,
"outputs": [
{
"output_type": "stream",
"text": [
"crime16\n",
"├── Abuse [90 entries exceeds filelimit, not opening dir]\n",
"├── Arrest [90 entries exceeds filelimit, not opening dir]\n",
"├── Arson [80 entries exceeds filelimit, not opening dir]\n",
"├── Assault [90 entries exceeds filelimit, not opening dir]\n",
"├── Burglary [90 entries exceeds filelimit, not opening dir]\n",
"├── Explosion [90 entries exceeds filelimit, not opening dir]\n",
"├── Fighting [80 entries exceeds filelimit, not opening dir]\n",
"└── normal [120 entries exceeds filelimit, not opening dir]\n",
"\n",
"8 directories, 0 files\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "pu2DooqlHYW2",
"outputId": "2891401d-9e8a-4f70-fb4d-0c517ba8b975"
},
"source": [
"!ls crime16/Abuse -l"
],
"execution_count": 72,
"outputs": [
{
"output_type": "stream",
"text": [
"total 9080\n",
"-rw-r--r-- 1 root root 108229 Feb 20 11:04 Abuse001_x2640.jpg\n",
"-rw-r--r-- 1 root root 108229 Feb 20 11:04 Abuse001_x2641.jpg\n",
"-rw-r--r-- 1 root root 108229 Feb 20 11:04 Abuse001_x2642.jpg\n",
"-rw-r--r-- 1 root root 108229 Feb 20 11:04 Abuse001_x2643.jpg\n",
"-rw-r--r-- 1 root root 108229 Feb 20 11:04 Abuse001_x2644.jpg\n",
"-rw-r--r-- 1 root root 108229 Feb 20 11:04 Abuse001_x2645.jpg\n",
"-rw-r--r-- 1 root root 108229 Feb 20 11:04 Abuse001_x2646.jpg\n",
"-rw-r--r-- 1 root root 108229 Feb 20 11:04 Abuse001_x2647.jpg\n",
"-rw-r--r-- 1 root root 108229 Feb 20 11:04 Abuse001_x2648.jpg\n",
"-rw-r--r-- 1 root root 108229 Feb 20 11:04 Abuse001_x2649.jpg\n",
"-rw-r--r-- 1 root root 152516 Feb 20 11:04 Abuse002_x2640.jpg\n",
"-rw-r--r-- 1 root root 152516 Feb 20 11:04 Abuse002_x2641.jpg\n",
"-rw-r--r-- 1 root root 152516 Feb 20 11:04 Abuse002_x2642.jpg\n",
"-rw-r--r-- 1 root root 152516 Feb 20 11:04 Abuse002_x2643.jpg\n",
"-rw-r--r-- 1 root root 152516 Feb 20 11:04 Abuse002_x2644.jpg\n",
"-rw-r--r-- 1 root root 152516 Feb 20 11:04 Abuse002_x2645.jpg\n",
"-rw-r--r-- 1 root root 152516 Feb 20 11:04 Abuse002_x2646.jpg\n",
"-rw-r--r-- 1 root root 152516 Feb 20 11:04 Abuse002_x2647.jpg\n",
"-rw-r--r-- 1 root root 152516 Feb 20 11:04 Abuse002_x2648.jpg\n",
"-rw-r--r-- 1 root root 152516 Feb 20 11:04 Abuse002_x2649.jpg\n",
"-rw-r--r-- 1 root root 55662 Feb 20 11:04 Abuse003_x2640.jpg\n",
"-rw-r--r-- 1 root root 55662 Feb 20 11:04 Abuse003_x2641.jpg\n",
"-rw-r--r-- 1 root root 55662 Feb 20 11:04 Abuse003_x2642.jpg\n",
"-rw-r--r-- 1 root root 55662 Feb 20 11:04 Abuse003_x2643.jpg\n",
"-rw-r--r-- 1 root root 55662 Feb 20 11:04 Abuse003_x2644.jpg\n",
"-rw-r--r-- 1 root root 55662 Feb 20 11:04 Abuse003_x2645.jpg\n",
"-rw-r--r-- 1 root root 55662 Feb 20 11:04 Abuse003_x2646.jpg\n",
"-rw-r--r-- 1 root root 55662 Feb 20 11:04 Abuse003_x2647.jpg\n",
"-rw-r--r-- 1 root root 55662 Feb 20 11:04 Abuse003_x2648.jpg\n",
"-rw-r--r-- 1 root root 55662 Feb 20 11:04 Abuse003_x2649.jpg\n",
"-rw-r--r-- 1 root root 74585 Feb 20 11:04 Abuse004_x2640.jpg\n",
"-rw-r--r-- 1 root root 74585 Feb 20 11:04 Abuse004_x2641.jpg\n",
"-rw-r--r-- 1 root root 74585 Feb 20 11:04 Abuse004_x2642.jpg\n",
"-rw-r--r-- 1 root root 74585 Feb 20 11:04 Abuse004_x2643.jpg\n",
"-rw-r--r-- 1 root root 74585 Feb 20 11:04 Abuse004_x2644.jpg\n",
"-rw-r--r-- 1 root root 74585 Feb 20 11:04 Abuse004_x2645.jpg\n",
"-rw-r--r-- 1 root root 74585 Feb 20 11:04 Abuse004_x2646.jpg\n",
"-rw-r--r-- 1 root root 74585 Feb 20 11:04 Abuse004_x2647.jpg\n",
"-rw-r--r-- 1 root root 74585 Feb 20 11:04 Abuse004_x2648.jpg\n",
"-rw-r--r-- 1 root root 74585 Feb 20 11:04 Abuse004_x2649.jpg\n",
"-rw-r--r-- 1 root root 89490 Feb 20 11:04 Abuse005_x2640.jpg\n",
"-rw-r--r-- 1 root root 89490 Feb 20 11:04 Abuse005_x2641.jpg\n",
"-rw-r--r-- 1 root root 89490 Feb 20 11:04 Abuse005_x2642.jpg\n",
"-rw-r--r-- 1 root root 89490 Feb 20 11:04 Abuse005_x2643.jpg\n",
"-rw-r--r-- 1 root root 89490 Feb 20 11:04 Abuse005_x2644.jpg\n",
"-rw-r--r-- 1 root root 89490 Feb 20 11:04 Abuse005_x2645.jpg\n",
"-rw-r--r-- 1 root root 89490 Feb 20 11:04 Abuse005_x2646.jpg\n",
"-rw-r--r-- 1 root root 89490 Feb 20 11:04 Abuse005_x2647.jpg\n",
"-rw-r--r-- 1 root root 89490 Feb 20 11:04 Abuse005_x2648.jpg\n",
"-rw-r--r-- 1 root root 89490 Feb 20 11:04 Abuse005_x2649.jpg\n",
"-rw-r--r-- 1 root root 133314 Feb 20 11:04 Abuse006_x2640.jpg\n",
"-rw-r--r-- 1 root root 133314 Feb 20 11:04 Abuse006_x2641.jpg\n",
"-rw-r--r-- 1 root root 133314 Feb 20 11:04 Abuse006_x2642.jpg\n",
"-rw-r--r-- 1 root root 133314 Feb 20 11:04 Abuse006_x2643.jpg\n",
"-rw-r--r-- 1 root root 133314 Feb 20 11:04 Abuse006_x2644.jpg\n",
"-rw-r--r-- 1 root root 133314 Feb 20 11:04 Abuse006_x2645.jpg\n",
"-rw-r--r-- 1 root root 133314 Feb 20 11:04 Abuse006_x2646.jpg\n",
"-rw-r--r-- 1 root root 133314 Feb 20 11:04 Abuse006_x2647.jpg\n",
"-rw-r--r-- 1 root root 133314 Feb 20 11:04 Abuse006_x2648.jpg\n",
"-rw-r--r-- 1 root root 133314 Feb 20 11:04 Abuse006_x2649.jpg\n",
"-rw-r--r-- 1 root root 80815 Feb 20 11:04 Abuse007_x2640.jpg\n",
"-rw-r--r-- 1 root root 80815 Feb 20 11:04 Abuse007_x2641.jpg\n",
"-rw-r--r-- 1 root root 80815 Feb 20 11:04 Abuse007_x2642.jpg\n",
"-rw-r--r-- 1 root root 80815 Feb 20 11:04 Abuse007_x2643.jpg\n",
"-rw-r--r-- 1 root root 80815 Feb 20 11:04 Abuse007_x2644.jpg\n",
"-rw-r--r-- 1 root root 80815 Feb 20 11:04 Abuse007_x2645.jpg\n",
"-rw-r--r-- 1 root root 80815 Feb 20 11:04 Abuse007_x2646.jpg\n",
"-rw-r--r-- 1 root root 80815 Feb 20 11:04 Abuse007_x2647.jpg\n",
"-rw-r--r-- 1 root root 80815 Feb 20 11:04 Abuse007_x2648.jpg\n",
"-rw-r--r-- 1 root root 80815 Feb 20 11:04 Abuse007_x2649.jpg\n",
"-rw-r--r-- 1 root root 88289 Feb 20 11:04 Abuse008_x2640.jpg\n",
"-rw-r--r-- 1 root root 88289 Feb 20 11:04 Abuse008_x2641.jpg\n",
"-rw-r--r-- 1 root root 88289 Feb 20 11:04 Abuse008_x2642.jpg\n",
"-rw-r--r-- 1 root root 88289 Feb 20 11:04 Abuse008_x2643.jpg\n",
"-rw-r--r-- 1 root root 88289 Feb 20 11:04 Abuse008_x2644.jpg\n",
"-rw-r--r-- 1 root root 88289 Feb 20 11:04 Abuse008_x2645.jpg\n",
"-rw-r--r-- 1 root root 88289 Feb 20 11:04 Abuse008_x2646.jpg\n",
"-rw-r--r-- 1 root root 88289 Feb 20 11:04 Abuse008_x2647.jpg\n",
"-rw-r--r-- 1 root root 88289 Feb 20 11:04 Abuse008_x2648.jpg\n",
"-rw-r--r-- 1 root root 88289 Feb 20 11:04 Abuse008_x2649.jpg\n",
"-rw-r--r-- 1 root root 127722 Feb 20 11:04 Abuse009_x2640.jpg\n",
"-rw-r--r-- 1 root root 127722 Feb 20 11:04 Abuse009_x2641.jpg\n",
"-rw-r--r-- 1 root root 127722 Feb 20 11:04 Abuse009_x2642.jpg\n",
"-rw-r--r-- 1 root root 127722 Feb 20 11:04 Abuse009_x2643.jpg\n",
"-rw-r--r-- 1 root root 127722 Feb 20 11:04 Abuse009_x2644.jpg\n",
"-rw-r--r-- 1 root root 127722 Feb 20 11:04 Abuse009_x2645.jpg\n",
"-rw-r--r-- 1 root root 127722 Feb 20 11:04 Abuse009_x2646.jpg\n",
"-rw-r--r-- 1 root root 127722 Feb 20 11:04 Abuse009_x2647.jpg\n",
"-rw-r--r-- 1 root root 127722 Feb 20 11:04 Abuse009_x2648.jpg\n",
"-rw-r--r-- 1 root root 127722 Feb 20 11:04 Abuse009_x2649.jpg\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "94rkmLnqHkzX"
},
"source": [
"class OneCycle(object):\n",
" def __init__(self, nb, max_lr, momentum_vals=(0.95, 0.85), prcnt= 10 , div=10):\n",
" self.nb = nb\n",
" self.div = div\n",
" self.step_len = int(self.nb * (1- prcnt/100)/2)\n",
" self.high_lr = max_lr\n",
" self.low_mom = momentum_vals[1]\n",
" self.high_mom = momentum_vals[0]\n",
" self.prcnt = prcnt\n",
" self.iteration = 0\n",
" self.lrs = []\n",
" self.moms = []\n",
"\n",
" def calc(self):\n",
" self.iteration += 1\n",
" lr = self.calc_lr()\n",
" mom = self.calc_mom()\n",
" return (lr, mom)\n",
"\n",
" def calc_lr(self):\n",
" if self.iteration==self.nb:\n",
" self.iteration = 0\n",
" self.lrs.append(self.high_lr/self.div)\n",
" return self.high_lr/self.div\n",
" if self.iteration > 2 * self.step_len:\n",
" ratio = (self.iteration - 2 * self.step_len) / (self.nb - 2 * self.step_len)\n",
" lr = self.high_lr * ( 1 - 0.99 * ratio)/self.div\n",
" elif self.iteration > self.step_len:\n",
" ratio = 1- (self.iteration -self.step_len)/self.step_len\n",
" lr = self.high_lr * (1 + ratio * (self.div - 1)) / self.div\n",
" else :\n",
" ratio = self.iteration/self.step_len\n",
" lr = self.high_lr * (1 + ratio * (self.div - 1)) / self.div\n",
" self.lrs.append(lr)\n",
" return lr\n",
"\n",
" def calc_mom(self):\n",
" if self.iteration==self.nb:\n",
" self.iteration = 0\n",
" self.moms.append(self.high_mom)\n",
" return self.high_mom\n",
" if self.iteration > 2 * self.step_len:\n",
" mom = self.high_mom\n",
" elif self.iteration > self.step_len:\n",
" ratio = (self.iteration -self.step_len)/self.step_len\n",
" mom = self.low_mom + ratio * (self.high_mom - self.low_mom)\n",
" else :\n",
" ratio = self.iteration/self.step_len\n",
" mom = self.high_mom - ratio * (self.high_mom - self.low_mom)\n",
" self.moms.append(mom)\n",
" return mom\n",
"def update_lr(optimizer, lr):\n",
" for g in optimizer.param_groups:\n",
" g['lr'] = lr\n",
"def update_mom(optimizer, mom):\n",
" for g in optimizer.param_groups:\n",
" g['momentum'] = mom"
],
"execution_count": 73,
"outputs": []
},
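{
"cell_type": "markdown",
"metadata": {},
"source": [
"`OneCycle` ramps the learning rate up and back down over training while momentum moves the opposite way. A small demonstration of the schedule (hypothetical `nb=100` steps, not the training run below):"
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"demo = OneCycle(nb=100, max_lr=1e-3)\n",
"schedule = [demo.calc() for _ in range(100)]\n",
"for step in (0, 25, 45, 70, 99):\n",
"    lr, mom = schedule[step]\n",
"    print(f'step {step:3d}: lr={lr:.6f} mom={mom:.3f}')"
],
"execution_count": null,
"outputs": []
},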
{
"cell_type": "code",
"metadata": {
"id": "lDz0Nwv-Hx_i"
},
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"from torch.autograd import Variable\n",
"from functools import partial\n",
"\n",
"__all__ = ['resnet50', 'resnet101', 'resnet152', 'resnet200']\n",
"\n",
"\n",
"def conv3x3x3(in_planes, out_planes, stride=1):\n",
" # 3x3x3 convolution with padding\n",
" return nn.Conv3d(\n",
" in_planes,\n",
" out_planes,\n",
" kernel_size=3,\n",
" stride=stride,\n",
" padding=1,\n",
" bias=False)\n",
"\n",
"\n",
"def downsample_basic_block(x, planes, stride):\n",
" out = F.avg_pool3d(x, kernel_size=1, stride=stride)\n",
" zero_pads = torch.Tensor(out.size(0), planes - out.size(1), out.size(2), out.size(3), out.size(4)).zero_()\n",
" if isinstance(out.data, torch.cuda.FloatTensor):\n",
" zero_pads = zero_pads.cuda()\n",
" out = Variable(torch.cat([out.data, zero_pads], dim=1))\n",
"\n",
" return out\n",
"\n",
"\n",
"class Bottleneck(nn.Module):\n",
" expansion = 4\n",
"\n",
" def __init__(self, inplanes, planes, stride=1, downsample=None, head_conv=1):\n",
" super(Bottleneck, self).__init__()\n",
" if head_conv == 1:\n",
" self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)\n",
" self.bn1 = nn.BatchNorm3d(planes)\n",
" elif head_conv == 3:\n",
" self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=(3, 1, 1), bias=False, padding=(1, 0, 0))\n",
" self.bn1 = nn.BatchNorm3d(planes)\n",
" else:\n",
" raise ValueError(\"Unsupported head_conv!\")\n",
" self.conv2 = nn.Conv3d(\n",
" planes, planes, kernel_size=(1, 3, 3), stride=(1, stride, stride), padding=(0, 1, 1), bias=False)\n",
" self.bn2 = nn.BatchNorm3d(planes)\n",
" self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)\n",
" self.bn3 = nn.BatchNorm3d(planes * 4)\n",
" self.relu = nn.ReLU(inplace=True)\n",
" self.downsample = downsample\n",
" self.stride = stride\n",
"\n",
" def forward(self, x):\n",
" residual = x\n",
"\n",
" out = self.conv1(x)\n",
" out = self.bn1(out)\n",
" out = self.relu(out)\n",
"\n",
" out = self.conv2(out)\n",
" out = self.bn2(out)\n",
" out = self.relu(out)\n",
"\n",
" out = self.conv3(out)\n",
" out = self.bn3(out)\n",
"\n",
" if self.downsample is not None:\n",
" residual = self.downsample(x)\n",
"\n",
" out += residual\n",
" out = self.relu(out)\n",
"\n",
" return out\n",
"\n",
"\n",
"def get_fine_tuning_parameters(model, ft_begin_index):\n",
" if ft_begin_index == 0:\n",
" return model.parameters()\n",
"\n",
" ft_module_names = []\n",
" for i in range(ft_begin_index, 5):\n",
" ft_module_names.append('layer{}'.format(i))\n",
" ft_module_names.append('fc')\n",
"\n",
" parameters = []\n",
" for k, v in model.named_parameters():\n",
" for ft_module in ft_module_names:\n",
" if ft_module in k:\n",
" parameters.append({'params': v})\n",
" break\n",
" else:\n",
" parameters.append({'params': v, 'lr': 0.0})\n",
"\n",
" return parameters\n",
"\n",
"\n",
"class SlowFast(nn.Module):\n",
" def __init__(self, block=Bottleneck, layers=[3, 4, 6, 3], class_num=27, shortcut_type='B', dropout=0.5,\n",
" alpha=8, beta=0.125):\n",
" super(SlowFast, self).__init__()\n",
" self.alpha = alpha\n",
" self.beta = beta\n",
"\n",
" self.fast_inplanes = int(64 * beta)\n",
" fast_inplanes = self.fast_inplanes\n",
" self.fast_conv1 = nn.Conv3d(3, fast_inplanes, kernel_size=(5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3),\n",
" bias=False)\n",
" self.fast_bn1 = nn.BatchNorm3d(8)\n",
" self.fast_relu = nn.ReLU(inplace=True)\n",
" self.fast_maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))\n",
" self.fast_res1 = self._make_layer_fast(block, 8, layers[0], shortcut_type, head_conv=3)\n",
" self.fast_res2 = self._make_layer_fast(\n",
" block, 16, layers[1], shortcut_type, stride=2, head_conv=3)\n",
" self.fast_res3 = self._make_layer_fast(\n",
" block, 32, layers[2], shortcut_type, stride=2, head_conv=3)\n",
" self.fast_res4 = self._make_layer_fast(\n",
" block, 64, layers[3], shortcut_type, stride=2, head_conv=3)\n",
"\n",
" self.slow_inplanes = 64\n",
" slow_inplanes = self.slow_inplanes\n",
" self.slow_conv1 = nn.Conv3d(3, slow_inplanes, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3),\n",
" bias=False)\n",
" self.slow_bn1 = nn.BatchNorm3d(64)\n",
" self.slow_relu = nn.ReLU(inplace=True)\n",
" self.slow_maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))\n",
" self.slow_res1 = self._make_layer_slow(block, 64, layers[0], shortcut_type, head_conv=1)\n",
" self.slow_res2 = self._make_layer_slow(\n",
" block, 128, layers[1], shortcut_type, stride=2, head_conv=1)\n",
" self.slow_res3 = self._make_layer_slow(\n",
" block, 256, layers[2], shortcut_type, stride=2, head_conv=1)\n",
" self.slow_res4 = self._make_layer_slow(\n",
" block, 512, layers[3], shortcut_type, stride=2, head_conv=1)\n",
"\n",
" self.Tconv1 = nn.Conv3d(8, 16, kernel_size=(5, 1, 1), stride=(alpha, 1, 1), padding=(2, 0, 0), bias=False)\n",
" self.Tconv2 = nn.Conv3d(32, 64, kernel_size=(5, 1, 1), stride=(alpha, 1, 1), padding=(2, 0, 0), bias=False)\n",
" self.Tconv3 = nn.Conv3d(64, 128, kernel_size=(5, 1, 1), stride=(alpha, 1, 1), padding=(2, 0, 0), bias=False)\n",
" self.Tconv4 = nn.Conv3d(128, 256, kernel_size=(5, 1, 1), stride=(alpha, 1, 1), padding=(2, 0, 0), bias=False)\n",
"\n",
" self.dp = nn.Dropout(dropout)\n",
" self.fc = nn.Linear(self.fast_inplanes + self.slow_inplanes, class_num)\n",
"\n",
" def forward(self, input):\n",
" fast, Tc = self.FastPath(input[:, :, ::2, :, :])\n",
" slow = self.SlowPath(input[:, :, ::16, :, :], Tc)\n",
" x = torch.cat([slow, fast], dim=1)\n",
" x = self.dp(x)\n",
" x = self.fc(x)\n",
" return x\n",
"\n",
" def SlowPath(self, input, Tc):\n",
" x = self.slow_conv1(input)\n",
" x = self.slow_bn1(x)\n",
" x = self.slow_relu(x)\n",
" x = self.slow_maxpool(x)\n",
" x = torch.cat([x, Tc[0]], dim=1)\n",
" x = self.slow_res1(x)\n",
" x = torch.cat([x, Tc[1]], dim=1)\n",
" x = self.slow_res2(x)\n",
" x = torch.cat([x, Tc[2]], dim=1)\n",
" x = self.slow_res3(x)\n",
" x = torch.cat([x, Tc[3]], dim=1)\n",
" x = self.slow_res4(x)\n",
" x = nn.AdaptiveAvgPool3d(1)(x)\n",
" x = x.view(-1, x.size(1))\n",
" return x\n",
"\n",
" def FastPath(self, input):\n",
" x = self.fast_conv1(input)\n",
" x = self.fast_bn1(x)\n",
" x = self.fast_relu(x)\n",
" x = self.fast_maxpool(x)\n",
" Tc1 = self.Tconv1(x)\n",
" x = self.fast_res1(x)\n",
" Tc2 = self.Tconv2(x)\n",
" x = self.fast_res2(x)\n",
" Tc3 = self.Tconv3(x)\n",
" x = self.fast_res3(x)\n",
" Tc4 = self.Tconv4(x)\n",
" x = self.fast_res4(x)\n",
" x = nn.AdaptiveAvgPool3d(1)(x)\n",
" x = x.view(-1, x.size(1))\n",
" return x, [Tc1, Tc2, Tc3, Tc4]\n",
"\n",
" def _make_layer_fast(self, block, planes, blocks, shortcut_type, stride=1, head_conv=1):\n",
" downsample = None\n",
" if stride != 1 or self.fast_inplanes != planes * block.expansion:\n",
" if shortcut_type == 'A':\n",
" downsample = partial(\n",
" downsample_basic_block,\n",
" planes=planes * block.expansion,\n",
" stride=stride)\n",
" else:\n",
" downsample = nn.Sequential(\n",
" nn.Conv3d(\n",
" self.fast_inplanes,\n",
" planes * block.expansion,\n",
" kernel_size=1,\n",
" stride=(1, stride, stride),\n",
" bias=False), nn.BatchNorm3d(planes * block.expansion))\n",
"\n",
" layers = []\n",
" layers.append(block(self.fast_inplanes, planes, stride, downsample, head_conv=head_conv))\n",
" self.fast_inplanes = planes * block.expansion\n",
" for i in range(1, blocks):\n",
" layers.append(block(self.fast_inplanes, planes, head_conv=head_conv))\n",
" return nn.Sequential(*layers)\n",
"\n",
" def _make_layer_slow(self, block, planes, blocks, shortcut_type, stride=1, head_conv=1):\n",
" downsample = None\n",
" if stride != 1 or self.slow_inplanes != planes * block.expansion:\n",
" if shortcut_type == 'A':\n",
" downsample = partial(\n",
" downsample_basic_block,\n",
" planes=planes * block.expansion,\n",
" stride=stride)\n",
" else:\n",
" downsample = nn.Sequential(\n",
" nn.Conv3d(\n",
" self.slow_inplanes + self.slow_inplanes // self.alpha * 2,\n",
" planes * block.expansion,\n",
" kernel_size=1,\n",
" stride=(1, stride, stride),\n",
" bias=False), nn.BatchNorm3d(planes * block.expansion))\n",
"\n",
" layers = []\n",
" layers.append(block(self.slow_inplanes + self.slow_inplanes // self.alpha * 2, planes, stride, downsample,\n",
" head_conv=head_conv))\n",
" self.slow_inplanes = planes * block.expansion\n",
" for i in range(1, blocks):\n",
" layers.append(block(self.slow_inplanes, planes, head_conv=head_conv))\n",
"\n",
" return nn.Sequential(*layers)\n",
"\n",
"\n",
"def resnet50(**kwargs):\n",
" \"\"\"Constructs a ResNet-50 model.\n",
" \"\"\"\n",
" model = SlowFast(Bottleneck, [3, 4, 6, 3], **kwargs)\n",
" return model\n",
"\n",
"\n",
"def resnet101(**kwargs):\n",
" \"\"\"Constructs a ResNet-101 model.\n",
" \"\"\"\n",
" model = SlowFast(Bottleneck, [3, 4, 23, 3], **kwargs)\n",
" return model\n",
"\n",
"\n",
"def resnet152(**kwargs):\n",
" \"\"\"Constructs a ResNet-101 model.\n",
" \"\"\"\n",
" model = SlowFast(Bottleneck, [3, 8, 36, 3], **kwargs)\n",
" return model\n",
"\n",
"\n",
"def resnet200(**kwargs):\n",
" \"\"\"Constructs a ResNet-101 model.\n",
" \"\"\"\n",
" model = SlowFast(Bottleneck, [3, 24, 36, 3], **kwargs)\n",
" return model"
],
"execution_count": 74,
"outputs": []
},
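{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal shape check of the two-pathway model on random input (a sketch on CPU; `class_num=8` matches the eight classes used below):"
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"m = resnet50(class_num=8).eval()\n",
"with torch.no_grad():\n",
"    out = m(torch.randn(1, 3, 16, 128, 128))\n",
"print(out.shape)  # expected: torch.Size([1, 8])"
],
"execution_count": null,
"outputs": []
},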
{
"cell_type": "code",
"metadata": {
"id": "Hq0Hvb3IH5Z3"
},
"source": [
"\"\"\"\n",
"training\n",
"\"\"\"\n",
"\n",
"import torchvision\n",
"import torch\n",
"from torch import nn\n",
"import torch.nn.functional as F\n",
"import torchvision.models as models\n",
"import torch.optim as optim\n",
"import copy\n",
"import os\n",
"from tqdm.autonotebook import tqdm\n",
"import matplotlib.pyplot as plt\n",
"from torch.utils.data import Dataset\n",
"from torchvision import transforms\n",
"from torch.utils.data import DataLoader\n",
"import numpy as np\n",
"from torch.utils.data.sampler import SubsetRandomSampler\n",
"import cv2\n",
"import sys"
],
"execution_count": 75,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "ZwxHK61EIBg-"
},
"source": [
"#Label map\n",
"data_path = '/content/crime16'\n",
"classes = os.listdir(data_path)\n",
"decoder = {}\n",
"for i in range(len(classes)):\n",
" decoder[classes[i]] = i\n",
"encoder = {}\n",
"for i in range(len(classes)):\n",
" encoder[i] = classes[i]"
],
"execution_count": 76,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "TlF2XhalIKuR",
"outputId": "1349e962-bf97-43a3-f76d-0061d9b0d375"
},
"source": [
"decoder, encoder"
],
"execution_count": 78,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"({'Abuse': 1,\n",
" 'Arrest': 0,\n",
" 'Arson': 2,\n",
" 'Assault': 4,\n",
" 'Burglary': 5,\n",
" 'Explosion': 3,\n",
" 'Fighting': 6,\n",
" 'normal': 7},\n",
" {0: 'Arrest',\n",
" 1: 'Abuse',\n",
" 2: 'Arson',\n",
" 3: 'Explosion',\n",
" 4: 'Assault',\n",
" 5: 'Burglary',\n",
" 6: 'Fighting',\n",
" 7: 'normal'})"
]
},
"metadata": {
"tags": []
},
"execution_count": 78
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "mj_06HpbIMB_"
},
"source": [
"id = list()\n",
"path = data_path\n",
"for i in os.listdir(path):\n",
" p1 = os.path.join(path,i)\n",
" for j in os.listdir(p1):\n",
" p2 = os.path.join(p1,j)\n",
" id.append((i,p2))"
],
"execution_count": 79,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "gK2ROrO3Ib28"
},
"source": [
"class video_dataset(Dataset):\n",
" def __init__(self,frame_list,sequence_length = 16,transform = None):\n",
" self.frame_list = frame_list\n",
" self.transform = transform\n",
" self.sequence_length = sequence_length\n",
" def __len__(self):\n",
" return len(self.frame_list)\n",
" def __getitem__(self,idx):\n",
" label,path = self.frame_list[idx]\n",
" img = cv2.imread(path)\n",
" seq_img = list()\n",
" for i in range(16):\n",
" img1 = img[:,128*i:128*(i+1),:]\n",
" if(self.transform):\n",
" img1 = self.transform(img1)\n",
" seq_img.append(img1)\n",
" seq_image = torch.stack(seq_img)\n",
" seq_image = seq_image.reshape(3,16,im_size,im_size)\n",
" return seq_image,decoder[label]"
],
"execution_count": 80,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "7-SqI2_TIfAK"
},
"source": [
"im_size = 128\n",
"mean = [0.4889, 0.4887, 0.4891]\n",
"std = [0.2074, 0.2074, 0.2074]\n",
"\n",
"\n",
"train_transforms = transforms.Compose([\n",
" transforms.ToPILImage(),\n",
" transforms.Resize((im_size,im_size)),\n",
" transforms.RandomHorizontalFlip(),\n",
" transforms.RandomRotation(degrees=10),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean,std)])\n",
"\n",
"train_data = video_dataset(id,sequence_length = 16,transform = train_transforms)\n",
"train_loader = DataLoader(train_data,batch_size = 8,num_workers = 4 ,shuffle = True)\n",
"dataloaders = {'train':train_loader}"
],
"execution_count": 81,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "eYWaSTe1IkLr",
"outputId": "b9e600e7-ee68-463d-873e-5489f68ad99c"
},
"source": [
"xb, yb = next(iter(train_loader))\n",
"\n",
"xb.shape, yb.shape"
],
"execution_count": 84,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"(torch.Size([8, 3, 16, 128, 128]), torch.Size([8]))"
]
},
"metadata": {
"tags": []
},
"execution_count": 84
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "hSwv79BTJKun"
},
"source": [
"model = resnet50(class_num=8).to('cuda')"
],
"execution_count": 86,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "bVLA7-AIIqkb"
},
"source": [
"device = 'cuda'\n",
"cls_criterion = nn.CrossEntropyLoss().to(device)\n",
"optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum = 0.9,weight_decay = 1e-4)\n",
"num_epochs = 20\n",
"onecyc = OneCycle(len(train_loader)*num_epochs,1e-3)"
],
"execution_count": 87,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "9oAGeKDfI7Z3",
"outputId": "e6f98594-7512-418d-f8d3-f98ea221b86f"
},
"source": [
"os.makedirs('/content/drive/My Drive/weights_crime',exist_ok = True)\n",
"from torch.autograd import Variable\n",
"iteration = 0\n",
"acc_all = list()\n",
"loss_all = list()\n",
" \n",
"for epoch in range(num_epochs):\n",
" print('')\n",
" print(f\"--- Epoch {epoch} ---\")\n",
" phase1 = dataloaders.keys()\n",
" for phase in phase1:\n",
" print('')\n",
" print(f\"--- Phase {phase} ---\")\n",
" epoch_metrics = {\"loss\": [], \"acc\": []}\n",
" for batch_i, (X, y) in enumerate(dataloaders[phase]):\n",
" #iteration = iteration+1\n",
" image_sequences = Variable(X.to(device), requires_grad=True)\n",
" labels = Variable(y.to(device), requires_grad=False)\n",
" optimizer.zero_grad()\n",
" #model.lstm.reset_hidden_state()\n",
" predictions = model(image_sequences)\n",
" loss = cls_criterion(predictions, labels)\n",
" acc = 100 * (predictions.detach().argmax(1) == labels).cpu().numpy().mean()\n",
" loss.backward()\n",
" optimizer.step()\n",
" epoch_metrics[\"loss\"].append(loss.item())\n",
" epoch_metrics[\"acc\"].append(acc)\n",
" if(phase=='train'):\n",
" lr,mom = onecyc.calc()\n",
" update_lr(optimizer, lr)\n",
" update_mom(optimizer, mom)\n",
" batches_done = epoch * len(dataloaders[phase]) + batch_i\n",
" batches_left = num_epochs * len(dataloaders[phase]) - batches_done\n",
" sys.stdout.write(\n",
" \"\\r[Epoch %d/%d] [Batch %d/%d] [Loss: %f (%f), Acc: %.2f%% (%.2f%%)]\"\n",
" % (\n",
" epoch,\n",
" num_epochs,\n",
" batch_i,\n",
" len(dataloaders[phase]),\n",
" loss.item(),\n",
" np.mean(epoch_metrics[\"loss\"]),\n",
" acc,\n",
" np.mean(epoch_metrics[\"acc\"]),\n",
" )\n",
" )\n",
"\n",
" # Empty cache\n",
" if torch.cuda.is_available():\n",
" torch.cuda.empty_cache()\n",
" \n",
" print('')\n",
" print('{} , acc: {}'.format(phase,np.mean(epoch_metrics[\"acc\"])))\n",
" torch.save(model.state_dict(),'/content/drive/My Drive/weights_crime/c3d_{}.h5'.format(epoch))\n",
" if(phase=='train'):\n",
" acc_all.append(np.mean(epoch_metrics[\"acc\"]))\n",
" loss_all.append(np.mean(epoch_metrics[\"loss\"]))"
],
"execution_count": 88,
"outputs": [
{
"output_type": "stream",
"text": [
"\n",
"--- Epoch 0 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 0/20] [Batch 91/92] [Loss: 2.698787 (2.398279), Acc: 50.00% (15.08%)]\n",
"train , acc: 15.081521739130435\n",
"\n",
"--- Epoch 1 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 1/20] [Batch 91/92] [Loss: 3.108716 (2.320126), Acc: 0.00% (16.98%)]\n",
"train , acc: 16.983695652173914\n",
"\n",
"--- Epoch 2 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 2/20] [Batch 91/92] [Loss: 2.973028 (2.073473), Acc: 0.00% (28.94%)]\n",
"train , acc: 28.940217391304348\n",
"\n",
"--- Epoch 3 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 3/20] [Batch 91/92] [Loss: 2.797733 (1.711508), Acc: 50.00% (43.07%)]\n",
"train , acc: 43.07065217391305\n",
"\n",
"--- Epoch 4 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 4/20] [Batch 91/92] [Loss: 1.568861 (1.208602), Acc: 50.00% (58.83%)]\n",
"train , acc: 58.83152173913044\n",
"\n",
"--- Epoch 5 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 5/20] [Batch 91/92] [Loss: 1.994390 (0.964666), Acc: 50.00% (66.98%)]\n",
"train , acc: 66.9836956521739\n",
"\n",
"--- Epoch 6 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 6/20] [Batch 91/92] [Loss: 3.156829 (0.738548), Acc: 0.00% (75.54%)]\n",
"train , acc: 75.54347826086956\n",
"\n",
"--- Epoch 7 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 7/20] [Batch 91/92] [Loss: 5.202874 (0.760749), Acc: 50.00% (78.80%)]\n",
"train , acc: 78.80434782608695\n",
"\n",
"--- Epoch 8 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 8/20] [Batch 91/92] [Loss: 0.071312 (0.544236), Acc: 100.00% (83.29%)]\n",
"train , acc: 83.28804347826087\n",
"\n",
"--- Epoch 9 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 9/20] [Batch 91/92] [Loss: 1.651117 (0.325360), Acc: 50.00% (90.22%)]\n",
"train , acc: 90.21739130434783\n",
"\n",
"--- Epoch 10 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 10/20] [Batch 91/92] [Loss: 4.553074 (0.363002), Acc: 0.00% (88.59%)]\n",
"train , acc: 88.58695652173913\n",
"\n",
"--- Epoch 11 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 11/20] [Batch 91/92] [Loss: 0.064471 (0.287985), Acc: 100.00% (91.71%)]\n",
"train , acc: 91.71195652173913\n",
"\n",
"--- Epoch 12 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 12/20] [Batch 91/92] [Loss: 0.024137 (0.159284), Acc: 100.00% (95.52%)]\n",
"train , acc: 95.5163043478261\n",
"\n",
"--- Epoch 13 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 13/20] [Batch 91/92] [Loss: 1.043481 (0.124562), Acc: 50.00% (95.52%)]\n",
"train , acc: 95.5163043478261\n",
"\n",
"--- Epoch 14 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 14/20] [Batch 91/92] [Loss: 3.889740 (0.181129), Acc: 50.00% (94.84%)]\n",
"train , acc: 94.83695652173913\n",
"\n",
"--- Epoch 15 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 15/20] [Batch 91/92] [Loss: 0.071463 (0.240791), Acc: 100.00% (93.75%)]\n",
"train , acc: 93.75\n",
"\n",
"--- Epoch 16 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 16/20] [Batch 91/92] [Loss: 0.594148 (0.043234), Acc: 50.00% (97.83%)]\n",
"train , acc: 97.82608695652173\n",
"\n",
"--- Epoch 17 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 17/20] [Batch 91/92] [Loss: 0.084520 (0.056348), Acc: 100.00% (98.37%)]\n",
"train , acc: 98.3695652173913\n",
"\n",
"--- Epoch 18 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 18/20] [Batch 91/92] [Loss: 0.013494 (0.043123), Acc: 100.00% (98.64%)]\n",
"train , acc: 98.6413043478261\n",
"\n",
"--- Epoch 19 ---\n",
"\n",
"--- Phase train ---\n",
"[Epoch 19/20] [Batch 91/92] [Loss: 0.015681 (0.015380), Acc: 100.00% (99.59%)]\n",
"train , acc: 99.59239130434783\n"
],
"name": "stdout"
}
]
}
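,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, a minimal prediction sketch (not part of the original run): rebuild one 16-frame strip the way `video_dataset` does, load a saved checkpoint from the loop above (the `c3d_19.h5` default path is an assumption), and map the argmax through `encoder`."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"def predict(strip_path, weights='/content/drive/My Drive/weights_crime/c3d_19.h5'):\n",
"    net = resnet50(class_num=8).to(device)\n",
"    net.load_state_dict(torch.load(weights, map_location=device))\n",
"    net.eval()\n",
"\n",
"    # same per-frame pipeline as video_dataset, minus the random augmentations\n",
"    eval_transforms = transforms.Compose([\n",
"        transforms.ToPILImage(),\n",
"        transforms.Resize((im_size, im_size)),\n",
"        transforms.ToTensor(),\n",
"        transforms.Normalize(mean, std)])\n",
"\n",
"    img = cv2.imread(strip_path)\n",
"    frames = [eval_transforms(img[:, im_size*i:im_size*(i+1), :]) for i in range(16)]\n",
"    clip = torch.stack(frames).permute(1, 0, 2, 3).unsqueeze(0).to(device)  # (1, 3, 16, H, W)\n",
"\n",
"    with torch.no_grad():\n",
"        pred = net(clip).argmax(1).item()\n",
"    return encoder[pred]\n",
"\n",
"# e.g. predict('/content/crime16/Abuse/Abuse001_x2640.jpg')"
],
"execution_count": null,
"outputs": []
}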
]
}

gndn1004 commented Aug 23, 2021

Can you tell me how to do the prediction?
