@parth29-vc
Created January 19, 2022 03:58
tf2.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "tf2.ipynb",
"provenance": [],
"collapsed_sections": [],
"authorship_tag": "ABX9TyPZq71fr9TOspVfLEgnHLyg",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/parth29-vc/29029bd2989bfc994fedfb976af65b16/tf2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "8leA4hf5aY0G",
"outputId": "991d4b66-c6b2-43ba-8b25-5c9f6c9eb8e5"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Mon Jan 17 10:54:32 2022 \n",
"+-----------------------------------------------------------------------------+\n",
"| NVIDIA-SMI 495.46 Driver Version: 460.32.03 CUDA Version: 11.2 |\n",
"|-------------------------------+----------------------+----------------------+\n",
"| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
"| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
"| | | MIG M. |\n",
"|===============================+======================+======================|\n",
"| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |\n",
"| N/A 49C P0 28W / 250W | 0MiB / 16280MiB | 0% Default |\n",
"| | | N/A |\n",
"+-------------------------------+----------------------+----------------------+\n",
" \n",
"+-----------------------------------------------------------------------------+\n",
"| Processes: |\n",
"| GPU GI CI PID Type Process name GPU Memory |\n",
"| ID ID Usage |\n",
"|=============================================================================|\n",
"| No running processes found |\n",
"+-----------------------------------------------------------------------------+\n"
]
}
],
"source": [
"!nvidia-smi"
]
},
{
"cell_type": "code",
"source": [
""
],
"metadata": {
"id": "Hjtm61Jn2RGc"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"from google.colab import drive\n",
"drive.mount('/content/drive')"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "e3QMklSyap2O",
"outputId": "edb84ec7-c678-4348-dd88-30e02e15eb60"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Mounted at /content/drive\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"%cd /content/drive/MyDrive"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "W-8hwcHLaqQM",
"outputId": "802e0add-a8ca-46ea-ea6b-b90611234866"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/drive/MyDrive\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"%cd tf2_golf_trolley/"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "OCRFmejxa4Jn",
"outputId": "0310516f-3e35-4df3-9b83-a8833da7d3fe"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/drive/MyDrive/tf2_golf_trolley\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"import os\n",
"import cv2\n",
"import numpy as np\n",
"from glob import glob\n",
"from scipy.io import loadmat\n",
"import matplotlib.pyplot as plt\n",
"\n",
"import tensorflow as tf\n",
"from tensorflow import keras\n",
"from tensorflow.keras import layers"
],
"metadata": {
"id": "d4MCLZGlbMGn"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"!pip list | grep tensor"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "yGSb9PQT2oJc",
"outputId": "959b27a6-bd8e-40a5-dc6c-ecbc3ddbb438"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"tensorboard 2.7.0\n",
"tensorboard-data-server 0.6.1\n",
"tensorboard-plugin-wit 1.8.1\n",
"tensorflow 2.7.0\n",
"tensorflow-datasets 4.0.1\n",
"tensorflow-estimator 2.7.0\n",
"tensorflow-gcs-config 2.7.0\n",
"tensorflow-hub 0.12.0\n",
"tensorflow-io-gcs-filesystem 0.23.1\n",
"tensorflow-metadata 1.5.0\n",
"tensorflow-probability 0.15.0\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!ls"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "tQYRJugQbObo",
"outputId": "ffd12af7-1098-4e7e-f882-2163592c5742"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"data_dir\t ngrok\n",
"deeplab.py\t ngrok-stable-linux-amd64.zip\n",
"human_colormap.mat ngrok-stable-linux-amd64.zip.1\n",
"inp\t\t __pycache__\n",
"inp_cropped_png url.txt\n",
"logs\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!ls"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "31yOrVgjbv9Y",
"outputId": "7bb23279-6df9-48db-9b63-ec1ee9ba7fe7"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"data_dir\t ngrok\n",
"deeplab.py\t ngrok-stable-linux-amd64.zip\n",
"human_colormap.mat ngrok-stable-linux-amd64.zip.1\n",
"inp\t\t __pycache__\n",
"inp_cropped_png url.txt\n",
"logs\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"IMAGE_SIZE = 1024\n",
"BATCH_SIZE = 2\n",
"NUM_CLASSES = 8\n",
"DATA_DIR = \"./data_dir/\"\n",
"NUM_TRAIN_IMAGES = 100\n",
"NUM_VAL_IMAGES = 9"
],
"metadata": {
"id": "R5lG9lrufJrw"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"train_images = sorted(glob(os.path.join(DATA_DIR, \"Images/*\")))[:NUM_TRAIN_IMAGES]\n",
"train_masks = sorted(glob(os.path.join(DATA_DIR, \"Category_ids/*\")))[:NUM_TRAIN_IMAGES]\n",
"val_images = sorted(glob(os.path.join(DATA_DIR, \"Images/*\")))[\n",
" NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES\n",
"]\n",
"val_masks = sorted(glob(os.path.join(DATA_DIR, \"Category_ids/*\")))[\n",
" NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES\n",
"]"
],
"metadata": {
"id": "YSVZTjaSw6WR"
},
"execution_count": null,
"outputs": []
},
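{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small check added for illustration: print how many image/mask pairs were actually matched on disk, since `NUM_TRAIN_IMAGES`/`NUM_VAL_IMAGES` only cap the slices and silently shrink if fewer files are present."
]
},
{
"cell_type": "code",
"source": [
"# Hypothetical check (not part of the original run).\n",
"print(len(train_images), \"train images /\", len(train_masks), \"train masks\")\n",
"print(len(val_images), \"val images /\", len(val_masks), \"val masks\")"
],
"metadata": {},
"execution_count": null,
"outputs": []
},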
{
"cell_type": "code",
"source": [
"def read_image(image_path, mask=False):\n",
" image = tf.io.read_file(image_path)\n",
" if mask:\n",
" image = tf.image.decode_png(image, channels=1)\n",
" image.set_shape([None, None, 1])\n",
" image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])\n",
" else:\n",
" image = tf.image.decode_png(image, channels=3)\n",
" image.set_shape([None, None, 3])\n",
" image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])\n",
" # image = image / 127.5 - 1\n",
" print(image.shape)\n",
" return image\n",
"\n",
"\n",
"def load_data(image_list, mask_list):\n",
" image = read_image(image_list)\n",
" mask = read_image(mask_list, mask=True)\n",
" return image, mask\n",
"\n",
"\n",
"def data_generator(image_list, mask_list):\n",
" dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))\n",
" dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)\n",
" dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)\n",
" return dataset\n",
"\n"
],
"metadata": {
"id": "GdyDtLx6xKj4"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"train_dataset = data_generator(train_images, train_masks)\n",
"val_dataset = data_generator(val_images, val_masks)\n",
"\n",
"print(\"Train Dataset:\", train_dataset)\n",
"print(\"Val Dataset:\", val_dataset)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "t9iefBZdxO19",
"outputId": "ce6e0b46-3426-444d-8e58-e36e31e14242"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"(1024, 1024, 3)\n",
"(1024, 1024, 3)\n",
"Train Dataset: <BatchDataset shapes: ((2, 1024, 1024, 3), (2, 1024, 1024, 1)), types: (tf.float32, tf.float32)>\n",
"Val Dataset: <BatchDataset shapes: ((2, 1024, 1024, 3), (2, 1024, 1024, 1)), types: (tf.float32, tf.float32)>\n"
]
}
]
},
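{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check added for illustration (not part of the original run): pull one batch from `train_dataset` and inspect the image/mask shapes and the set of mask labels. It assumes the `Category_ids` PNGs store integer class indices below `NUM_CLASSES`; because the masks are resized bilinearly above, the values are rounded before inspection."
]
},
{
"cell_type": "code",
"source": [
"# Hypothetical sanity-check cell (added for illustration; not in the original run).\n",
"for images, masks in train_dataset.take(1):\n",
"    print(\"images:\", images.shape, images.dtype)\n",
"    print(\"masks: \", masks.shape, masks.dtype)\n",
"    # Bilinear resizing of the masks produces fractional values, so round first.\n",
"    print(\"mask labels:\", np.unique(np.round(masks.numpy()).astype(np.int32)))"
],
"metadata": {},
"execution_count": null,
"outputs": []
},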
{
"cell_type": "code",
"source": [
"def convolution_block(\n",
" block_input,\n",
" num_filters=256,\n",
" kernel_size=3,\n",
" dilation_rate=1,\n",
" padding=\"same\",\n",
" use_bias=False,\n",
"):\n",
" x = layers.Conv2D(\n",
" num_filters,\n",
" kernel_size=kernel_size,\n",
" dilation_rate=dilation_rate,\n",
" padding=\"same\",\n",
" use_bias=use_bias,\n",
" kernel_initializer=keras.initializers.HeNormal(),\n",
" )(block_input)\n",
" x = layers.BatchNormalization()(x)\n",
" return tf.nn.relu(x)\n",
"\n",
"\n",
"def DilatedSpatialPyramidPooling(dspp_input):\n",
" dims = dspp_input.shape\n",
" x = layers.AveragePooling2D(pool_size=(dims[-3], dims[-2]))(dspp_input)\n",
" x = convolution_block(x, kernel_size=1, use_bias=True)\n",
" out_pool = layers.UpSampling2D(\n",
" size=(dims[-3] // x.shape[1], dims[-2] // x.shape[2]), interpolation=\"bilinear\",\n",
" )(x)\n",
"\n",
" out_1 = convolution_block(dspp_input, kernel_size=1, dilation_rate=1)\n",
" out_6 = convolution_block(dspp_input, kernel_size=3, dilation_rate=6)\n",
" out_12 = convolution_block(dspp_input, kernel_size=3, dilation_rate=12)\n",
" out_18 = convolution_block(dspp_input, kernel_size=3, dilation_rate=18)\n",
"\n",
" x = layers.Concatenate(axis=-1)([out_pool, out_1, out_6, out_12, out_18])\n",
" output = convolution_block(x, kernel_size=1)\n",
" return output"
],
"metadata": {
"id": "WLc8JOOWxRLQ"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
""
],
"metadata": {
"id": "oA688YSmrbgb"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"def DeeplabV3Plus(image_size, num_classes):\n",
" model_input = keras.Input(shape=(image_size, image_size, 3))\n",
" resnet50 = keras.applications.ResNet101(\n",
" weights=\"imagenet\", include_top=False, input_tensor=model_input\n",
" )\n",
" x = resnet50.get_layer(\"conv4_block6_2_relu\").output\n",
" x = DilatedSpatialPyramidPooling(x)\n",
"\n",
" input_a = layers.UpSampling2D(\n",
" size=(image_size // 4 // x.shape[1], image_size // 4 // x.shape[2]),\n",
" interpolation=\"bilinear\",\n",
" )(x)\n",
" input_b = resnet50.get_layer(\"conv2_block3_2_relu\").output\n",
" input_b = convolution_block(input_b, num_filters=48, kernel_size=1)\n",
"\n",
" x = layers.Concatenate(axis=-1)([input_a, input_b])\n",
" x = convolution_block(x)\n",
" x = convolution_block(x)\n",
" x = layers.UpSampling2D(\n",
" size=(image_size // x.shape[1], image_size // x.shape[2]),\n",
" interpolation=\"bilinear\",\n",
" )(x)\n",
" model_output = layers.Conv2D(num_classes, kernel_size=(1, 1), padding=\"same\")(x)\n",
" return keras.Model(inputs=model_input, outputs=model_output)\n",
"\n",
"\n",
"model = DeeplabV3Plus(image_size=IMAGE_SIZE, num_classes=NUM_CLASSES)\n",
"model.summary()"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "AG2mCf1ZWMz0",
"outputId": "333c740c-fda8-4220-e16c-dca878e3b599"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Model: \"model_1\"\n",
"__________________________________________________________________________________________________\n",
" Layer (type) Output Shape Param # Connected to \n",
"==================================================================================================\n",
" input_3 (InputLayer) [(None, 1024, 1024, 0 [] \n",
" 3)] \n",
" \n",
" conv1_pad (ZeroPadding2D) (None, 1030, 1030, 0 ['input_3[0][0]'] \n",
" 3) \n",
" \n",
" conv1_conv (Conv2D) (None, 512, 512, 64 9472 ['conv1_pad[0][0]'] \n",
" ) \n",
" \n",
" conv1_bn (BatchNormalization) (None, 512, 512, 64 256 ['conv1_conv[0][0]'] \n",
" ) \n",
" \n",
" conv1_relu (Activation) (None, 512, 512, 64 0 ['conv1_bn[0][0]'] \n",
" ) \n",
" \n",
" pool1_pad (ZeroPadding2D) (None, 514, 514, 64 0 ['conv1_relu[0][0]'] \n",
" ) \n",
" \n",
" pool1_pool (MaxPooling2D) (None, 256, 256, 64 0 ['pool1_pad[0][0]'] \n",
" ) \n",
" \n",
" conv2_block1_1_conv (Conv2D) (None, 256, 256, 64 4160 ['pool1_pool[0][0]'] \n",
" ) \n",
" \n",
" conv2_block1_1_bn (BatchNormal (None, 256, 256, 64 256 ['conv2_block1_1_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv2_block1_1_relu (Activatio (None, 256, 256, 64 0 ['conv2_block1_1_bn[0][0]'] \n",
" n) ) \n",
" \n",
" conv2_block1_2_conv (Conv2D) (None, 256, 256, 64 36928 ['conv2_block1_1_relu[0][0]'] \n",
" ) \n",
" \n",
" conv2_block1_2_bn (BatchNormal (None, 256, 256, 64 256 ['conv2_block1_2_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv2_block1_2_relu (Activatio (None, 256, 256, 64 0 ['conv2_block1_2_bn[0][0]'] \n",
" n) ) \n",
" \n",
" conv2_block1_0_conv (Conv2D) (None, 256, 256, 25 16640 ['pool1_pool[0][0]'] \n",
" 6) \n",
" \n",
" conv2_block1_3_conv (Conv2D) (None, 256, 256, 25 16640 ['conv2_block1_2_relu[0][0]'] \n",
" 6) \n",
" \n",
" conv2_block1_0_bn (BatchNormal (None, 256, 256, 25 1024 ['conv2_block1_0_conv[0][0]'] \n",
" ization) 6) \n",
" \n",
" conv2_block1_3_bn (BatchNormal (None, 256, 256, 25 1024 ['conv2_block1_3_conv[0][0]'] \n",
" ization) 6) \n",
" \n",
" conv2_block1_add (Add) (None, 256, 256, 25 0 ['conv2_block1_0_bn[0][0]', \n",
" 6) 'conv2_block1_3_bn[0][0]'] \n",
" \n",
" conv2_block1_out (Activation) (None, 256, 256, 25 0 ['conv2_block1_add[0][0]'] \n",
" 6) \n",
" \n",
" conv2_block2_1_conv (Conv2D) (None, 256, 256, 64 16448 ['conv2_block1_out[0][0]'] \n",
" ) \n",
" \n",
" conv2_block2_1_bn (BatchNormal (None, 256, 256, 64 256 ['conv2_block2_1_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv2_block2_1_relu (Activatio (None, 256, 256, 64 0 ['conv2_block2_1_bn[0][0]'] \n",
" n) ) \n",
" \n",
" conv2_block2_2_conv (Conv2D) (None, 256, 256, 64 36928 ['conv2_block2_1_relu[0][0]'] \n",
" ) \n",
" \n",
" conv2_block2_2_bn (BatchNormal (None, 256, 256, 64 256 ['conv2_block2_2_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv2_block2_2_relu (Activatio (None, 256, 256, 64 0 ['conv2_block2_2_bn[0][0]'] \n",
" n) ) \n",
" \n",
" conv2_block2_3_conv (Conv2D) (None, 256, 256, 25 16640 ['conv2_block2_2_relu[0][0]'] \n",
" 6) \n",
" \n",
" conv2_block2_3_bn (BatchNormal (None, 256, 256, 25 1024 ['conv2_block2_3_conv[0][0]'] \n",
" ization) 6) \n",
" \n",
" conv2_block2_add (Add) (None, 256, 256, 25 0 ['conv2_block1_out[0][0]', \n",
" 6) 'conv2_block2_3_bn[0][0]'] \n",
" \n",
" conv2_block2_out (Activation) (None, 256, 256, 25 0 ['conv2_block2_add[0][0]'] \n",
" 6) \n",
" \n",
" conv2_block3_1_conv (Conv2D) (None, 256, 256, 64 16448 ['conv2_block2_out[0][0]'] \n",
" ) \n",
" \n",
" conv2_block3_1_bn (BatchNormal (None, 256, 256, 64 256 ['conv2_block3_1_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv2_block3_1_relu (Activatio (None, 256, 256, 64 0 ['conv2_block3_1_bn[0][0]'] \n",
" n) ) \n",
" \n",
" conv2_block3_2_conv (Conv2D) (None, 256, 256, 64 36928 ['conv2_block3_1_relu[0][0]'] \n",
" ) \n",
" \n",
" conv2_block3_2_bn (BatchNormal (None, 256, 256, 64 256 ['conv2_block3_2_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv2_block3_2_relu (Activatio (None, 256, 256, 64 0 ['conv2_block3_2_bn[0][0]'] \n",
" n) ) \n",
" \n",
" conv2_block3_3_conv (Conv2D) (None, 256, 256, 25 16640 ['conv2_block3_2_relu[0][0]'] \n",
" 6) \n",
" \n",
" conv2_block3_3_bn (BatchNormal (None, 256, 256, 25 1024 ['conv2_block3_3_conv[0][0]'] \n",
" ization) 6) \n",
" \n",
" conv2_block3_add (Add) (None, 256, 256, 25 0 ['conv2_block2_out[0][0]', \n",
" 6) 'conv2_block3_3_bn[0][0]'] \n",
" \n",
" conv2_block3_out (Activation) (None, 256, 256, 25 0 ['conv2_block3_add[0][0]'] \n",
" 6) \n",
" \n",
" conv3_block1_1_conv (Conv2D) (None, 128, 128, 12 32896 ['conv2_block3_out[0][0]'] \n",
" 8) \n",
" \n",
" conv3_block1_1_bn (BatchNormal (None, 128, 128, 12 512 ['conv3_block1_1_conv[0][0]'] \n",
" ization) 8) \n",
" \n",
" conv3_block1_1_relu (Activatio (None, 128, 128, 12 0 ['conv3_block1_1_bn[0][0]'] \n",
" n) 8) \n",
" \n",
" conv3_block1_2_conv (Conv2D) (None, 128, 128, 12 147584 ['conv3_block1_1_relu[0][0]'] \n",
" 8) \n",
" \n",
" conv3_block1_2_bn (BatchNormal (None, 128, 128, 12 512 ['conv3_block1_2_conv[0][0]'] \n",
" ization) 8) \n",
" \n",
" conv3_block1_2_relu (Activatio (None, 128, 128, 12 0 ['conv3_block1_2_bn[0][0]'] \n",
" n) 8) \n",
" \n",
" conv3_block1_0_conv (Conv2D) (None, 128, 128, 51 131584 ['conv2_block3_out[0][0]'] \n",
" 2) \n",
" \n",
" conv3_block1_3_conv (Conv2D) (None, 128, 128, 51 66048 ['conv3_block1_2_relu[0][0]'] \n",
" 2) \n",
" \n",
" conv3_block1_0_bn (BatchNormal (None, 128, 128, 51 2048 ['conv3_block1_0_conv[0][0]'] \n",
" ization) 2) \n",
" \n",
" conv3_block1_3_bn (BatchNormal (None, 128, 128, 51 2048 ['conv3_block1_3_conv[0][0]'] \n",
" ization) 2) \n",
" \n",
" conv3_block1_add (Add) (None, 128, 128, 51 0 ['conv3_block1_0_bn[0][0]', \n",
" 2) 'conv3_block1_3_bn[0][0]'] \n",
" \n",
" conv3_block1_out (Activation) (None, 128, 128, 51 0 ['conv3_block1_add[0][0]'] \n",
" 2) \n",
" \n",
" conv3_block2_1_conv (Conv2D) (None, 128, 128, 12 65664 ['conv3_block1_out[0][0]'] \n",
" 8) \n",
" \n",
" conv3_block2_1_bn (BatchNormal (None, 128, 128, 12 512 ['conv3_block2_1_conv[0][0]'] \n",
" ization) 8) \n",
" \n",
" conv3_block2_1_relu (Activatio (None, 128, 128, 12 0 ['conv3_block2_1_bn[0][0]'] \n",
" n) 8) \n",
" \n",
" conv3_block2_2_conv (Conv2D) (None, 128, 128, 12 147584 ['conv3_block2_1_relu[0][0]'] \n",
" 8) \n",
" \n",
" conv3_block2_2_bn (BatchNormal (None, 128, 128, 12 512 ['conv3_block2_2_conv[0][0]'] \n",
" ization) 8) \n",
" \n",
" conv3_block2_2_relu (Activatio (None, 128, 128, 12 0 ['conv3_block2_2_bn[0][0]'] \n",
" n) 8) \n",
" \n",
" conv3_block2_3_conv (Conv2D) (None, 128, 128, 51 66048 ['conv3_block2_2_relu[0][0]'] \n",
" 2) \n",
" \n",
" conv3_block2_3_bn (BatchNormal (None, 128, 128, 51 2048 ['conv3_block2_3_conv[0][0]'] \n",
" ization) 2) \n",
" \n",
" conv3_block2_add (Add) (None, 128, 128, 51 0 ['conv3_block1_out[0][0]', \n",
" 2) 'conv3_block2_3_bn[0][0]'] \n",
" \n",
" conv3_block2_out (Activation) (None, 128, 128, 51 0 ['conv3_block2_add[0][0]'] \n",
" 2) \n",
" \n",
" conv3_block3_1_conv (Conv2D) (None, 128, 128, 12 65664 ['conv3_block2_out[0][0]'] \n",
" 8) \n",
" \n",
" conv3_block3_1_bn (BatchNormal (None, 128, 128, 12 512 ['conv3_block3_1_conv[0][0]'] \n",
" ization) 8) \n",
" \n",
" conv3_block3_1_relu (Activatio (None, 128, 128, 12 0 ['conv3_block3_1_bn[0][0]'] \n",
" n) 8) \n",
" \n",
" conv3_block3_2_conv (Conv2D) (None, 128, 128, 12 147584 ['conv3_block3_1_relu[0][0]'] \n",
" 8) \n",
" \n",
" conv3_block3_2_bn (BatchNormal (None, 128, 128, 12 512 ['conv3_block3_2_conv[0][0]'] \n",
" ization) 8) \n",
" \n",
" conv3_block3_2_relu (Activatio (None, 128, 128, 12 0 ['conv3_block3_2_bn[0][0]'] \n",
" n) 8) \n",
" \n",
" conv3_block3_3_conv (Conv2D) (None, 128, 128, 51 66048 ['conv3_block3_2_relu[0][0]'] \n",
" 2) \n",
" \n",
" conv3_block3_3_bn (BatchNormal (None, 128, 128, 51 2048 ['conv3_block3_3_conv[0][0]'] \n",
" ization) 2) \n",
" \n",
" conv3_block3_add (Add) (None, 128, 128, 51 0 ['conv3_block2_out[0][0]', \n",
" 2) 'conv3_block3_3_bn[0][0]'] \n",
" \n",
" conv3_block3_out (Activation) (None, 128, 128, 51 0 ['conv3_block3_add[0][0]'] \n",
" 2) \n",
" \n",
" conv3_block4_1_conv (Conv2D) (None, 128, 128, 12 65664 ['conv3_block3_out[0][0]'] \n",
" 8) \n",
" \n",
" conv3_block4_1_bn (BatchNormal (None, 128, 128, 12 512 ['conv3_block4_1_conv[0][0]'] \n",
" ization) 8) \n",
" \n",
" conv3_block4_1_relu (Activatio (None, 128, 128, 12 0 ['conv3_block4_1_bn[0][0]'] \n",
" n) 8) \n",
" \n",
" conv3_block4_2_conv (Conv2D) (None, 128, 128, 12 147584 ['conv3_block4_1_relu[0][0]'] \n",
" 8) \n",
" \n",
" conv3_block4_2_bn (BatchNormal (None, 128, 128, 12 512 ['conv3_block4_2_conv[0][0]'] \n",
" ization) 8) \n",
" \n",
" conv3_block4_2_relu (Activatio (None, 128, 128, 12 0 ['conv3_block4_2_bn[0][0]'] \n",
" n) 8) \n",
" \n",
" conv3_block4_3_conv (Conv2D) (None, 128, 128, 51 66048 ['conv3_block4_2_relu[0][0]'] \n",
" 2) \n",
" \n",
" conv3_block4_3_bn (BatchNormal (None, 128, 128, 51 2048 ['conv3_block4_3_conv[0][0]'] \n",
" ization) 2) \n",
" \n",
" conv3_block4_add (Add) (None, 128, 128, 51 0 ['conv3_block3_out[0][0]', \n",
" 2) 'conv3_block4_3_bn[0][0]'] \n",
" \n",
" conv3_block4_out (Activation) (None, 128, 128, 51 0 ['conv3_block4_add[0][0]'] \n",
" 2) \n",
" \n",
" conv4_block1_1_conv (Conv2D) (None, 64, 64, 256) 131328 ['conv3_block4_out[0][0]'] \n",
" \n",
" conv4_block1_1_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block1_1_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block1_1_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block1_1_bn[0][0]'] \n",
" n) \n",
" \n",
" conv4_block1_2_conv (Conv2D) (None, 64, 64, 256) 590080 ['conv4_block1_1_relu[0][0]'] \n",
" \n",
" conv4_block1_2_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block1_2_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block1_2_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block1_2_bn[0][0]'] \n",
" n) \n",
" \n",
" conv4_block1_0_conv (Conv2D) (None, 64, 64, 1024 525312 ['conv3_block4_out[0][0]'] \n",
" ) \n",
" \n",
" conv4_block1_3_conv (Conv2D) (None, 64, 64, 1024 263168 ['conv4_block1_2_relu[0][0]'] \n",
" ) \n",
" \n",
" conv4_block1_0_bn (BatchNormal (None, 64, 64, 1024 4096 ['conv4_block1_0_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv4_block1_3_bn (BatchNormal (None, 64, 64, 1024 4096 ['conv4_block1_3_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv4_block1_add (Add) (None, 64, 64, 1024 0 ['conv4_block1_0_bn[0][0]', \n",
" ) 'conv4_block1_3_bn[0][0]'] \n",
" \n",
" conv4_block1_out (Activation) (None, 64, 64, 1024 0 ['conv4_block1_add[0][0]'] \n",
" ) \n",
" \n",
" conv4_block2_1_conv (Conv2D) (None, 64, 64, 256) 262400 ['conv4_block1_out[0][0]'] \n",
" \n",
" conv4_block2_1_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block2_1_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block2_1_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block2_1_bn[0][0]'] \n",
" n) \n",
" \n",
" conv4_block2_2_conv (Conv2D) (None, 64, 64, 256) 590080 ['conv4_block2_1_relu[0][0]'] \n",
" \n",
" conv4_block2_2_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block2_2_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block2_2_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block2_2_bn[0][0]'] \n",
" n) \n",
" \n",
" conv4_block2_3_conv (Conv2D) (None, 64, 64, 1024 263168 ['conv4_block2_2_relu[0][0]'] \n",
" ) \n",
" \n",
" conv4_block2_3_bn (BatchNormal (None, 64, 64, 1024 4096 ['conv4_block2_3_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv4_block2_add (Add) (None, 64, 64, 1024 0 ['conv4_block1_out[0][0]', \n",
" ) 'conv4_block2_3_bn[0][0]'] \n",
" \n",
" conv4_block2_out (Activation) (None, 64, 64, 1024 0 ['conv4_block2_add[0][0]'] \n",
" ) \n",
" \n",
" conv4_block3_1_conv (Conv2D) (None, 64, 64, 256) 262400 ['conv4_block2_out[0][0]'] \n",
" \n",
" conv4_block3_1_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block3_1_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block3_1_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block3_1_bn[0][0]'] \n",
" n) \n",
" \n",
" conv4_block3_2_conv (Conv2D) (None, 64, 64, 256) 590080 ['conv4_block3_1_relu[0][0]'] \n",
" \n",
" conv4_block3_2_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block3_2_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block3_2_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block3_2_bn[0][0]'] \n",
" n) \n",
" \n",
" conv4_block3_3_conv (Conv2D) (None, 64, 64, 1024 263168 ['conv4_block3_2_relu[0][0]'] \n",
" ) \n",
" \n",
" conv4_block3_3_bn (BatchNormal (None, 64, 64, 1024 4096 ['conv4_block3_3_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv4_block3_add (Add) (None, 64, 64, 1024 0 ['conv4_block2_out[0][0]', \n",
" ) 'conv4_block3_3_bn[0][0]'] \n",
" \n",
" conv4_block3_out (Activation) (None, 64, 64, 1024 0 ['conv4_block3_add[0][0]'] \n",
" ) \n",
" \n",
" conv4_block4_1_conv (Conv2D) (None, 64, 64, 256) 262400 ['conv4_block3_out[0][0]'] \n",
" \n",
" conv4_block4_1_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block4_1_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block4_1_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block4_1_bn[0][0]'] \n",
" n) \n",
" \n",
" conv4_block4_2_conv (Conv2D) (None, 64, 64, 256) 590080 ['conv4_block4_1_relu[0][0]'] \n",
" \n",
" conv4_block4_2_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block4_2_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block4_2_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block4_2_bn[0][0]'] \n",
" n) \n",
" \n",
" conv4_block4_3_conv (Conv2D) (None, 64, 64, 1024 263168 ['conv4_block4_2_relu[0][0]'] \n",
" ) \n",
" \n",
" conv4_block4_3_bn (BatchNormal (None, 64, 64, 1024 4096 ['conv4_block4_3_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv4_block4_add (Add) (None, 64, 64, 1024 0 ['conv4_block3_out[0][0]', \n",
" ) 'conv4_block4_3_bn[0][0]'] \n",
" \n",
" conv4_block4_out (Activation) (None, 64, 64, 1024 0 ['conv4_block4_add[0][0]'] \n",
" ) \n",
" \n",
" conv4_block5_1_conv (Conv2D) (None, 64, 64, 256) 262400 ['conv4_block4_out[0][0]'] \n",
" \n",
" conv4_block5_1_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block5_1_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block5_1_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block5_1_bn[0][0]'] \n",
" n) \n",
" \n",
" conv4_block5_2_conv (Conv2D) (None, 64, 64, 256) 590080 ['conv4_block5_1_relu[0][0]'] \n",
" \n",
" conv4_block5_2_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block5_2_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block5_2_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block5_2_bn[0][0]'] \n",
" n) \n",
" \n",
" conv4_block5_3_conv (Conv2D) (None, 64, 64, 1024 263168 ['conv4_block5_2_relu[0][0]'] \n",
" ) \n",
" \n",
" conv4_block5_3_bn (BatchNormal (None, 64, 64, 1024 4096 ['conv4_block5_3_conv[0][0]'] \n",
" ization) ) \n",
" \n",
" conv4_block5_add (Add) (None, 64, 64, 1024 0 ['conv4_block4_out[0][0]', \n",
" ) 'conv4_block5_3_bn[0][0]'] \n",
" \n",
" conv4_block5_out (Activation) (None, 64, 64, 1024 0 ['conv4_block5_add[0][0]'] \n",
" ) \n",
" \n",
" conv4_block6_1_conv (Conv2D) (None, 64, 64, 256) 262400 ['conv4_block5_out[0][0]'] \n",
" \n",
" conv4_block6_1_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block6_1_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block6_1_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block6_1_bn[0][0]'] \n",
" n) \n",
" \n",
" conv4_block6_2_conv (Conv2D) (None, 64, 64, 256) 590080 ['conv4_block6_1_relu[0][0]'] \n",
" \n",
" conv4_block6_2_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv4_block6_2_conv[0][0]'] \n",
" ization) \n",
" \n",
" conv4_block6_2_relu (Activatio (None, 64, 64, 256) 0 ['conv4_block6_2_bn[0][0]'] \n",
" n) \n",
" \n",
" average_pooling2d_2 (AveragePo (None, 1, 1, 256) 0 ['conv4_block6_2_relu[0][0]'] \n",
" oling2D) \n",
" \n",
" conv2d_17 (Conv2D) (None, 1, 1, 256) 65792 ['average_pooling2d_2[0][0]'] \n",
" \n",
" batch_normalization_16 (BatchN (None, 1, 1, 256) 1024 ['conv2d_17[0][0]'] \n",
" ormalization) \n",
" \n",
" conv2d_18 (Conv2D) (None, 64, 64, 256) 65536 ['conv4_block6_2_relu[0][0]'] \n",
" \n",
" conv2d_19 (Conv2D) (None, 64, 64, 256) 589824 ['conv4_block6_2_relu[0][0]'] \n",
" \n",
" conv2d_20 (Conv2D) (None, 64, 64, 256) 589824 ['conv4_block6_2_relu[0][0]'] \n",
" \n",
" conv2d_21 (Conv2D) (None, 64, 64, 256) 589824 ['conv4_block6_2_relu[0][0]'] \n",
" \n",
" tf.nn.relu_16 (TFOpLambda) (None, 1, 1, 256) 0 ['batch_normalization_16[0][0]'] \n",
" \n",
" batch_normalization_17 (BatchN (None, 64, 64, 256) 1024 ['conv2d_18[0][0]'] \n",
" ormalization) \n",
" \n",
" batch_normalization_18 (BatchN (None, 64, 64, 256) 1024 ['conv2d_19[0][0]'] \n",
" ormalization) \n",
" \n",
" batch_normalization_19 (BatchN (None, 64, 64, 256) 1024 ['conv2d_20[0][0]'] \n",
" ormalization) \n",
" \n",
" batch_normalization_20 (BatchN (None, 64, 64, 256) 1024 ['conv2d_21[0][0]'] \n",
" ormalization) \n",
" \n",
" up_sampling2d_5 (UpSampling2D) (None, 64, 64, 256) 0 ['tf.nn.relu_16[0][0]'] \n",
" \n",
" tf.nn.relu_17 (TFOpLambda) (None, 64, 64, 256) 0 ['batch_normalization_17[0][0]'] \n",
" \n",
" tf.nn.relu_18 (TFOpLambda) (None, 64, 64, 256) 0 ['batch_normalization_18[0][0]'] \n",
" \n",
" tf.nn.relu_19 (TFOpLambda) (None, 64, 64, 256) 0 ['batch_normalization_19[0][0]'] \n",
" \n",
" tf.nn.relu_20 (TFOpLambda) (None, 64, 64, 256) 0 ['batch_normalization_20[0][0]'] \n",
" \n",
" concatenate_4 (Concatenate) (None, 64, 64, 1280 0 ['up_sampling2d_5[0][0]', \n",
" ) 'tf.nn.relu_17[0][0]', \n",
" 'tf.nn.relu_18[0][0]', \n",
" 'tf.nn.relu_19[0][0]', \n",
" 'tf.nn.relu_20[0][0]'] \n",
" \n",
" conv2d_22 (Conv2D) (None, 64, 64, 256) 327680 ['concatenate_4[0][0]'] \n",
" \n",
" batch_normalization_21 (BatchN (None, 64, 64, 256) 1024 ['conv2d_22[0][0]'] \n",
" ormalization) \n",
" \n",
" conv2d_23 (Conv2D) (None, 256, 256, 48 3072 ['conv2_block3_2_relu[0][0]'] \n",
" ) \n",
" \n",
" tf.nn.relu_21 (TFOpLambda) (None, 64, 64, 256) 0 ['batch_normalization_21[0][0]'] \n",
" \n",
" batch_normalization_22 (BatchN (None, 256, 256, 48 192 ['conv2d_23[0][0]'] \n",
" ormalization) ) \n",
" \n",
" up_sampling2d_6 (UpSampling2D) (None, 256, 256, 25 0 ['tf.nn.relu_21[0][0]'] \n",
" 6) \n",
" \n",
" tf.nn.relu_22 (TFOpLambda) (None, 256, 256, 48 0 ['batch_normalization_22[0][0]'] \n",
" ) \n",
" \n",
" concatenate_5 (Concatenate) (None, 256, 256, 30 0 ['up_sampling2d_6[0][0]', \n",
" 4) 'tf.nn.relu_22[0][0]'] \n",
" \n",
" conv2d_24 (Conv2D) (None, 256, 256, 25 700416 ['concatenate_5[0][0]'] \n",
" 6) \n",
" \n",
" batch_normalization_23 (BatchN (None, 256, 256, 25 1024 ['conv2d_24[0][0]'] \n",
" ormalization) 6) \n",
" \n",
" tf.nn.relu_23 (TFOpLambda) (None, 256, 256, 25 0 ['batch_normalization_23[0][0]'] \n",
" 6) \n",
" \n",
" conv2d_25 (Conv2D) (None, 256, 256, 25 589824 ['tf.nn.relu_23[0][0]'] \n",
" 6) \n",
" \n",
" batch_normalization_24 (BatchN (None, 256, 256, 25 1024 ['conv2d_25[0][0]'] \n",
" ormalization) 6) \n",
" \n",
" tf.nn.relu_24 (TFOpLambda) (None, 256, 256, 25 0 ['batch_normalization_24[0][0]'] \n",
" 6) \n",
" \n",
" up_sampling2d_7 (UpSampling2D) (None, 1024, 1024, 0 ['tf.nn.relu_24[0][0]'] \n",
" 256) \n",
" \n",
" conv2d_26 (Conv2D) (None, 1024, 1024, 2056 ['up_sampling2d_7[0][0]'] \n",
" 8) \n",
" \n",
"==================================================================================================\n",
"Total params: 11,854,152\n",
"Trainable params: 11,821,416\n",
"Non-trainable params: 32,736\n",
"__________________________________________________________________________________________________\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# saved_model = keras.models.load_model('./weights_13_01_22/weights-improvement-1500.hdf5')"
],
"metadata": {
"id": "jKw4TZ39c0O_"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n",
"optimizer = tf.keras.optimizers.SGD(\n",
" learning_rate=0.0001, momentum=0.1, nesterov=False, name=\"SGD\")\n",
"\n",
"model.compile(\n",
" optimizer=optimizer,\n",
" loss=loss,\n",
" metrics=[\"accuracy\"],\n",
")\n",
"filepath=\"./weights_13_01_22/weights-improvement-{epoch:02d}.hdf5\"\n",
"checkpoint1 = keras.callbacks.ModelCheckpoint(filepath, monitor='accuracy', verbose=1, save_best_only=False, save_weights_only=False,mode='max',period=10)\n",
"checkpoint2 = keras.callbacks.TensorBoard('./logs', update_freq=1)\n",
"callbacks_list = [checkpoint1,checkpoint2]\n",
"\n",
"history = model.fit(train_dataset, validation_data=val_dataset, epochs=3000,callbacks=callbacks_list,initial_epoch=0)\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 365
},
"id": "59Ul2heoWPI4",
"outputId": "939127c1-6896-4611-a259-e57ea1e3f04b"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Epoch 522/3000\n"
]
},
{
"output_type": "error",
"ename": "KeyboardInterrupt",
"evalue": "ignored",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-42-9bf8bcabc6b6>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0mcallbacks_list\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mcheckpoint1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mcheckpoint2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0mhistory\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_dataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalidation_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mval_dataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m3000\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mcallbacks\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcallbacks_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0minitial_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 63\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 64\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 65\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# pylint: disable=broad-except\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 66\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[1;32m 1214\u001b[0m _r=1):\n\u001b[1;32m 1215\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mon_train_batch_begin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1216\u001b[0;31m \u001b[0mtmp_logs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterator\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1217\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshould_sync\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1218\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masync_wait\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 150\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 151\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/def_function.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 908\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 909\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mOptionalXlaContext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_jit_compile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 910\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 911\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 912\u001b[0m \u001b[0mnew_tracing_count\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental_get_tracing_count\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/def_function.py\u001b[0m in \u001b[0;36m_call\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;31m# In this case we have created variables on the first call, so we run the\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 941\u001b[0m \u001b[0;31m# defunned version which is guaranteed to never create variables.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 942\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_stateless_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# pylint: disable=not-callable\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 943\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_stateful_fn\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 944\u001b[0m \u001b[0;31m# Release the lock early so that multiple threads can perform the call\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/function.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 3129\u001b[0m filtered_flat_args) = self._maybe_define_function(args, kwargs)\n\u001b[1;32m 3130\u001b[0m return graph_function._call_flat(\n\u001b[0;32m-> 3131\u001b[0;31m filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access\n\u001b[0m\u001b[1;32m 3132\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3133\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/function.py\u001b[0m in \u001b[0;36m_call_flat\u001b[0;34m(self, args, captured_inputs, cancellation_manager)\u001b[0m\n\u001b[1;32m 1958\u001b[0m \u001b[0;31m# No tape is watching; skip to running the function.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1959\u001b[0m return self._build_call_outputs(self._inference_function.call(\n\u001b[0;32m-> 1960\u001b[0;31m ctx, args, cancellation_manager=cancellation_manager))\n\u001b[0m\u001b[1;32m 1961\u001b[0m forward_backward = self._select_forward_and_backward_functions(\n\u001b[1;32m 1962\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/function.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, ctx, args, cancellation_manager)\u001b[0m\n\u001b[1;32m 601\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 602\u001b[0m \u001b[0mattrs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mattrs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 603\u001b[0;31m ctx=ctx)\n\u001b[0m\u001b[1;32m 604\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 605\u001b[0m outputs = execute.execute_with_cancellation(\n",
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py\u001b[0m in \u001b[0;36mquick_execute\u001b[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mensure_initialized\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\n\u001b[0;32m---> 59\u001b[0;31m inputs, attrs, num_outputs)\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_NotOkStatusException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
]
},
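{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal resume sketch, added for illustration: the `ModelCheckpoint` callback above writes a full model every 10 epochs, so an interrupted run can be continued by reloading the newest `.hdf5` file (the commented-out `load_model` cell earlier does this manually). The epoch parsing assumes the `weights-improvement-{epoch:02d}.hdf5` naming used above."
]
},
{
"cell_type": "code",
"source": [
"# Hypothetical resume sketch (assumes checkpoints exist under ./weights_13_01_22/).\n",
"ckpts = glob(\"./weights_13_01_22/weights-improvement-*.hdf5\")\n",
"if ckpts:\n",
"    # Pick the checkpoint with the highest epoch number encoded in its filename.\n",
"    latest = max(ckpts, key=lambda p: int(p.split(\"-\")[-1].split(\".\")[0]))\n",
"    last_epoch = int(latest.split(\"-\")[-1].split(\".\")[0])\n",
"    print(\"Would resume from\", latest, \"at epoch\", last_epoch)\n",
"    # model = keras.models.load_model(latest)\n",
"    # history = model.fit(train_dataset, validation_data=val_dataset, epochs=3000,\n",
"    #                     callbacks=callbacks_list, initial_epoch=last_epoch)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},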
{
"cell_type": "code",
"source": [
"LOG_DIR = 'logs/train'\n",
"get_ipython().system_raw(\n",
" 'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &'\n",
" .format(LOG_DIR)\n",
")"
],
"metadata": {
"id": "nf-g0ILV-08_"
},
"execution_count": null,
"outputs": []
},
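{
"cell_type": "markdown",
"metadata": {},
"source": [
"An alternative sketch (not used in this run): Colab bundles the TensorBoard notebook extension, which usually embeds the dashboard inline and avoids the ngrok/localtunnel tunnelling attempted in the cells below."
]
},
{
"cell_type": "code",
"source": [
"# Hypothetical alternative to tunnelling: embed TensorBoard inline in the notebook.\n",
"# %load_ext tensorboard\n",
"# %tensorboard --logdir logs/train"
],
"metadata": {},
"execution_count": null,
"outputs": []
},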
{
"cell_type": "code",
"source": [
"! curl http://localhost:6006"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "-IrCyLBl-0_u",
"outputId": "d491a457-1b0b-43f1-b899-266d7a87e04d"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"curl: (7) Failed to connect to localhost port 6006: Connection refused\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"! wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip > /dev/null 2>&1\n",
"! unzip ngrok-stable-linux-amd64.zip > /dev/null 2>&1"
],
"metadata": {
"id": "ts3eP6NI-1Dl"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"get_ipython().system_raw('./ngrok http 6006 &')"
],
"metadata": {
"id": "FFShl8cK-1HJ"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"! curl -s http://localhost:4040/api/tunnels | python3 -c \"import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])\""
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "5GuvMd4I_GEw",
"outputId": "5b00a8db-e9aa-4c38-fe6c-787ac0d01527"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Traceback (most recent call last):\n",
" File \"<string>\", line 1, in <module>\n",
" File \"/usr/lib/python3.7/json/__init__.py\", line 296, in load\n",
" parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw)\n",
" File \"/usr/lib/python3.7/json/__init__.py\", line 348, in loads\n",
" return _default_decoder.decode(s)\n",
" File \"/usr/lib/python3.7/json/decoder.py\", line 337, in decode\n",
" obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n",
" File \"/usr/lib/python3.7/json/decoder.py\", line 355, in raw_decode\n",
" raise JSONDecodeError(\"Expecting value\", s, err.value) from None\n",
"json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"! npm install -g localtunnel"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "cYY7wRYA_GIu",
"outputId": "12ffbddf-e59d-4d3a-d20c-04ab143b2185"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[K\u001b[?25h/tools/node/bin/lt -> /tools/node/lib/node_modules/localtunnel/bin/lt.js\n",
"\u001b[K\u001b[?25h+ localtunnel@2.0.2\n",
"added 22 packages from 22 contributors in 1.625s\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"get_ipython().system_raw('lt --port 6006 >> url.txt 2>&1 &')"
],
"metadata": {
"id": "9quip4sP_GM6"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"! cat url.txt"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "3I6zgcOy_GQT",
"outputId": "fdc10db3-470d-4ec9-c9d1-0a77952a65c2"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"your url is: https://tame-catfish-90.loca.lt\n",
"your url is: https://big-eagle-74.loca.lt\n",
"your url is: https://plastic-liger-63.loca.lt\n",
"your url is: https://wonderful-panther-93.loca.lt\n"
]
}
]
},
{
"cell_type": "code",
"source": [
""
],
"metadata": {
"id": "7zJEl-7N-1K_"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
""
],
"metadata": {
"id": "wrD6W0XU-1NV"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
""
],
"metadata": {
"id": "YledxWJ4clbG"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"plt.plot(history.history[\"loss\"])\n",
"plt.title(\"Training Loss\")\n",
"plt.ylabel(\"loss\")\n",
"plt.xlabel(\"epoch\")\n",
"plt.show()\n",
"\n",
"plt.plot(history.history[\"accuracy\"])\n",
"plt.title(\"Training Accuracy\")\n",
"plt.ylabel(\"accuracy\")\n",
"plt.xlabel(\"epoch\")\n",
"plt.show()\n",
"\n",
"plt.plot(history.history[\"val_loss\"])\n",
"plt.title(\"Validation Loss\")\n",
"plt.ylabel(\"val_loss\")\n",
"plt.xlabel(\"epoch\")\n",
"plt.show()\n",
"\n",
"plt.plot(history.history[\"val_accuracy\"])\n",
"plt.title(\"Validation Accuracy\")\n",
"plt.ylabel(\"val_accuracy\")\n",
"plt.xlabel(\"epoch\")\n",
"plt.show()"
],
"metadata": {
"id": "BlzOkc5oWgrP"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
""
],
"metadata": {
"id": "Vz7qN-k2tWu8"
},
"execution_count": null,
"outputs": []
}
]
}