{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "CIFAR10.ipynb",
"provenance": [],
"collapsed_sections": [],
"mount_file_id": "https://gist.github.com/NaiveTomcat/ca04d714a0c5203c55f62c429b7589e3#file-cifar10-ipynb",
"authorship_tag": "ABX9TyObQQ1NPLfxSGn7zK4+t8Dd",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/NaiveTomcat/ca04d714a0c5203c55f62c429b7589e3/cifar10.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
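{
"cell_type": "code",
"metadata": {
"colab": {}
},
"source": [
"# The next cell extracts the archive from ./drive, so Google Drive must be\n",
"# mounted first. A minimal sketch using the standard Colab API (in Colab the\n",
"# working directory is /content, so ./drive resolves to this mount point):\n",
"from google.colab import drive\n",
"drive.mount('/content/drive')"
],
"execution_count": null,
"outputs": []
},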
{
"cell_type": "code",
"metadata": {
"id": "daSNCgr6NL3Y",
"colab_type": "code",
"colab": {}
},
"source": [
"!tar -xzf drive/My\\ Drive/cifar-10-python.tar.gz"
],
"execution_count": 2,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "s30xeG-OgR5a",
"colab_type": "code",
"colab": {}
},
"source": [
"!pip install tensorflow-gpu"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "Pvx4Si-AgTAp",
"colab_type": "code",
"colab": {}
},
"source": [
"# -*- coding: utf-8 -*-\n",
"import numpy as np\n",
"import os\n",
"import sys\n",
"import pickle\n",
"\n",
"def __load_batch__(fpath, label_key='labels'):\n",
" \"\"\"Internal utility for parsing CIFAR data.\n",
" # Arguments\n",
" fpath: path the file to parse.\n",
" label_key: key for label data in the retrieve\n",
" dictionary.\n",
" # Returns\n",
" A tuple `(data, labels)`.\n",
" \"\"\"\n",
" with open(fpath, 'rb') as f:\n",
" if sys.version_info < (3,):\n",
" d = pickle.load(f)\n",
" else:\n",
" d = pickle.load(f, encoding='bytes')\n",
" # decode utf8\n",
" d_decoded = {}\n",
" for k, v in d.items():\n",
" d_decoded[k.decode('utf8')] = v\n",
" d = d_decoded\n",
" data = d['data']\n",
" labels = d[label_key]\n",
"\n",
" data = data.reshape(data.shape[0], 3, 32, 32)\n",
" return data, labels\n",
"\n",
"\n",
"def load_data():\n",
" \"\"\"Loads CIFAR10 dataset.\n",
" # Returns\n",
" Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n",
" \"\"\"\n",
" dirname = 'cifar-10-batches-py'\n",
" path = './%s' % dirname\n",
"\n",
" num_train_samples = 50000\n",
"\n",
" x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\n",
" y_train = np.empty((num_train_samples,), dtype='uint8')\n",
"\n",
" for i in range(1, 6):\n",
" fpath = os.path.join(path, 'data_batch_' + str(i))\n",
" (x_train[(i - 1) * 10000: i * 10000, :, :, :],\n",
" y_train[(i - 1) * 10000: i * 10000]) = __load_batch__(fpath)\n",
"\n",
" fpath = os.path.join(path, 'test_batch')\n",
" x_test, y_test = __load_batch__(fpath)\n",
"\n",
" y_train = np.reshape(y_train, (len(y_train), 1))\n",
" y_test = np.reshape(y_test, (len(y_test), 1))\n",
"\n",
" x_train = x_train.transpose(0, 2, 3, 1)\n",
" x_test = x_test.transpose(0, 2, 3, 1)\n",
"\n",
" return (x_train, y_train), (x_test, y_test)\n",
"\n",
"\n",
"def test_data():\n",
" (x_train, y_train), (x_test, y_test) = load_data()\n",
" print(x_train.shape)\n",
" print(x_test.shape)\n",
" print(y_train.shape)\n",
" print(y_test.shape)"
],
"execution_count": 3,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "LY2eq5RagZSq",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 261
},
"outputId": "c941ef21-889a-44f0-de03-6471ec3bf941"
},
"source": [
"\n",
"\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"import tensorflow.keras as keras\n",
"from tensorflow.keras import layers\n",
"from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, add\n",
"from tensorflow.keras.layers import Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D\n",
"from tensorflow.keras.models import Model, load_model, save_model\n",
"from tensorflow.keras.preprocessing import image\n",
" \n",
" \n",
"import tensorflow.keras.backend as K\n",
"\n",
"import os\n",
"import sys\n",
"import pickle\n",
"\n",
"def __load_batch__(fpath, label_key='labels'):\n",
" \"\"\"Internal utility for parsing CIFAR data.\n",
" # Arguments\n",
" fpath: path the file to parse.\n",
" label_key: key for label data in the retrieve\n",
" dictionary.\n",
" # Returns\n",
" A tuple `(data, labels)`.\n",
" \"\"\"\n",
" with open(fpath, 'rb') as f:\n",
" if sys.version_info < (3,):\n",
" d = pickle.load(f)\n",
" else:\n",
" d = pickle.load(f, encoding='bytes')\n",
" # decode utf8\n",
" d_decoded = {}\n",
" for k, v in d.items():\n",
" d_decoded[k.decode('utf8')] = v\n",
" d = d_decoded\n",
" data = d['data']\n",
" labels = d[label_key]\n",
"\n",
" data = data.reshape(data.shape[0], 3, 32, 32)\n",
" return data, labels\n",
"\n",
"\n",
"def load_data():\n",
" \"\"\"Loads CIFAR10 dataset.\n",
" # Returns\n",
" Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n",
" \"\"\"\n",
" dirname = 'cifar-10-batches-py'\n",
" path = './%s' % dirname\n",
"\n",
" num_train_samples = 10000\n",
"\n",
" x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\n",
" y_train = np.empty((num_train_samples,), dtype='uint8')\n",
"\n",
" for i in range(1, 2):\n",
" fpath = os.path.join(path, 'data_batch_' + str(i))\n",
" (x_train[(i - 1) * 10000: i * 10000, :, :, :],\n",
" y_train[(i - 1) * 10000: i * 10000]) = __load_batch__(fpath)\n",
"\n",
" fpath = os.path.join(path, 'test_batch')\n",
" x_test, y_test = __load_batch__(fpath)\n",
"\n",
" y_train = np.reshape(y_train, (len(y_train), 1))\n",
" y_test = np.reshape(y_test, (len(y_test), 1))\n",
"\n",
" x_train = x_train.transpose(0, 2, 3, 1)\n",
" x_test = x_test.transpose(0, 2, 3, 1)\n",
"\n",
" return (x_train, y_train), (x_test, y_test)\n",
"\n",
"\n",
"def test_data():\n",
" (x_train, y_train), (x_test, y_test) = load_data()\n",
" print(x_train.shape)\n",
" print(x_test.shape)\n",
" print(y_train.shape)\n",
" print(y_test.shape)\n",
" \n",
" \n",
"def Conv2D_BN(x, filters, kernel_size, strides=(1, 1), padding='same', name=None):\n",
" if name:\n",
" bn_name = name + '_bn'\n",
" conv_name = name + '_conv'\n",
" else:\n",
" bn_name = None\n",
" conv_name = None\n",
" x = Conv2D(filters, kernel_size, strides=strides, padding=padding, activation='relu', name=conv_name)(x)\n",
" x = BatchNormalization(name=bn_name)(x)\n",
" return x\n",
" \n",
" \n",
"def identity_block(input_tensor, filters, kernel_size, strides=(1, 1), is_conv_shortcuts=False):\n",
" \"\"\"\n",
" \n",
" :param input_tensor:\n",
" :param filters:\n",
" :param kernel_size:\n",
" :param strides:\n",
" :param is_conv_shortcuts: 直接连接或者投影连接\n",
" :return:\n",
" \"\"\"\n",
" x = Conv2D_BN(input_tensor, filters, kernel_size, strides=strides, padding='same')\n",
" x = Conv2D_BN(x, filters, kernel_size, padding='same')\n",
" if is_conv_shortcuts:\n",
" shortcut = Conv2D_BN(input_tensor, filters, kernel_size, strides=strides, padding='same')\n",
" x = add([x, shortcut])\n",
" else:\n",
" x = add([x, input_tensor])\n",
" return x\n",
" \n",
" \n",
"def bottleneck_block(input_tensor, filters=(64, 64, 256), strides=(1, 1), is_conv_shortcuts=False):\n",
" \"\"\"\n",
" \n",
" :param input_tensor:\n",
" :param filters:\n",
" :param strides:\n",
" :param is_conv_shortcuts: 直接连接或者投影连接\n",
" :return:\n",
" \"\"\"\n",
" filters_1, filters_2, filters_3 = filters\n",
" x = Conv2D_BN(input_tensor, filters=filters_1, kernel_size=(1, 1), strides=strides, padding='same')\n",
" x = Conv2D_BN(x, filters=filters_2, kernel_size=(3, 3))\n",
" x = Conv2D_BN(x, filters=filters_3, kernel_size=(1, 1))\n",
" if is_conv_shortcuts:\n",
" short_cut = Conv2D_BN(input_tensor, filters=filters_3, kernel_size=(1, 1), strides=strides)\n",
" x = add([x, short_cut])\n",
" else:\n",
" x = add([x, input_tensor])\n",
" return x\n",
" \n",
" \n",
"def ResNet34(input_shape=(224, 224, 3), n_classes=1000):\n",
" \"\"\"\n",
" \n",
" :param input_shape:\n",
" :param n_classes:\n",
" :return:\n",
" \"\"\"\n",
" \n",
" input_layer = Input(shape=input_shape)\n",
" x = ZeroPadding2D((3, 3))(input_layer)\n",
" # block1\n",
" x = Conv2D_BN(x, filters=64, kernel_size=(7, 7), strides=(2, 2), padding='valid')\n",
" x = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)\n",
" # block2\n",
" x = identity_block(x, filters=64, kernel_size=(3, 3))\n",
" x = identity_block(x, filters=64, kernel_size=(3, 3))\n",
" x = identity_block(x, filters=64, kernel_size=(3, 3))\n",
" # block3\n",
" x = identity_block(x, filters=128, kernel_size=(3, 3), strides=(2, 2), is_conv_shortcuts=True)\n",
" x = identity_block(x, filters=128, kernel_size=(3, 3))\n",
" x = identity_block(x, filters=128, kernel_size=(3, 3))\n",
" x = identity_block(x, filters=128, kernel_size=(3, 3))\n",
" # block4\n",
" x = identity_block(x, filters=256, kernel_size=(3, 3), strides=(2, 2), is_conv_shortcuts=True)\n",
" x = identity_block(x, filters=256, kernel_size=(3, 3))\n",
" x = identity_block(x, filters=256, kernel_size=(3, 3))\n",
" x = identity_block(x, filters=256, kernel_size=(3, 3))\n",
" x = identity_block(x, filters=256, kernel_size=(3, 3))\n",
" x = identity_block(x, filters=256, kernel_size=(3, 3))\n",
" # block5\n",
" x = identity_block(x, filters=512, kernel_size=(3, 3), strides=(2, 2), is_conv_shortcuts=True)\n",
" x = identity_block(x, filters=512, kernel_size=(3, 3))\n",
" x = identity_block(x, filters=512, kernel_size=(3, 3))\n",
" x = AveragePooling2D(pool_size=(7, 7))(x)\n",
" x = Flatten()(x)\n",
" x = Dense(n_classes, activation='softmax')(x)\n",
" \n",
" model = Model(inputs=input_layer, outputs=x)\n",
" return model\n",
" \n",
" \n",
"def ResNet50(input_shape=(224, 224, 3), n_classes=1000):\n",
" \"\"\"\n",
" \n",
" :param input_shape:\n",
" :param n_classes:\n",
" :return:\n",
" \"\"\"\n",
" input_layer = Input(shape=input_shape)\n",
" x = ZeroPadding2D((3, 3))(input_layer)\n",
" # block1\n",
" x = Conv2D_BN(x, filters=64, kernel_size=(7, 7), strides=(2, 2), padding='valid')\n",
" x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)\n",
" \n",
" # block2\n",
" x = bottleneck_block(x, filters=(64, 64, 256), strides=(1, 1), is_conv_shortcuts=True)\n",
" x = bottleneck_block(x, filters=(64, 64, 256))\n",
" x = bottleneck_block(x, filters=(64, 64, 256))\n",
" # block3\n",
" x = bottleneck_block(x, filters=(128, 128, 512), strides=(2, 2), is_conv_shortcuts=True)\n",
" x = bottleneck_block(x, filters=(128, 128, 512))\n",
" x = bottleneck_block(x, filters=(128, 128, 512))\n",
" x = bottleneck_block(x, filters=(128, 128, 512))\n",
" # block4\n",
" x = bottleneck_block(x, filters=(256, 256, 1024), strides=(2, 2), is_conv_shortcuts=True)\n",
" x = bottleneck_block(x, filters=(256, 256, 1024))\n",
" x = bottleneck_block(x, filters=(256, 256, 1024))\n",
" x = bottleneck_block(x, filters=(256, 256, 1024))\n",
" x = bottleneck_block(x, filters=(256, 256, 1024))\n",
" x = bottleneck_block(x, filters=(256, 256, 1024))\n",
" # block5\n",
" x = bottleneck_block(x, filters=(512, 512, 2048), strides=(2, 2), is_conv_shortcuts=True)\n",
" x = bottleneck_block(x, filters=(512, 512, 2048))\n",
" x = bottleneck_block(x, filters=(512, 512, 2048))\n",
" x = AveragePooling2D(pool_size=(7, 7))(x)\n",
" x = Flatten()(x)\n",
" x = Dense(n_classes, activation='softmax')(x)\n",
" \n",
" model = Model(inputs=input_layer, outputs=x)\n",
" return model\n",
" \n",
"def ResNet50_32(input_shape=(32, 32, 3), n_classes=1000):\n",
" \"\"\"\n",
" \n",
" :param input_shape:\n",
" :param n_classes:\n",
" :return:\n",
" \"\"\"\n",
"# input_layer = Input(shape=input_shape)\n",
" input_layer = Input(shape=(32,32,3))\n",
" x = keras.layers.Lambda(lambda img: K.resize_images(img, 7, 7, data_format='channels_last') ,input_shape=(32, 32, 3))(input_layer)\n",
" x = ZeroPadding2D(padding=3)(x)\n",
" # block1\n",
" x = Conv2D_BN(x, filters=64, kernel_size=(7, 7), strides=(2, 2), padding='valid')\n",
" x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)\n",
"\n",
" # block2\n",
" x = bottleneck_block(x, filters=(64, 64, 256), strides=(1, 1), is_conv_shortcuts=True)\n",
" x = bottleneck_block(x, filters=(64, 64, 256))\n",
" x = bottleneck_block(x, filters=(64, 64, 256))\n",
" # block3\n",
" x = bottleneck_block(x, filters=(128, 128, 512), strides=(2, 2), is_conv_shortcuts=True)\n",
" x = bottleneck_block(x, filters=(128, 128, 512))\n",
" x = bottleneck_block(x, filters=(128, 128, 512))\n",
" x = bottleneck_block(x, filters=(128, 128, 512))\n",
" # block4\n",
" x = bottleneck_block(x, filters=(256, 256, 1024), strides=(2, 2), is_conv_shortcuts=True)\n",
" x = bottleneck_block(x, filters=(256, 256, 1024))\n",
" x = bottleneck_block(x, filters=(256, 256, 1024))\n",
" x = bottleneck_block(x, filters=(256, 256, 1024))\n",
" x = bottleneck_block(x, filters=(256, 256, 1024))\n",
" x = bottleneck_block(x, filters=(256, 256, 1024))\n",
" # block5\n",
" x = bottleneck_block(x, filters=(512, 512, 2048), strides=(2, 2), is_conv_shortcuts=True)\n",
" x = bottleneck_block(x, filters=(512, 512, 2048))\n",
" x = bottleneck_block(x, filters=(512, 512, 2048))\n",
" x = AveragePooling2D(pool_size=(7, 7))(x)\n",
" x = Flatten()(x)\n",
" x = Dense(n_classes, activation='softmax')(x)\n",
"\n",
" model = Model(inputs=input_layer, outputs=x)\n",
" return model\n",
"# 生成训练数据\n",
" \n",
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
" \n",
"img_size = (32,32)\n",
"train_gen = ImageDataGenerator( validation_split=0.2)\n",
"\n",
"(x_train_1, y_train), (x_test_1, y_test) = load_data()\n",
"\n",
"import scipy\n",
"import scipy.misc\n",
"from skimage.transform import resize\n",
"new_shape = (244,244,3)\n",
"\n",
"print(\"Generating Data\")\n",
"\n",
"#x_train = np.empty(shape=(x_train_1.shape[0],)+new_shape)\n",
"#for idx in range(x_train_1.shape[0]):\n",
"# x_train[idx] = resize(x_train_1[idx], new_shape)\n",
"# if idx%100==0:\n",
"# print(idx,\"/\",x_train_1.shape[0])\n",
"x_train = x_train_1.astype(np.float32)\n",
"x_train = x_train / 255\n",
"#x_test = np.empty(shape=(x_test_1.shape[0],)+new_shape)\n",
"#for idx in range(x_test_1.shape[0]):\n",
"# x_test[idx] = resize(x_test_1[idx], new_shape)\n",
"\n",
"train_generator = train_gen.flow(x=x_train,y=y_train,\n",
" batch_size=32,\n",
" subset='training',\n",
" )\n",
"valid_generator = train_gen.flow(x=x_train,y=y_train,\n",
" batch_size=32,\n",
" subset='validation'\n",
" )\n",
" \n",
"# 编译模型\n",
"resize = keras.layers.Lambda(lambda img: K.resize_images(img, 7, 7, data_format='channels_last'), input_shape=(32, 32, 3),dtype=np.float32)\n",
"resnet34, resnet50_1 = ResNet34(n_classes=10), ResNet50(input_shape=(224,224,3),n_classes=10)\n",
"opt=keras.optimizers.SGD(learning_rate=0.01,)\n",
" \n",
"predef_resnet50 = ResNet50_32(n_classes=10)\n",
" \n",
"reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto',factor=0.1)\n",
"#resnet34.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n",
"#resnet50.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n",
"predef_resnet50.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n",
" \n",
"# 训练\n",
" \n",
"#istory_34 = resnet34.fit(train_generator, \n",
" # steps_per_epoch=train_generator.n//train_generator.batch_size, \n",
" # validation_data=valid_generator, \n",
" # validation_steps=valid_generator.n//valid_generator.batch_size, \n",
" # epochs=50, \n",
" # )\n",
"#save_model(resnet34,\"/content/drive/My Drive/resnet34.net\")\n",
" \n",
"predef_history_50 = predef_resnet50.fit(train_generator, \n",
" steps_per_epoch=train_generator.n//train_generator.batch_size, \n",
" validation_data=valid_generator, \n",
" validation_steps=valid_generator.n//valid_generator.batch_size, \n",
" epochs=100, \n",
"# callbacks=[reduce_lr],\n",
" )\n",
" \n",
"#save_model(resnet50,\"/content/drive/My Drive/resnet50.net\")\n",
" \n",
"#history34 = history_34\n",
"predef_history50 = predef_history_50\n",
" \n",
"# 绘制结果曲线\n",
" \n",
"import matplotlib.pyplot as plt\n",
" \n",
"plt.figure(figsize=(16, 8))\n",
"plt.subplot(1, 2, 1)\n",
"#plt.plot(np.arange(len(history34.history['loss'])), history34.history['loss'], label='resnet34 train loss')\n",
"plt.plot(np.arange(len(predef_history50.history['loss'])), predef_history50.history['loss'], label='predef_resnet50 train loss')\n",
"plt.legend(loc=0)\n",
" \n",
"plt.subplot(1, 2, 2)\n",
"#plt.plot(np.arange(len(history34.history['val_loss'])), history34.history['val_loss'], label='resnet34 valid loss')\n",
"plt.plot(np.arange(len(predef_history50.history['val_loss'])), predef_history50.history['val_loss'], label='predef_resnet50 valid loss')\n",
"plt.legend(loc=0)\n",
"plt.savefig('/content/drive/My Drive/predef_loss50.png')\n",
" \n",
"plt.figure(figsize=(16, 8))\n",
"plt.subplot(1, 2, 1)\n",
"#plt.plot(np.arange(len(history34.history['accuracy'])), history34.history['accuracy'], label='resnet34 train acc')\n",
"plt.plot(np.arange(len(predef_history50.history['accuracy'])), predef_history50.history['accuracy'], label='predef_resnet50 train acc')\n",
"plt.legend(loc=0)\n",
" \n",
"plt.subplot(1, 2, 2)\n",
"#plt.plot(np.arange(len(history34.history['val_accuracy'])), history34.history['val_accuracy'], label='resnet34 valid acc')\n",
"plt.plot(np.arange(len(predef_history50.history['val_accuracy'])), predef_history50.history['val_accuracy'], label='predef_resnet50 valid acc')\n",
"plt.legend(loc=0)\n",
" \n",
"plt.savefig('/content/drive/My Drive/predef_accuracy50.png')\n",
" \n",
"# 保存模型到云端硬盘"
],
"execution_count": 4,
"outputs": [
{
"output_type": "stream",
"text": [
"Generating Data\n"
],
"name": "stdout"
},
{
"output_type": "error",
"ename": "NameError",
"evalue": "ignored",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-4-7e1fe51a651a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 301\u001b[0m \u001b[0mreduce_lr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcallbacks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mReduceLROnPlateau\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmonitor\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'val_loss'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpatience\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'auto'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfactor\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0.1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 302\u001b[0m \u001b[0mresnet34\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'categorical_crossentropy'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mopt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmetrics\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'accuracy'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 303\u001b[0;31m \u001b[0mresnet50\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'categorical_crossentropy'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mopt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmetrics\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'accuracy'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 304\u001b[0m \u001b[0mpredef_resnet50\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'categorical_crossentropy'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mopt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmetrics\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'accuracy'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 305\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'resnet50' is not defined"
]
}
]
}
]
}