Skip to content

Instantly share code, notes, and snippets.

@naruarjun
Created August 24, 2018 18:28
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save naruarjun/0552fb53ab23818b7d44a5b3a3b05e64 to your computer and use it in GitHub Desktop.
Save naruarjun/0552fb53ab23818b7d44a5b3a3b05e64 to your computer and use it in GitHub Desktop.
MAIN CLASSIFIER TRAINED FROM SCRATCH
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Classification Model\n",
"This is a classification model I created and trained from scratch. It is a basic LeNet architecture consiting of \n",
"Convolution->Maxpool->Convolution->Maxpool"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import tensorflow as tf\n",
"import os\n",
"import cv2\n",
"import skimage.io as io\n",
"import skimage.transform as trans\n",
"import keras\n",
"from keras.models import *\n",
"from keras.layers import *\n",
"from keras.optimizers import *\n",
"from keras.callbacks import ModelCheckpoint, LearningRateScheduler\n",
"from keras.models import model_from_json\n",
"from keras.preprocessing.image import ImageDataGenerator\n",
"\n",
"HEIGHT, WIDTH, CHANNEL = 130, 130, 3\n",
"BATCH_SIZE = 8\n",
"EPOCH = 10000"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def LeNet(keep_prob,input_size = (130,130,3)): \n",
" inputs = Input(input_size)\n",
" # TODO: Layer 1: Convolutional. Input = 34x34x1. Output = 32x32x10.\n",
" conv1 = Conv2D(10, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal',strides = (1,1) )(inputs)\n",
" # TODO: Pooling. Input = 32x32x10. Output = 16x16x10.\n",
" conv1 = MaxPooling2D(pool_size=(2, 2), padding = 'valid')(conv1)\n",
" # TODO: Layer 2: Convolutional. Output = 14x14x20.\n",
" conv2 = Conv2D(20, 10, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal',strides = (1,1))(conv1)\n",
" # TODO: Pooling. Input = 14x14x20. Output = 7x7x20.\n",
" conv2 = MaxPooling2D(pool_size=(2, 2), padding = 'valid')(conv2)\n",
" # TODO: Layer 2: Convolutional. Output = 5x5x40.\n",
" conv3 = Conv2D(40, 20, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal',strides = (1,1))(conv2)\n",
" dense0 = Flatten()(conv3)\n",
" # TODO: Layer 2: Fully Connected. Input = 1000. Output = 512.\n",
" dense1 = Dense(512,activation='relu', kernel_initializer = 'he_normal')(dense0)\n",
" # TODO: Layer 3: Fully Connected. Input = 512. Output = 256.\n",
" dense2 = Dense(256,activation='relu', kernel_initializer = 'he_normal')(dense1)\n",
" dense2 = Dropout(keep_prob)(dense2)\n",
" # TODO: Layer 4: Fully Connected. Input = 256. Output = 128.\n",
" dense3 = Dense(128,activation='relu', kernel_initializer = 'he_normal')(dense2)\n",
" dense3 = Dropout(keep_prob)(dense3)\n",
" # TODO: Layer 5: Fully Connected. Input = 128. Output = 43.\n",
" dense4 = Dense(8,activation='relu', kernel_initializer = 'he_normal')(dense3)\n",
" \n",
" \n",
" model = Model(input = inputs, output = dense4)\n",
" model.compile(optimizer = Adam(lr = 1e-4),loss='mean_squared_error',metrics=['accuracy'])\n",
" \n",
" print(model.summary())\n",
" return model\n",
"# def LeNet2(keep_prob,input_size = (130,130,3)): \n",
"# inputs = Input(input_size)\n",
"# # TODO: Layer 1: Convolutional. Input = 34x34x1. Output = 32x32x10.\n",
"# conv1 = Conv2D(10, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal',strides = (1,1) )(inputs)\n",
"# # TODO: Pooling. Input = 32x32x10. Output = 16x16x10.\n",
"# #conv1 = MaxPooling2D(pool_size=(2, 2), padding = 'valid')(conv1)\n",
"# # TODO: Layer 2: Convolutional. Output = 14x14x20.\n",
"# conv2 = Conv2D(20, 10, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal',strides = (1,1))(conv1)\n",
"# conv2 = Conv2D(30, 20, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal',strides = (1,1))(conv1)\n",
"# # TODO: Pooling. Input = 14x14x20. Output = 7x7x20.\n",
"# #conv2 = MaxPooling2D(pool_size=(2, 2), padding = 'valid')(conv2)\n",
"# # TODO: Layer 2: Convolutional. Output = 5x5x40.\n",
"# conv3 = Conv2D(40, 20, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal',strides = (1,1))(conv2)\n",
"# dense0 = Flatten()(conv3)\n",
"# # TODO: Layer 2: Fully Connected. Input = 1000. Output = 512.\n",
"# dense1 = Dense(512,activation='relu', kernel_initializer = 'he_normal')(dense0)\n",
"# # TODO: Layer 3: Fully Connected. Input = 512. Output = 256.\n",
"# dense2 = Dense(256,activation='relu', kernel_initializer = 'he_normal')(dense1)\n",
"# dense2 = Dropout(keep_prob)(dense2)\n",
"# # TODO: Layer 4: Fully Connected. Input = 256. Output = 128.\n",
"# dense3 = Dense(128,activation='relu', kernel_initializer = 'he_normal')(dense2)\n",
"# dense3 = Dropout(keep_prob)(dense3)\n",
"# # TODO: Layer 5: Fully Connected. Input = 128. Output = 43.\n",
"# dense4 = Dense(8,activation='relu', kernel_initializer = 'he_normal')(dense3)\n",
" \n",
" \n",
"# model = Model(input = inputs, output = dense4)\n",
"# model.compile(optimizer = Adam(lr = 1e-4),loss='mean_squared_error',metrics=['accuracy'])\n",
" \n",
"# print(model.summary())\n",
"# return model"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"input_1 (InputLayer) (None, 130, 130, 3) 0 \n",
"_________________________________________________________________\n",
"conv2d_1 (Conv2D) (None, 128, 128, 10) 280 \n",
"_________________________________________________________________\n",
"max_pooling2d_1 (MaxPooling2 (None, 64, 64, 10) 0 \n",
"_________________________________________________________________\n",
"conv2d_2 (Conv2D) (None, 55, 55, 20) 20020 \n",
"_________________________________________________________________\n",
"max_pooling2d_2 (MaxPooling2 (None, 27, 27, 20) 0 \n",
"_________________________________________________________________\n",
"conv2d_3 (Conv2D) (None, 8, 8, 40) 320040 \n",
"_________________________________________________________________\n",
"flatten_1 (Flatten) (None, 2560) 0 \n",
"_________________________________________________________________\n",
"dense_1 (Dense) (None, 512) 1311232 \n",
"_________________________________________________________________\n",
"dense_2 (Dense) (None, 256) 131328 \n",
"_________________________________________________________________\n",
"dropout_1 (Dropout) (None, 256) 0 \n",
"_________________________________________________________________\n",
"dense_3 (Dense) (None, 128) 32896 \n",
"_________________________________________________________________\n",
"dropout_2 (Dropout) (None, 128) 0 \n",
"_________________________________________________________________\n",
"dense_4 (Dense) (None, 8) 1032 \n",
"=================================================================\n",
"Total params: 1,816,828\n",
"Trainable params: 1,816,828\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n",
"None\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.5/dist-packages/ipykernel_launcher.py:26: UserWarning: Update your `Model` call to the Keras 2 API: `Model(inputs=Tensor(\"in..., outputs=Tensor(\"de...)`\n"
]
}
],
"source": [
"model = LeNet(0.3)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(4754, 130, 130, 3)\n"
]
}
],
"source": [
"data_dir = \"./natural_images\"\n",
"X_train = []\n",
"y_train = []\n",
"j=0\n",
"max=0\n",
"for i in os.listdir(data_dir):\n",
" temp_dir = data_dir+'/'+i\n",
" for pic in os.listdir(temp_dir):\n",
" photo = cv2.imread(temp_dir+'/'+pic)\n",
" photo = cv2.resize(photo,(130,130))\n",
" #photo = cv2.cvtColor(photo, cv2.COLOR_BGR2GRAY)\n",
" if(max<photo.shape[1]):\n",
" max = photo.shape[1]\n",
" #print(photo.shape)\n",
" X_train.append(photo)\n",
" y_train.append(j)\n",
" j=j+1\n",
"X_train = np.array(X_train)\n",
"print(X_train.shape)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"datagen = ImageDataGenerator(featurewise_center=True, samplewise_center=False, featurewise_std_normalization=True, samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-06, rotation_range=90, width_shift_range=0.2, height_shift_range=0.2, brightness_range=None, shear_range=0.0, zoom_range=0.0, channel_shift_range=0.0, fill_mode='nearest', cval=0.0, horizontal_flip=True, vertical_flip=True, rescale=None, preprocessing_function=None, data_format=None)\n",
"#datagen = ImageDataGenerator(featurewise_center=True, samplewise_center=False, featurewise_std_normalization=True, samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-06, rotation_range=0, width_shift_range=0.0, height_shift_range=0.0, brightness_range=None, shear_range=0.0, zoom_range=0.0, channel_shift_range=0.0, fill_mode='nearest', cval=0.0, horizontal_flip=False, vertical_flip=False, rescale=None, preprocessing_function=None, data_format=None)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(6899, 130, 130, 3)\n"
]
}
],
"source": [
"X_train = np.array(X_train)\n",
"datagen.fit(X_train)\n",
"print(X_train.shape)\n",
"from sklearn.utils import shuffle\n",
"X_train, y_train = shuffle(X_train, y_train)\n",
"from sklearn.model_selection import train_test_split\n",
"X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.33, random_state=42)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.1150 - acc: 0.2792 - val_loss: 0.0929 - val_acc: 0.3285\n",
"Epoch 2/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0819 - acc: 0.4310 - val_loss: 0.0767 - val_acc: 0.4941\n",
"Epoch 3/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0731 - acc: 0.5254 - val_loss: 0.0686 - val_acc: 0.5626\n",
"Epoch 4/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0675 - acc: 0.5850 - val_loss: 0.0563 - val_acc: 0.6965\n",
"Epoch 5/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0527 - acc: 0.7170 - val_loss: 0.0446 - val_acc: 0.7527\n",
"Epoch 6/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0459 - acc: 0.7547 - val_loss: 0.0394 - val_acc: 0.7870\n",
"Epoch 7/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0415 - acc: 0.7806 - val_loss: 0.0352 - val_acc: 0.8015\n",
"Epoch 8/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0370 - acc: 0.8101 - val_loss: 0.0320 - val_acc: 0.8256\n",
"Epoch 9/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0346 - acc: 0.8219 - val_loss: 0.0320 - val_acc: 0.8243\n",
"Epoch 10/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0329 - acc: 0.8301 - val_loss: 0.0307 - val_acc: 0.8256\n",
"Epoch 11/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0315 - acc: 0.8372 - val_loss: 0.0313 - val_acc: 0.8292\n",
"Epoch 12/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0300 - acc: 0.8444 - val_loss: 0.0319 - val_acc: 0.8208\n",
"Epoch 13/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0294 - acc: 0.8475 - val_loss: 0.0284 - val_acc: 0.8467\n",
"Epoch 14/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0277 - acc: 0.8566 - val_loss: 0.0278 - val_acc: 0.8454\n",
"Epoch 15/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0272 - acc: 0.8600 - val_loss: 0.0265 - val_acc: 0.8524\n",
"Epoch 16/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0264 - acc: 0.8630 - val_loss: 0.0259 - val_acc: 0.8538\n",
"Epoch 17/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0256 - acc: 0.8666 - val_loss: 0.0253 - val_acc: 0.8542\n",
"Epoch 18/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0248 - acc: 0.8703 - val_loss: 0.0251 - val_acc: 0.8634\n",
"Epoch 19/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0239 - acc: 0.8755 - val_loss: 0.0226 - val_acc: 0.8770\n",
"Epoch 20/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0236 - acc: 0.8775 - val_loss: 0.0243 - val_acc: 0.8687\n",
"Epoch 21/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0231 - acc: 0.8810 - val_loss: 0.0250 - val_acc: 0.8652\n",
"Epoch 22/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0223 - acc: 0.8842 - val_loss: 0.0253 - val_acc: 0.8612\n",
"Epoch 23/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0221 - acc: 0.8855 - val_loss: 0.0220 - val_acc: 0.8748\n",
"Epoch 24/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0214 - acc: 0.8898 - val_loss: 0.0234 - val_acc: 0.8696\n",
"Epoch 25/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0208 - acc: 0.8919 - val_loss: 0.0263 - val_acc: 0.8603\n",
"Epoch 26/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0206 - acc: 0.8930 - val_loss: 0.0230 - val_acc: 0.8726\n",
"Epoch 27/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0201 - acc: 0.8965 - val_loss: 0.0220 - val_acc: 0.8836\n",
"Epoch 28/200\n",
"1156/1155 [==============================] - 145s 125ms/step - loss: 0.0200 - acc: 0.8962 - val_loss: 0.0219 - val_acc: 0.8849\n",
"Epoch 29/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0192 - acc: 0.9009 - val_loss: 0.0234 - val_acc: 0.8735\n",
"Epoch 30/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0193 - acc: 0.9015 - val_loss: 0.0268 - val_acc: 0.8595\n",
"Epoch 31/200\n",
"1156/1155 [==============================] - 145s 125ms/step - loss: 0.0189 - acc: 0.9024 - val_loss: 0.0202 - val_acc: 0.8871\n",
"Epoch 32/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0185 - acc: 0.9036 - val_loss: 0.0212 - val_acc: 0.8854\n",
"Epoch 33/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0185 - acc: 0.9038 - val_loss: 0.0232 - val_acc: 0.8740\n",
"Epoch 34/200\n",
"1156/1155 [==============================] - 145s 125ms/step - loss: 0.0182 - acc: 0.9070 - val_loss: 0.0218 - val_acc: 0.8832\n",
"Epoch 35/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0179 - acc: 0.9068 - val_loss: 0.0210 - val_acc: 0.8876\n",
"Epoch 36/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0178 - acc: 0.9082 - val_loss: 0.0216 - val_acc: 0.8814\n",
"Epoch 37/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0178 - acc: 0.9081 - val_loss: 0.0229 - val_acc: 0.8757\n",
"Epoch 38/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0173 - acc: 0.9106 - val_loss: 0.0240 - val_acc: 0.8810\n",
"Epoch 39/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0169 - acc: 0.9139 - val_loss: 0.0218 - val_acc: 0.8805\n",
"Epoch 40/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0166 - acc: 0.9151 - val_loss: 0.0214 - val_acc: 0.8889\n",
"Epoch 41/200\n",
"1156/1155 [==============================] - 146s 127ms/step - loss: 0.0165 - acc: 0.9149 - val_loss: 0.0225 - val_acc: 0.8801\n",
"Epoch 42/200\n",
"1156/1155 [==============================] - 146s 127ms/step - loss: 0.0161 - acc: 0.9174 - val_loss: 0.0211 - val_acc: 0.8893\n",
"Epoch 43/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0165 - acc: 0.9162 - val_loss: 0.0202 - val_acc: 0.8893\n",
"Epoch 44/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0160 - acc: 0.9178 - val_loss: 0.0206 - val_acc: 0.8933\n",
"Epoch 45/200\n",
"1156/1155 [==============================] - 147s 128ms/step - loss: 0.0159 - acc: 0.9180 - val_loss: 0.0212 - val_acc: 0.8836\n",
"Epoch 46/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0156 - acc: 0.9193 - val_loss: 0.0212 - val_acc: 0.8858\n",
"Epoch 47/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0152 - acc: 0.9228 - val_loss: 0.0216 - val_acc: 0.8841\n",
"Epoch 48/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0151 - acc: 0.9230 - val_loss: 0.0228 - val_acc: 0.8775\n",
"Epoch 49/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0155 - acc: 0.9209 - val_loss: 0.0222 - val_acc: 0.8871\n",
"Epoch 50/200\n",
"1156/1155 [==============================] - 146s 127ms/step - loss: 0.0155 - acc: 0.9210 - val_loss: 0.0191 - val_acc: 0.8990\n",
"Epoch 51/200\n",
"1156/1155 [==============================] - 146s 126ms/step - loss: 0.0149 - acc: 0.9230 - val_loss: 0.0194 - val_acc: 0.8955\n",
"Epoch 52/200\n",
"1156/1155 [==============================] - 146s 126ms/step - loss: 0.0145 - acc: 0.9258 - val_loss: 0.0188 - val_acc: 0.9007\n",
"Epoch 53/200\n",
"1156/1155 [==============================] - 145s 126ms/step - loss: 0.0144 - acc: 0.9259 - val_loss: 0.0201 - val_acc: 0.8928\n",
"Epoch 54/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0150 - acc: 0.9234 - val_loss: 0.0189 - val_acc: 0.8990\n",
"Epoch 55/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0141 - acc: 0.9289 - val_loss: 0.0205 - val_acc: 0.8906\n",
"Epoch 56/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0138 - acc: 0.9302 - val_loss: 0.0186 - val_acc: 0.9016\n",
"Epoch 57/200\n",
"1156/1155 [==============================] - 145s 125ms/step - loss: 0.0140 - acc: 0.9281 - val_loss: 0.0203 - val_acc: 0.8950\n",
"Epoch 58/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0139 - acc: 0.9279 - val_loss: 0.0195 - val_acc: 0.8964\n",
"Epoch 59/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0139 - acc: 0.9286 - val_loss: 0.0210 - val_acc: 0.8871\n",
"Epoch 60/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0135 - acc: 0.9317 - val_loss: 0.0190 - val_acc: 0.8990\n",
"Epoch 61/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0135 - acc: 0.9321 - val_loss: 0.0198 - val_acc: 0.8937\n",
"Epoch 62/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0133 - acc: 0.9326 - val_loss: 0.0209 - val_acc: 0.8884\n",
"Epoch 63/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0133 - acc: 0.9330 - val_loss: 0.0196 - val_acc: 0.8933\n",
"Epoch 64/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0130 - acc: 0.9337 - val_loss: 0.0190 - val_acc: 0.9003\n",
"Epoch 65/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0128 - acc: 0.9347 - val_loss: 0.0211 - val_acc: 0.8880\n",
"Epoch 66/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0128 - acc: 0.9356 - val_loss: 0.0188 - val_acc: 0.8972\n",
"Epoch 67/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0128 - acc: 0.9344 - val_loss: 0.0197 - val_acc: 0.8972\n",
"Epoch 68/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0124 - acc: 0.9358 - val_loss: 0.0180 - val_acc: 0.9056\n",
"Epoch 69/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0126 - acc: 0.9361 - val_loss: 0.0197 - val_acc: 0.8950\n",
"Epoch 70/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0124 - acc: 0.9377 - val_loss: 0.0189 - val_acc: 0.8990\n",
"Epoch 71/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0122 - acc: 0.9383 - val_loss: 0.0187 - val_acc: 0.9012\n",
"Epoch 72/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0121 - acc: 0.9382 - val_loss: 0.0193 - val_acc: 0.8977\n",
"Epoch 73/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0124 - acc: 0.9362 - val_loss: 0.0205 - val_acc: 0.8906\n",
"Epoch 74/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0120 - acc: 0.9381 - val_loss: 0.0190 - val_acc: 0.9003\n",
"Epoch 75/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0118 - acc: 0.9402 - val_loss: 0.0198 - val_acc: 0.8977\n",
"Epoch 76/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0117 - acc: 0.9407 - val_loss: 0.0191 - val_acc: 0.9025\n",
"Epoch 77/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0118 - acc: 0.9389 - val_loss: 0.0189 - val_acc: 0.9021\n",
"Epoch 78/200\n",
"1156/1155 [==============================] - 143s 123ms/step - loss: 0.0112 - acc: 0.9435 - val_loss: 0.0180 - val_acc: 0.9056\n",
"Epoch 79/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0116 - acc: 0.9406 - val_loss: 0.0193 - val_acc: 0.8972\n",
"Epoch 80/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0117 - acc: 0.9402 - val_loss: 0.0195 - val_acc: 0.8986\n",
"Epoch 81/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0112 - acc: 0.9435 - val_loss: 0.0176 - val_acc: 0.9095\n",
"Epoch 82/200\n",
"1156/1155 [==============================] - 143s 123ms/step - loss: 0.0113 - acc: 0.9434 - val_loss: 0.0183 - val_acc: 0.9065\n",
"Epoch 83/200\n",
"1156/1155 [==============================] - 142s 123ms/step - loss: 0.0112 - acc: 0.9430 - val_loss: 0.0186 - val_acc: 0.8986\n",
"Epoch 84/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0110 - acc: 0.9440 - val_loss: 0.0191 - val_acc: 0.8994\n",
"Epoch 85/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0110 - acc: 0.9448 - val_loss: 0.0204 - val_acc: 0.8942\n",
"Epoch 86/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0110 - acc: 0.9435 - val_loss: 0.0200 - val_acc: 0.8981\n",
"Epoch 87/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0112 - acc: 0.9431 - val_loss: 0.0173 - val_acc: 0.9104\n",
"Epoch 88/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0105 - acc: 0.9478 - val_loss: 0.0222 - val_acc: 0.8884\n",
"Epoch 89/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0106 - acc: 0.9467 - val_loss: 0.0214 - val_acc: 0.8889\n",
"Epoch 90/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0107 - acc: 0.9463 - val_loss: 0.0211 - val_acc: 0.8906\n",
"Epoch 91/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0106 - acc: 0.9468 - val_loss: 0.0177 - val_acc: 0.9100\n",
"Epoch 92/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0102 - acc: 0.9494 - val_loss: 0.0193 - val_acc: 0.8968\n",
"Epoch 93/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0104 - acc: 0.9474 - val_loss: 0.0193 - val_acc: 0.9025\n",
"Epoch 94/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0103 - acc: 0.9486 - val_loss: 0.0181 - val_acc: 0.9091\n",
"Epoch 95/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0100 - acc: 0.9485 - val_loss: 0.0182 - val_acc: 0.9069\n",
"Epoch 96/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0101 - acc: 0.9498 - val_loss: 0.0163 - val_acc: 0.9148\n",
"Epoch 97/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0101 - acc: 0.9494 - val_loss: 0.0184 - val_acc: 0.9082\n",
"Epoch 98/200\n",
"1156/1155 [==============================] - 144s 125ms/step - loss: 0.0101 - acc: 0.9487 - val_loss: 0.0173 - val_acc: 0.9130\n",
"Epoch 99/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0104 - acc: 0.9481 - val_loss: 0.0190 - val_acc: 0.9043\n",
"Epoch 100/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0096 - acc: 0.9523 - val_loss: 0.0187 - val_acc: 0.9034\n",
"Epoch 101/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0098 - acc: 0.9509 - val_loss: 0.0175 - val_acc: 0.9091\n",
"Epoch 102/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0100 - acc: 0.9490 - val_loss: 0.0232 - val_acc: 0.8845\n",
"Epoch 103/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0100 - acc: 0.9502 - val_loss: 0.0172 - val_acc: 0.9100\n",
"Epoch 104/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0097 - acc: 0.9520 - val_loss: 0.0200 - val_acc: 0.8937\n",
"Epoch 105/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0096 - acc: 0.9523 - val_loss: 0.0176 - val_acc: 0.9091\n",
"Epoch 106/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0093 - acc: 0.9535 - val_loss: 0.0192 - val_acc: 0.9025\n",
"Epoch 107/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0098 - acc: 0.9501 - val_loss: 0.0187 - val_acc: 0.8990\n",
"Epoch 108/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0096 - acc: 0.9525 - val_loss: 0.0188 - val_acc: 0.9060\n",
"Epoch 109/200\n",
"1156/1155 [==============================] - 143s 123ms/step - loss: 0.0097 - acc: 0.9523 - val_loss: 0.0186 - val_acc: 0.9038\n",
"Epoch 110/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0092 - acc: 0.9539 - val_loss: 0.0188 - val_acc: 0.9043\n",
"Epoch 111/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0097 - acc: 0.9514 - val_loss: 0.0181 - val_acc: 0.9117\n",
"Epoch 112/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0091 - acc: 0.9543 - val_loss: 0.0209 - val_acc: 0.8968\n",
"Epoch 113/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0092 - acc: 0.9539 - val_loss: 0.0193 - val_acc: 0.9021\n",
"Epoch 114/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0089 - acc: 0.9559 - val_loss: 0.0179 - val_acc: 0.9069\n",
"Epoch 115/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0090 - acc: 0.9547 - val_loss: 0.0195 - val_acc: 0.9065\n",
"Epoch 116/200\n",
"1156/1155 [==============================] - 143s 123ms/step - loss: 0.0090 - acc: 0.9553 - val_loss: 0.0186 - val_acc: 0.9091\n",
"Epoch 117/200\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0090 - acc: 0.9554 - val_loss: 0.0181 - val_acc: 0.9095\n",
"Epoch 118/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0090 - acc: 0.9544 - val_loss: 0.0187 - val_acc: 0.9060\n",
"Epoch 119/200\n",
"1156/1155 [==============================] - 143s 123ms/step - loss: 0.0091 - acc: 0.9545 - val_loss: 0.0176 - val_acc: 0.9091\n",
"Epoch 120/200\n",
"1156/1155 [==============================] - 143s 123ms/step - loss: 0.0086 - acc: 0.9573 - val_loss: 0.0170 - val_acc: 0.9113\n",
"Epoch 121/200\n",
"1156/1155 [==============================] - 143s 123ms/step - loss: 0.0090 - acc: 0.9545 - val_loss: 0.0181 - val_acc: 0.9069\n",
"Epoch 122/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0089 - acc: 0.9557 - val_loss: 0.0181 - val_acc: 0.9073\n",
"Epoch 123/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0088 - acc: 0.9557 - val_loss: 0.0170 - val_acc: 0.9122\n",
"Epoch 124/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0090 - acc: 0.9550 - val_loss: 0.0173 - val_acc: 0.9152\n",
"Epoch 125/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0086 - acc: 0.9564 - val_loss: 0.0177 - val_acc: 0.9029\n",
"Epoch 126/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0084 - acc: 0.9584 - val_loss: 0.0185 - val_acc: 0.9012\n",
"Epoch 127/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0083 - acc: 0.9585 - val_loss: 0.0182 - val_acc: 0.9091\n",
"Epoch 128/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0085 - acc: 0.9577 - val_loss: 0.0201 - val_acc: 0.8986\n",
"Epoch 129/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0087 - acc: 0.9583 - val_loss: 0.0198 - val_acc: 0.8994\n",
"Epoch 130/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0083 - acc: 0.9586 - val_loss: 0.0177 - val_acc: 0.9038\n",
"Epoch 131/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0084 - acc: 0.9571 - val_loss: 0.0169 - val_acc: 0.9170\n",
"Epoch 132/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0082 - acc: 0.9593 - val_loss: 0.0178 - val_acc: 0.9108\n",
"Epoch 133/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0085 - acc: 0.9578 - val_loss: 0.0172 - val_acc: 0.9157\n",
"Epoch 134/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0082 - acc: 0.9593 - val_loss: 0.0174 - val_acc: 0.9100\n",
"Epoch 135/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0083 - acc: 0.9584 - val_loss: 0.0172 - val_acc: 0.9126\n",
"Epoch 136/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0082 - acc: 0.9585 - val_loss: 0.0185 - val_acc: 0.9051\n",
"Epoch 137/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0079 - acc: 0.9608 - val_loss: 0.0179 - val_acc: 0.9091\n",
"Epoch 138/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0077 - acc: 0.9617 - val_loss: 0.0195 - val_acc: 0.9051\n",
"Epoch 139/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0080 - acc: 0.9607 - val_loss: 0.0169 - val_acc: 0.9174\n",
"Epoch 140/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0081 - acc: 0.9600 - val_loss: 0.0175 - val_acc: 0.9091\n",
"Epoch 141/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0079 - acc: 0.9615 - val_loss: 0.0201 - val_acc: 0.8990\n",
"Epoch 142/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0080 - acc: 0.9598 - val_loss: 0.0194 - val_acc: 0.9073\n",
"Epoch 143/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0078 - acc: 0.9614 - val_loss: 0.0172 - val_acc: 0.9126\n",
"Epoch 144/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0075 - acc: 0.9628 - val_loss: 0.0182 - val_acc: 0.9060\n",
"Epoch 145/200\n",
"1156/1155 [==============================] - 142s 123ms/step - loss: 0.0075 - acc: 0.9630 - val_loss: 0.0183 - val_acc: 0.9073\n",
"Epoch 146/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0078 - acc: 0.9606 - val_loss: 0.0179 - val_acc: 0.9126\n",
"Epoch 147/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0075 - acc: 0.9631 - val_loss: 0.0181 - val_acc: 0.9087\n",
"Epoch 148/200\n",
"1156/1155 [==============================] - 144s 124ms/step - loss: 0.0079 - acc: 0.9602 - val_loss: 0.0174 - val_acc: 0.9104\n",
"Epoch 149/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0074 - acc: 0.9633 - val_loss: 0.0192 - val_acc: 0.9069\n",
"Epoch 150/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0077 - acc: 0.9620 - val_loss: 0.0189 - val_acc: 0.9047\n",
"Epoch 151/200\n",
"1156/1155 [==============================] - 143s 123ms/step - loss: 0.0074 - acc: 0.9639 - val_loss: 0.0187 - val_acc: 0.9082\n",
"Epoch 152/200\n",
"1156/1155 [==============================] - 143s 123ms/step - loss: 0.0073 - acc: 0.9634 - val_loss: 0.0188 - val_acc: 0.9078\n",
"Epoch 153/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0073 - acc: 0.9632 - val_loss: 0.0172 - val_acc: 0.9157\n",
"Epoch 154/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0073 - acc: 0.9633 - val_loss: 0.0187 - val_acc: 0.9091\n",
"Epoch 155/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0073 - acc: 0.9640 - val_loss: 0.0185 - val_acc: 0.9078\n",
"Epoch 156/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0073 - acc: 0.9638 - val_loss: 0.0182 - val_acc: 0.9095\n",
"Epoch 157/200\n",
"1156/1155 [==============================] - 143s 124ms/step - loss: 0.0076 - acc: 0.9622 - val_loss: 0.0168 - val_acc: 0.9126\n",
"Epoch 158/200\n",
"1156/1155 [==============================] - 146s 126ms/step - loss: 0.0076 - acc: 0.9609 - val_loss: 0.0184 - val_acc: 0.9126\n",
"Epoch 159/200\n",
"1156/1155 [==============================] - 151s 131ms/step - loss: 0.0073 - acc: 0.9638 - val_loss: 0.0184 - val_acc: 0.9095\n",
"Epoch 160/200\n",
"1156/1155 [==============================] - 148s 128ms/step - loss: 0.0069 - acc: 0.9653 - val_loss: 0.0184 - val_acc: 0.9095\n",
"Epoch 161/200\n",
"1156/1155 [==============================] - 150s 130ms/step - loss: 0.0070 - acc: 0.9653 - val_loss: 0.0174 - val_acc: 0.9139\n",
"Epoch 162/200\n",
"1156/1155 [==============================] - 150s 130ms/step - loss: 0.0072 - acc: 0.9649 - val_loss: 0.0175 - val_acc: 0.9135\n",
"Epoch 163/200\n",
"1156/1155 [==============================] - 152s 131ms/step - loss: 0.0068 - acc: 0.9671 - val_loss: 0.0181 - val_acc: 0.9104\n",
"Epoch 164/200\n",
"1156/1155 [==============================] - 150s 129ms/step - loss: 0.0073 - acc: 0.9640 - val_loss: 0.0175 - val_acc: 0.9157\n",
"Epoch 165/200\n",
"1156/1155 [==============================] - 151s 130ms/step - loss: 0.0068 - acc: 0.9660 - val_loss: 0.0187 - val_acc: 0.9073\n",
"Epoch 166/200\n",
"1156/1155 [==============================] - 150s 129ms/step - loss: 0.0072 - acc: 0.9647 - val_loss: 0.0198 - val_acc: 0.8994\n",
"Epoch 167/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0069 - acc: 0.9662 - val_loss: 0.0184 - val_acc: 0.9078\n",
"Epoch 168/200\n",
"1156/1155 [==============================] - 148s 128ms/step - loss: 0.0069 - acc: 0.9659 - val_loss: 0.0182 - val_acc: 0.9087\n",
"Epoch 169/200\n",
"1156/1155 [==============================] - 150s 130ms/step - loss: 0.0068 - acc: 0.9671 - val_loss: 0.0205 - val_acc: 0.9003\n",
"Epoch 170/200\n",
"1156/1155 [==============================] - 153s 132ms/step - loss: 0.0068 - acc: 0.9665 - val_loss: 0.0180 - val_acc: 0.9122\n",
"Epoch 171/200\n",
"1156/1155 [==============================] - 148s 128ms/step - loss: 0.0073 - acc: 0.9636 - val_loss: 0.0208 - val_acc: 0.8972\n",
"Epoch 172/200\n",
"1156/1155 [==============================] - 145s 126ms/step - loss: 0.0070 - acc: 0.9658 - val_loss: 0.0175 - val_acc: 0.9148\n",
"Epoch 173/200\n",
"1156/1155 [==============================] - 146s 126ms/step - loss: 0.0066 - acc: 0.9675 - val_loss: 0.0177 - val_acc: 0.9130\n",
"Epoch 174/200\n",
"1156/1155 [==============================] - 145s 125ms/step - loss: 0.0067 - acc: 0.9669 - val_loss: 0.0173 - val_acc: 0.9161\n",
"Epoch 175/200\n",
"1156/1155 [==============================] - 146s 126ms/step - loss: 0.0065 - acc: 0.9677 - val_loss: 0.0181 - val_acc: 0.9113\n",
"Epoch 176/200\n",
"1156/1155 [==============================] - 145s 126ms/step - loss: 0.0070 - acc: 0.9653 - val_loss: 0.0178 - val_acc: 0.9130\n",
"Epoch 177/200\n",
"1156/1155 [==============================] - 146s 126ms/step - loss: 0.0068 - acc: 0.9666 - val_loss: 0.0203 - val_acc: 0.8977\n",
"Epoch 178/200\n",
"1156/1155 [==============================] - 146s 126ms/step - loss: 0.0066 - acc: 0.9667 - val_loss: 0.0195 - val_acc: 0.9021\n",
"Epoch 179/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0068 - acc: 0.9666 - val_loss: 0.0183 - val_acc: 0.9104\n",
"Epoch 180/200\n",
"1156/1155 [==============================] - 146s 126ms/step - loss: 0.0070 - acc: 0.9654 - val_loss: 0.0182 - val_acc: 0.9130\n",
"Epoch 181/200\n",
"1156/1155 [==============================] - 146s 126ms/step - loss: 0.0063 - acc: 0.9689 - val_loss: 0.0192 - val_acc: 0.9060\n",
"Epoch 182/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0067 - acc: 0.9671 - val_loss: 0.0184 - val_acc: 0.9108\n",
"Epoch 183/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0065 - acc: 0.9684 - val_loss: 0.0181 - val_acc: 0.9122\n",
"Epoch 184/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0067 - acc: 0.9671 - val_loss: 0.0191 - val_acc: 0.9082\n",
"Epoch 185/200\n",
"1156/1155 [==============================] - 148s 128ms/step - loss: 0.0063 - acc: 0.9682 - val_loss: 0.0178 - val_acc: 0.9122\n",
"Epoch 186/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0063 - acc: 0.9696 - val_loss: 0.0200 - val_acc: 0.9025\n",
"Epoch 187/200\n",
"1156/1155 [==============================] - 147s 128ms/step - loss: 0.0067 - acc: 0.9669 - val_loss: 0.0205 - val_acc: 0.9016\n",
"Epoch 188/200\n",
"1156/1155 [==============================] - 146s 126ms/step - loss: 0.0065 - acc: 0.9671 - val_loss: 0.0184 - val_acc: 0.9065\n",
"Epoch 189/200\n",
"1156/1155 [==============================] - 148s 128ms/step - loss: 0.0066 - acc: 0.9673 - val_loss: 0.0200 - val_acc: 0.8990\n",
"Epoch 190/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0066 - acc: 0.9677 - val_loss: 0.0211 - val_acc: 0.9043\n",
"Epoch 191/200\n",
"1156/1155 [==============================] - 148s 128ms/step - loss: 0.0063 - acc: 0.9687 - val_loss: 0.0165 - val_acc: 0.9201\n",
"Epoch 192/200\n",
"1156/1155 [==============================] - 148s 128ms/step - loss: 0.0059 - acc: 0.9708 - val_loss: 0.0200 - val_acc: 0.8999\n",
"Epoch 193/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0062 - acc: 0.9706 - val_loss: 0.0179 - val_acc: 0.9174\n",
"Epoch 194/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0067 - acc: 0.9670 - val_loss: 0.0182 - val_acc: 0.9122\n",
"Epoch 195/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0064 - acc: 0.9688 - val_loss: 0.0180 - val_acc: 0.9117\n",
"Epoch 196/200\n",
"1156/1155 [==============================] - 148s 128ms/step - loss: 0.0061 - acc: 0.9701 - val_loss: 0.0191 - val_acc: 0.9051\n",
"Epoch 197/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0060 - acc: 0.9708 - val_loss: 0.0164 - val_acc: 0.9227\n",
"Epoch 198/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0061 - acc: 0.9692 - val_loss: 0.0183 - val_acc: 0.9117\n",
"Epoch 199/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0065 - acc: 0.9677 - val_loss: 0.0181 - val_acc: 0.9144\n",
"Epoch 200/200\n",
"1156/1155 [==============================] - 147s 127ms/step - loss: 0.0060 - acc: 0.9704 - val_loss: 0.0187 - val_acc: 0.9047\n"
]
},
{
"data": {
"text/plain": [
"<keras.callbacks.History at 0x7fc7dbdc9a20>"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"batch_size = 32\n",
"one_hot_labels = keras.utils.to_categorical(y_train, num_classes=8)\n",
"one_hot_test = keras.utils.to_categorical(y_test, num_classes=8)\n",
"epochs = 200\n",
"model.fit_generator(datagen.flow(X_train, one_hot_labels, batch_size=batch_size), steps_per_epoch = (len(X_train))/4, epochs=epochs, verbose=1, validation_data=datagen.flow(X_test, one_hot_test, batch_size=len(X_test)), validation_steps=1, shuffle=True)"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Saved model to disk\n"
]
}
],
"source": [
"model_json = model.to_json()\n",
"with open(\"model.json\", \"w\") as json_file:\n",
" json_file.write(model_json)\n",
"model.save_weights(\"model_2.h5\")\n",
"print(\"Saved model to disk\")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Loaded model from disk\n",
"2277/2277 [==============================] - 5s 2ms/step\n"
]
}
],
"source": [
"json_file = open('model.json', 'r')\n",
"loaded_model_json = json_file.read()\n",
"json_file.close()\n",
"model = model_from_json(loaded_model_json)\n",
"# load weights into new model\n",
"model.load_weights(\"model_2.h5\")\n",
"print(\"Loaded model from disk\")\n",
"one_hot_labels = keras.utils.to_categorical(y_train, num_classes=8)\n",
"one_hot_test = keras.utils.to_categorical(y_test, num_classes=8)\n",
"# evaluate loaded model on test data\n",
"model.compile(optimizer = Adam(lr = 1e-4),loss='mean_squared_error',metrics=['accuracy'])\n",
"score = model.evaluate_generator(datagen.flow(X_test, one_hot_test,batch_size=1), verbose=1)"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"acc: 90.87%\n"
]
}
],
"source": [
"print(\"%s: %.2f%%\" % (model.metrics_names[1], score[1]*100))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment