Last active
September 30, 2019 17:52
-
-
Save anupamchugh/9bdb12ee2daf28575c0667622ab1b420 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{ | |
"nbformat": 4, | |
"nbformat_minor": 0, | |
"metadata": { | |
"colab": { | |
"name": "CategoricalClassification.ipynb", | |
"provenance": [], | |
"collapsed_sections": [] | |
}, | |
"kernelspec": { | |
"name": "python3", | |
"display_name": "Python 3" | |
} | |
}, | |
"cells": [ | |
{ | |
"cell_type": "code", | |
"metadata": { | |
"id": "7Ihybkh9hivU", | |
"colab_type": "code", | |
"colab": { | |
"base_uri": "https://localhost:8080/", | |
"height": 34 | |
}, | |
"outputId": "09e744aa-5e39-4fb2-9d34-301270480936" | |
}, | |
"source": [ | |
"# Unpack the images to classify into ./predict/ (used by the prediction cell).\n",
"from zipfile import ZipFile\n",
"file_name = \"predict.zip\"\n",
"\n",
"# 'archive' rather than 'zip' so the builtin zip() is not shadowed.\n",
"with ZipFile(file_name, 'r') as archive:\n",
"    archive.extractall()\n",
"    print('Done')"
], | |
"execution_count": 5, | |
"outputs": [ | |
{ | |
"output_type": "stream", | |
"text": [ | |
"Done\n" | |
], | |
"name": "stdout" | |
} | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"metadata": { | |
"id": "4CSzA0OmlExF", | |
"colab_type": "code", | |
"colab": { | |
"base_uri": "https://localhost:8080/", | |
"height": 34 | |
}, | |
"outputId": "3a83f46d-3ebf-4e0b-ffa2-4b3cf0b15c9b" | |
}, | |
"source": [ | |
"# Unpack the training/validation dataset into ./data/ (expects data/train\n",
"# and data/validation inside the archive, per the generator paths below).\n",
"from zipfile import ZipFile\n",
"file_name = \"data.zip\"\n",
"\n",
"# 'archive' rather than 'zip' so the builtin zip() is not shadowed.\n",
"with ZipFile(file_name, 'r') as archive:\n",
"    archive.extractall()\n",
"    print('Done')"
], | |
"execution_count": 6, | |
"outputs": [ | |
{ | |
"output_type": "stream", | |
"text": [ | |
"Done\n" | |
], | |
"name": "stdout" | |
} | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"metadata": { | |
"id": "oruMSnOci7PX", | |
"colab_type": "code", | |
"colab": {} | |
}, | |
"source": [ | |
"# Imports and training configuration for the 2-class image classifier.\n",
"# NOTE(review): pandas, Callback and ModelCheckpoint are imported but never\n",
"# used anywhere in this notebook -- candidates for removal.\n",
"import pandas as pd\n",
"from keras.preprocessing.image import ImageDataGenerator\n",
"from keras.models import Model\n",
"from keras.layers import Input, Convolution2D, MaxPooling2D, Dropout, Flatten, Dense\n",
"from keras.callbacks import Callback, ModelCheckpoint\n",
"\n",
"# All images are resized to this size by the data generators.\n",
"IMG_WIDTH, IMG_HEIGHT = 150, 150\n",
"# flow_from_directory expects one subdirectory per class under each root.\n",
"TRAIN_DATA_DIR = 'data/train'\n",
"VALIDATION_DATA_DIR = 'data/validation'\n",
"# Nominal sample counts; used to derive steps_per_epoch / validation_steps\n",
"# in the training cell.\n",
"NB_TRAIN_SAMPLES = 2000\n",
"NB_VALIDATION_SAMPLES = 800\n",
"NB_EPOCH = 5\n",
"BATCH_SIZE = 32"
], | |
"execution_count": 0, | |
"outputs": [] | |
}, | |
{ | |
"cell_type": "code", | |
"metadata": { | |
"id": "7pVtjjWPjHg-", | |
"colab_type": "code", | |
"colab": { | |
"base_uri": "https://localhost:8080/", | |
"height": 377 | |
}, | |
"outputId": "681d9cc6-d45a-4918-c2c4-235647951412" | |
}, | |
"source": [ | |
"# Build a small 3-block convnet (conv/pool x3 -> dense -> 2-way softmax),\n",
"# train it on augmented images, and save the weights to model.h5.\n",
"# Kernel sizes are passed as tuples and Model uses inputs=/outputs= -- the\n",
"# Keras 2 forms that the deprecation warnings in the recorded output asked for.\n",
"# 'inputs' avoids shadowing the builtin input().\n",
"inputs = Input(shape=(IMG_WIDTH, IMG_HEIGHT, 3,))\n",
"x = Convolution2D(32, (3, 3), activation='relu')(inputs)\n",
"x = MaxPooling2D(pool_size=(2, 2))(x)\n",
"x = Convolution2D(32, (3, 3), activation='relu')(x)\n",
"x = MaxPooling2D(pool_size=(2, 2))(x)\n",
"x = Convolution2D(64, (3, 3), activation='relu')(x)\n",
"x = MaxPooling2D(pool_size=(2, 2))(x)\n",
"x = Flatten()(x)\n",
"x = Dense(64, activation='relu')(x)\n",
"x = Dropout(0.5)(x)\n",
"# Two-way softmax to match class_mode='categorical' below.\n",
"x = Dense(2, activation='softmax')(x)\n",
"model = Model(inputs=inputs, outputs=x)\n",
"\n",
"model.compile(loss='categorical_crossentropy',\n",
"              optimizer='rmsprop',\n",
"              metrics=['accuracy'])\n",
"\n",
"\n",
"# Augment only the training data; validation images are just rescaled.\n",
"train_datagen = ImageDataGenerator(\n",
"    rescale=1./255,\n",
"    shear_range=0.2,\n",
"    zoom_range=0.2,\n",
"    horizontal_flip=True)\n",
"\n",
"test_datagen = ImageDataGenerator(rescale=1./255)\n",
"\n",
"train_generator = train_datagen.flow_from_directory(\n",
"    TRAIN_DATA_DIR,\n",
"    target_size=(IMG_WIDTH, IMG_HEIGHT),\n",
"    batch_size=BATCH_SIZE,\n",
"    class_mode='categorical')\n",
"\n",
"validation_generator = test_datagen.flow_from_directory(\n",
"    VALIDATION_DATA_DIR,\n",
"    target_size=(IMG_WIDTH, IMG_HEIGHT),\n",
"    batch_size=BATCH_SIZE,\n",
"    class_mode='categorical')\n",
"\n",
"# BUG FIX: the original used an undefined lowercase 'batch_size' in the two\n",
"# step counts, which raises NameError on a fresh kernel (it only ran here via\n",
"# hidden state left over from a deleted cell).\n",
"model.fit_generator(\n",
"    train_generator,\n",
"    steps_per_epoch=NB_TRAIN_SAMPLES // BATCH_SIZE,\n",
"    epochs=NB_EPOCH,\n",
"    validation_data=validation_generator,\n",
"    validation_steps=NB_VALIDATION_SAMPLES // BATCH_SIZE)\n",
"\n",
"model.save('model.h5')\n"
], | |
"execution_count": 14, | |
"outputs": [ | |
{ | |
"output_type": "stream", | |
"text": [ | |
"/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:2: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(32, (3, 3), activation=\"relu\")`\n", | |
" \n", | |
"/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:4: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(32, (3, 3), activation=\"relu\")`\n", | |
" after removing the cwd from sys.path.\n", | |
"/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:6: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(64, (3, 3), activation=\"relu\")`\n", | |
" \n", | |
"/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:12: UserWarning: Update your `Model` call to the Keras 2 API: `Model(inputs=Tensor(\"in..., outputs=Tensor(\"de...)`\n", | |
" if sys.path[0] == '':\n" | |
], | |
"name": "stderr" | |
}, | |
{ | |
"output_type": "stream", | |
"text": [ | |
"Found 2001 images belonging to 2 classes.\n", | |
"Found 1008 images belonging to 2 classes.\n", | |
"Epoch 1/5\n", | |
"125/125 [==============================] - 152s 1s/step - loss: 0.6994 - acc: 0.5678 - val_loss: 0.6287 - val_acc: 0.6717\n", | |
"Epoch 2/5\n", | |
"125/125 [==============================] - 150s 1s/step - loss: 0.6277 - acc: 0.6614 - val_loss: 0.6046 - val_acc: 0.6888\n", | |
"Epoch 3/5\n", | |
"125/125 [==============================] - 151s 1s/step - loss: 0.5723 - acc: 0.7099 - val_loss: 0.6208 - val_acc: 0.6679\n", | |
"Epoch 4/5\n", | |
"125/125 [==============================] - 150s 1s/step - loss: 0.5546 - acc: 0.7254 - val_loss: 0.6821 - val_acc: 0.6467\n", | |
"Epoch 5/5\n", | |
"125/125 [==============================] - 151s 1s/step - loss: 0.5224 - acc: 0.7562 - val_loss: 0.5590 - val_acc: 0.7165\n" | |
], | |
"name": "stdout" | |
} | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"metadata": { | |
"id": "djJED46Gmn3G", | |
"colab_type": "code", | |
"colab": { | |
"base_uri": "https://localhost:8080/", | |
"height": 34 | |
}, | |
"outputId": "dd1ca4ac-d514-427c-9c08-cce3a9226234" | |
}, | |
"source": [ | |
"# Evaluate on the validation generator; evaluate() returns [loss, accuracy]\n",
"# per the compile() metrics, so index 1 is the accuracy.\n",
"eval_metrics = model.evaluate(validation_generator, verbose=0)\n",
"print(\"accuracy: \", eval_metrics[1])"
], | |
"execution_count": 15, | |
"outputs": [ | |
{ | |
"output_type": "stream", | |
"text": [ | |
"accuracy: 0.7212301587301587\n" | |
], | |
"name": "stdout" | |
} | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"metadata": { | |
"id": "hq0Ju_CdmB5E", | |
"colab_type": "code", | |
"colab": { | |
"base_uri": "https://localhost:8080/", | |
"height": 238 | |
}, | |
"outputId": "62687f9b-576a-4316-b5d7-c1c321955069" | |
}, | |
"source": [ | |
"# Reload the saved model and run it over every file in ./predict/, printing\n",
"# the raw softmax output for each image.\n",
"from keras.models import load_model\n",
"from keras.preprocessing import image\n",
"import numpy as np\n",
"from os import listdir\n",
"from os.path import isfile, join\n",
"\n",
"# dimensions of our images -- must match the size used at training time\n",
"img_width, img_height = 150, 150\n",
"\n",
"# load the model we saved\n",
"model = load_model('model.h5')\n",
"model.compile(loss='categorical_crossentropy',\n",
"              optimizer='rmsprop',\n",
"              metrics=['accuracy'])\n",
"\n",
"mypath = \"predict/\"\n",
"\n",
"# sorted() makes the prediction order deterministic across filesystems.\n",
"onlyfiles = sorted(f for f in listdir(mypath) if isfile(join(mypath, f)))\n",
"print(onlyfiles)\n",
"# predicting images\n",
"for file in onlyfiles:\n",
"    img = image.load_img(mypath+file, target_size=(img_width, img_height))\n",
"    x = image.img_to_array(img)\n",
"    # BUG FIX: training used ImageDataGenerator(rescale=1./255), so inference\n",
"    # must apply the same scaling; without it the model receives raw 0-255\n",
"    # pixel values it was never trained on.\n",
"    x = x / 255.0\n",
"    x = np.expand_dims(x, axis=0)\n",
"\n",
"    result = model.predict(x)\n",
"    print(\"predict result..\")\n",
"    print(result)"
], | |
"execution_count": 16, | |
"outputs": [ | |
{ | |
"output_type": "stream", | |
"text": [ | |
"['1.jpg', '2.jpg', '6.jpg', '5.jpg', '4.jpg', '3.jpg']\n", | |
"predict result..\n", | |
"[[1. 0.]]\n", | |
"predict result..\n", | |
"[[1. 0.]]\n", | |
"predict result..\n", | |
"[[1. 0.]]\n", | |
"predict result..\n", | |
"[[1. 0.]]\n", | |
"predict result..\n", | |
"[[1. 0.]]\n", | |
"predict result..\n", | |
"[[0. 1.]]\n" | |
], | |
"name": "stdout" | |
} | |
] | |
} | |
] | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment