Created: September 20, 2019 15:33
Save SpicySyntax/33eacd7a84c8b5803f9aad0009613ca6 to your computer and use it in GitHub Desktop.
Keras Crack Classifier
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Notebook-style environment setup: framework imports, inline plotting,
# version printout, and a GPU-visibility check.
import tensorflow as tf
from IPython.display import display
from PIL import Image
import keras
# IPython magic — valid only inside a Jupyter notebook, not plain Python.
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import math
# Record the framework versions this was run under.
print(tf.__version__)
print(keras.__version__)
# View Available GPUs
from keras import backend as K
# NOTE(review): `_get_available_gpus` is a private Keras/TF-backend helper;
# it lists the GPUs visible to the current TensorFlow session.
K.tensorflow_backend._get_available_gpus()
"""------------------------ | |
Data Loaders | |
------------------------""" | |
from keras.preprocessing.image import ImageDataGenerator | |
from keras.applications.vgg16 import preprocess_input | |
img_height, img_width = 224, 224 | |
batch_size = 10 | |
train_data_dir = "./dataset" | |
train_datagen = ImageDataGenerator(shear_range=0.2, | |
zoom_range=0.2, | |
horizontal_flip=True, | |
rotation_range=45, | |
validation_split=0.2, | |
preprocessing_function=preprocess_input) # set validation split | |
train_generator = train_datagen.flow_from_directory(train_data_dir, | |
target_size=(img_height, img_width), | |
batch_size=batch_size, | |
class_mode='binary', | |
subset='training') # set as training data | |
validation_generator = train_datagen.flow_from_directory(train_data_dir, | |
target_size=(img_height, img_width), | |
batch_size=batch_size, | |
class_mode='binary', | |
subset='validation') # set as validation data | |
"""------------------------ | |
Create And Traing Model | |
------------------------""" | |
from keras import applications | |
from keras.models import Sequential, load_model | |
from keras.layers import Dropout, Flatten, Dense, Reshape, MaxPooling2D, Conv2D | |
from keras.regularizers import l2 | |
from keras.layers.normalization import BatchNormalization | |
from keras import optimizers | |
# Function to create the model, required for KerasClassifier | |
def create_model(learning_rate, num_dense_layers,
                 num_dense_nodes, activation, dropout_rate,
                 momentum):
    """Build a VGG16-based binary classifier for transfer learning.

    Hyper-parameters:
      learning_rate: SGD learning rate.
      num_dense_layers: number of Dense/Dropout/BatchNorm blocks appended.
      num_dense_nodes: width of each appended Dense layer.
      activation: activation function for the appended Dense layers.
      dropout_rate: dropout fraction applied after each appended Dense layer.
      momentum: SGD momentum.

    Returns a compiled Sequential model ending in a single sigmoid unit.
    """
    # Full ImageNet-pretrained VGG16, including its classifier head.
    vgg = applications.VGG16(weights='imagenet', include_top=True,
                             input_shape=(img_height, img_width, 3))
    model = Sequential()
    # Copy every VGG layer except the final 1000-way prediction layer
    # (`[:-1]` drops exactly one layer).
    for vgg_layer in vgg.layers[:-1]:
        model.add(vgg_layer)
    # New head: Dense -> Dropout -> BatchNorm, repeated num_dense_layers times.
    for block_num in range(1, num_dense_layers + 1):
        model.add(Dense(num_dense_nodes, activation=activation,
                        name='layer_dense_{0}'.format(block_num)))
        model.add(Dropout(dropout_rate))
        model.add(BatchNormalization())
    # Single sigmoid output for the binary crack / no-crack decision.
    model.add(Dense(1, activation='sigmoid'))
    # Freeze the first 11 copied layers (the earliest conv blocks) so only
    # the later layers and the new head are fine-tuned.
    for frozen_layer in model.layers[:11]:
        frozen_layer.trainable = False
    # Binary cross-entropy with plain SGD.
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.SGD(lr=learning_rate, momentum=momentum),
                  metrics=['accuracy'])
    return model
from keras.callbacks import ModelCheckpoint, EarlyStopping | |
from keras.backend import manual_variable_initialization | |
def train_model(model, epochs=40, epochs_to_wait=10,
                num_train_samples=5249, num_val_samples=1312):
    """Fit `model` on the module-level train/validation generators.

    Parameters:
      model: compiled Keras model to train.
      epochs: maximum number of training epochs.
      epochs_to_wait: early-stopping patience — epochs without a
          val_loss improvement before training halts.
      num_train_samples: training-set size used to derive
          steps_per_epoch (default keeps the original hard-coded count).
      num_val_samples: validation-set size used to derive
          validation_steps (default keeps the original hard-coded count).

    Returns:
      The Keras History object produced by fit_generator.
    """
    # Keep only the best (lowest val_loss) weights on disk.
    checkpoint_callback = ModelCheckpoint('crack_classifier_chkpt.h5',
                                          monitor='val_loss', verbose=1,
                                          save_best_only=True, mode='min')
    # Stop early once val_loss plateaus for `epochs_to_wait` epochs.
    early_stopping_callback = EarlyStopping(monitor='val_loss',
                                            patience=epochs_to_wait)
    # Use Keras to train the model; steps are whole batches per epoch.
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=num_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=num_val_samples // batch_size,
        verbose=True,
        callbacks=[early_stopping_callback, checkpoint_callback])
    return history
# Create the neural network with these hyper-parameters:
# learning_rate=1e-5, num_dense_layers=1, num_dense_nodes=16,
# activation='relu', dropout_rate=0.5, momentum=0.8.
model = create_model(1e-5, 1, 16, 'relu', .5,.8,)
# Train with the default 40 epochs / patience of 10.
history = train_model(model)
# Output Below | |
""" | |
Epoch 1/40 | |
524/524 [==============================] - 1329s 3s/step - loss: 0.8319 - acc: 0.5246 - val_loss: 0.6275 - val_acc: 0.7221 | |
Epoch 00001: val_loss improved from inf to 0.62746, saving model to crack_classifier_chkpt.h5 | |
Epoch 2/40 | |
524/524 [==============================] - 1312s 3s/step - loss: 0.7429 - acc: 0.5464 - val_loss: 0.6715 - val_acc: 0.7107 | |
Epoch 00002: val_loss did not improve from 0.62746 | |
Epoch 3/40 | |
524/524 [==============================] - 1156s 2s/step - loss: 0.7017 - acc: 0.5786 - val_loss: 0.6216 - val_acc: 0.8298 | |
Epoch 00003: val_loss improved from 0.62746 to 0.62158, saving model to crack_classifier_chkpt.h5 | |
Epoch 4/40 | |
524/524 [==============================] - 1280s 2s/step - loss: 0.6711 - acc: 0.6103 - val_loss: 0.6136 - val_acc: 0.8649 | |
Epoch 00004: val_loss improved from 0.62158 to 0.61362, saving model to crack_classifier_chkpt.h5 | |
Epoch 5/40 | |
524/524 [==============================] - 1460s 3s/step - loss: 0.6485 - acc: 0.6380 - val_loss: 0.5730 - val_acc: 0.9176 | |
Epoch 00005: val_loss improved from 0.61362 to 0.57303, saving model to crack_classifier_chkpt.h5 | |
Epoch 6/40 | |
524/524 [==============================] - 1369s 3s/step - loss: 0.6248 - acc: 0.6727 - val_loss: 0.5979 - val_acc: 0.8962 | |
Epoch 00006: val_loss did not improve from 0.57303 | |
Epoch 7/40 | |
524/524 [==============================] - 1079s 2s/step - loss: 0.6032 - acc: 0.6977 - val_loss: 0.6165 - val_acc: 0.8458 | |
Epoch 00007: val_loss did not improve from 0.57303 | |
Epoch 8/40 | |
524/524 [==============================] - 997s 2s/step - loss: 0.5784 - acc: 0.7260 - val_loss: 0.5539 - val_acc: 0.8733 | |
Epoch 00008: val_loss improved from 0.57303 to 0.55389, saving model to crack_classifier_chkpt.h5 | |
Epoch 9/40 | |
524/524 [==============================] - 976s 2s/step - loss: 0.5486 - acc: 0.7969 - val_loss: 0.5246 - val_acc: 0.9344 | |
Epoch 00009: val_loss improved from 0.55389 to 0.52456, saving model to crack_classifier_chkpt.h5 | |
Epoch 10/40 | |
524/524 [==============================] - 1004s 2s/step - loss: 0.5252 - acc: 0.8483 - val_loss: 0.5030 - val_acc: 0.9420 | |
Epoch 00010: val_loss improved from 0.52456 to 0.50295, saving model to crack_classifier_chkpt.h5 | |
Epoch 11/40 | |
524/524 [==============================] - 964s 2s/step - loss: 0.5062 - acc: 0.8912 - val_loss: 0.4630 - val_acc: 0.9893 | |
Epoch 00011: val_loss improved from 0.50295 to 0.46302, saving model to crack_classifier_chkpt.h5 | |
Epoch 12/40 | |
524/524 [==============================] - 1045s 2s/step - loss: 0.4782 - acc: 0.9924 - val_loss: 0.4979 - val_acc: 0.9916 | |
Epoch 00012: val_loss did not improve from 0.46302 | |
Epoch 13/40 | |
524/524 [==============================] - 1113s 2s/step - loss: 0.4631 - acc: 0.9968 - val_loss: 0.4762 - val_acc: 0.9931 | |
Epoch 00013: val_loss did not improve from 0.46302 | |
Epoch 14/40 | |
524/524 [==============================] - 1002s 2s/step - loss: 0.4499 - acc: 0.9954 - val_loss: 0.4477 - val_acc: 0.9962 | |
Epoch 00014: val_loss improved from 0.46302 to 0.44765, saving model to crack_classifier_chkpt.h5 | |
Epoch 15/40 | |
524/524 [==============================] - 954s 2s/step - loss: 0.4368 - acc: 0.9973 - val_loss: 0.4460 - val_acc: 0.9954 | |
Epoch 00015: val_loss improved from 0.44765 to 0.44603, saving model to crack_classifier_chkpt.h5 | |
Epoch 16/40 | |
524/524 [==============================] - 1197s 2s/step - loss: 0.4242 - acc: 0.9985 - val_loss: 0.4209 - val_acc: 0.9954 | |
Epoch 00016: val_loss improved from 0.44603 to 0.42094, saving model to crack_classifier_chkpt.h5 | |
Epoch 17/40 | |
524/524 [==============================] - 1043s 2s/step - loss: 0.4121 - acc: 0.9983 - val_loss: 0.4155 - val_acc: 0.9977 | |
Epoch 00017: val_loss improved from 0.42094 to 0.41545, saving model to crack_classifier_chkpt.h5 | |
Epoch 18/40 | |
524/524 [==============================] - 987s 2s/step - loss: 0.4008 - acc: 0.9983 - val_loss: 0.4102 - val_acc: 0.9947 | |
Epoch 00018: val_loss improved from 0.41545 to 0.41017, saving model to crack_classifier_chkpt.h5 | |
Epoch 19/40 | |
524/524 [==============================] - 1065s 2s/step - loss: 0.3900 - acc: 0.9981 - val_loss: 0.4013 - val_acc: 0.9939 | |
Epoch 00019: val_loss improved from 0.41017 to 0.40132, saving model to crack_classifier_chkpt.h5 | |
Epoch 20/40 | |
524/524 [==============================] - 990s 2s/step - loss: 0.3793 - acc: 0.9983 - val_loss: 0.3932 - val_acc: 0.9947 | |
Epoch 00020: val_loss improved from 0.40132 to 0.39316, saving model to crack_classifier_chkpt.h5 | |
Epoch 21/40 | |
524/524 [==============================] - 1082s 2s/step - loss: 0.3692 - acc: 0.9989 - val_loss: 0.3665 - val_acc: 0.9985 | |
Epoch 00021: val_loss improved from 0.39316 to 0.36652, saving model to crack_classifier_chkpt.h5 | |
Epoch 22/40 | |
524/524 [==============================] - 1116s 2s/step - loss: 0.3594 - acc: 0.9985 - val_loss: 0.3581 - val_acc: 0.9962 | |
Epoch 00022: val_loss improved from 0.36652 to 0.35806, saving model to crack_classifier_chkpt.h5 | |
Epoch 23/40 | |
524/524 [==============================] - 1180s 2s/step - loss: 0.3502 - acc: 0.9989 - val_loss: 0.3446 - val_acc: 0.9969 | |
Epoch 00023: val_loss improved from 0.35806 to 0.34459, saving model to crack_classifier_chkpt.h5 | |
Epoch 24/40 | |
524/524 [==============================] - 1100s 2s/step - loss: 0.3410 - acc: 0.9994 - val_loss: 0.3483 - val_acc: 0.9985 | |
Epoch 00024: val_loss did not improve from 0.34459 | |
Epoch 25/40 | |
524/524 [==============================] - 1057s 2s/step - loss: 0.3323 - acc: 0.9998 - val_loss: 0.3295 - val_acc: 0.9992 | |
Epoch 00025: val_loss improved from 0.34459 to 0.32946, saving model to crack_classifier_chkpt.h5 | |
Epoch 26/40 | |
524/524 [==============================] - 1295s 2s/step - loss: 0.3241 - acc: 0.9998 - val_loss: 0.3204 - val_acc: 0.9992 | |
Epoch 00026: val_loss improved from 0.32946 to 0.32040, saving model to crack_classifier_chkpt.h5 | |
Epoch 27/40 | |
524/524 [==============================] - 1089s 2s/step - loss: 0.3161 - acc: 0.9996 - val_loss: 0.3248 - val_acc: 0.9977 | |
Epoch 00027: val_loss did not improve from 0.32040 | |
Epoch 28/40 | |
524/524 [==============================] - 994s 2s/step - loss: 0.3084 - acc: 0.9996 - val_loss: 0.3103 - val_acc: 0.9985 | |
Epoch 00028: val_loss improved from 0.32040 to 0.31030, saving model to crack_classifier_chkpt.h5 | |
Epoch 29/40 | |
524/524 [==============================] - 994s 2s/step - loss: 0.3009 - acc: 1.0000 - val_loss: 0.3058 - val_acc: 0.9985 | |
Epoch 00029: val_loss improved from 0.31030 to 0.30578, saving model to crack_classifier_chkpt.h5 | |
Epoch 30/40 | |
524/524 [==============================] - 967s 2s/step - loss: 0.2938 - acc: 0.9996 - val_loss: 0.2994 - val_acc: 0.9992 | |
Epoch 00030: val_loss improved from 0.30578 to 0.29936, saving model to crack_classifier_chkpt.h5 | |
Epoch 31/40 | |
524/524 [==============================] - 986s 2s/step - loss: 0.2868 - acc: 0.9998 - val_loss: 0.2961 - val_acc: 0.9985 | |
Epoch 00031: val_loss improved from 0.29936 to 0.29615, saving model to crack_classifier_chkpt.h5 | |
Epoch 32/40 | |
524/524 [==============================] - 986s 2s/step - loss: 0.2801 - acc: 1.0000 - val_loss: 0.2820 - val_acc: 0.9985 | |
Epoch 00032: val_loss improved from 0.29615 to 0.28197, saving model to crack_classifier_chkpt.h5 | |
Epoch 33/40 | |
524/524 [==============================] - 1058s 2s/step - loss: 0.2736 - acc: 1.0000 - val_loss: 0.2736 - val_acc: 0.9992 | |
Epoch 00033: val_loss improved from 0.28197 to 0.27365, saving model to crack_classifier_chkpt.h5 | |
Epoch 34/40 | |
524/524 [==============================] - 971s 2s/step - loss: 0.2674 - acc: 0.9998 - val_loss: 0.2641 - val_acc: 1.0000 | |
Epoch 00034: val_loss improved from 0.27365 to 0.26415, saving model to crack_classifier_chkpt.h5 | |
Epoch 35/40 | |
524/524 [==============================] - 953s 2s/step - loss: 0.2615 - acc: 1.0000 - val_loss: 0.2630 - val_acc: 0.9992 | |
Epoch 00035: val_loss improved from 0.26415 to 0.26295, saving model to crack_classifier_chkpt.h5 | |
Epoch 36/40 | |
524/524 [==============================] - 1176s 2s/step - loss: 0.2556 - acc: 1.0000 - val_loss: 0.2660 - val_acc: 0.9977 | |
Epoch 00036: val_loss did not improve from 0.26295 | |
Epoch 37/40 | |
524/524 [==============================] - 1040s 2s/step - loss: 0.2502 - acc: 0.9996 - val_loss: 0.2531 - val_acc: 0.9992 | |
Epoch 00037: val_loss improved from 0.26295 to 0.25314, saving model to crack_classifier_chkpt.h5 | |
Epoch 38/40 | |
524/524 [==============================] - 1019s 2s/step - loss: 0.2446 - acc: 1.0000 - val_loss: 0.2431 - val_acc: 1.0000 | |
Epoch 00038: val_loss improved from 0.25314 to 0.24308, saving model to crack_classifier_chkpt.h5 | |
Epoch 39/40 | |
524/524 [==============================] - 1087s 2s/step - loss: 0.2393 - acc: 1.0000 - val_loss: 0.2380 - val_acc: 1.0000 | |
Epoch 00039: val_loss improved from 0.24308 to 0.23800, saving model to crack_classifier_chkpt.h5 | |
Epoch 40/40 | |
524/524 [==============================] - 988s 2s/step - loss: 0.2342 - acc: 1.0000 - val_loss: 0.2344 - val_acc: 1.0000 | |
Epoch 00040: val_loss improved from 0.23800 to 0.23444, saving model to crack_classifier_chkpt.h5 | |
""" | |
"""------------------------ | |
Test Model | |
------------------------""" | |
from keras.preprocessing.image import load_img | |
from keras.preprocessing.image import img_to_array | |
from keras.preprocessing.image import load_img | |
from keras.preprocessing.image import img_to_array | |
def _predict_single_image(model, image_path):
    """Load one image file, preprocess it for VGG16, and return the model's prediction."""
    # load an image from file at the network's input resolution
    image = load_img(image_path, target_size=(224, 224))
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # add the leading batch dimension the model expects
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # apply the same VGG16 preprocessing used during training
    image = preprocess_input(image)
    # predict the probability for the single sigmoid output
    return model.predict(image)

def test_crack_model(model):
    """Print the model's prediction for each hard-coded crack / no-crack sample image."""
    crack_test_images = ["crack.jpg", "crack1.jpg", "crack2.jpg", "crack3.jpg",
                         "crack4.jpg", "crack5.jpg", "crack6.jpg", "crack7.jpg",
                         "crack8.jpg", "crack9.jpg", "crack10.jpg", "crack11.jpg",
                         "crack12.jpg", "crack13.jpg"]
    nocrack_test_images = ["nocrack1.jpg", "nocrack2.jpg", "nocrack3.jpg",
                           "nocrack4.jpg", "nocrack5.jpg", "nocrack6.jpg",
                           "nocrack7.jpg", "nocrack8.jpg"]
    print("============== Cracks ===============")
    for i, crack_test_image in enumerate(crack_test_images):
        yhat = _predict_single_image(model, 'Images/crack/'+crack_test_image)
        print("{}: {}".format(i, yhat))
    print("============== No Cracks ===============")
    for i, nocrack_test_image in enumerate(nocrack_test_images):
        yhat = _predict_single_image(model, 'Images/nocrack/'+nocrack_test_image)
        print("{}: {}".format(i, yhat))

test_crack_model(model)
# Output Below (Model outputs results like it was not trained | |
""" | |
============== Cracks =============== | |
============== Cracks =============== | |
0: [[0.20922232]] | |
1: [[0.21883659]] | |
2: [[0.20403491]] | |
3: [[0.20747341]] | |
4: [[0.21976307]] | |
5: [[0.2170394]] | |
6: [[0.19359364]] | |
7: [[0.2190856]] | |
8: [[0.2077934]] | |
9: [[0.20873123]] | |
10: [[0.21976307]] | |
11: [[0.20891656]] | |
12: [[0.02694948]] | |
13: [[0.2004625]] | |
============== No Cracks =============== | |
0: [[0.21425322]] | |
1: [[0.9999826]] | |
2: [[0.2190724]] | |
3: [[0.21664117]] | |
4: [[0.21976307]] | |
5: [[0.21329497]] | |
6: [[0.20993665]] | |
7: [[0.20981254]] | |
""" | |
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.