Ved Prakash Dubey (VedPDubey) · GitHub gists

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, 3, activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Conv2D(32, 3, activation='relu'),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Conv2D(32, 5, activation='relu'),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Conv2D(64, 3, activation='relu'),
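    # The gist preview cuts off mid-model; the closing layers and compile call
    # below are a hedged sketch, not the original code. The 2-unit softmax head
    # matches the two-class labels and 'categorical' mode used in later snippets.
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(2, activation='softmax'),
])

model.compile(optimizer='adam',  # optimizer and loss are assumptions
              loss='categorical_crossentropy',
              metrics=['accuracy'])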

epochs = 200
batch_size = 32

history = model.fit(train_images, train_labels,
                    batch_size=batch_size,
                    steps_per_epoch=len(train_images) // batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(val_images, val_labels),
                    validation_steps=len(val_images) // batch_size)
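
Since 200 epochs can badly overfit a small CNN, an early-stopping callback is a common guard. A hedged sketch (the patience value is an assumption; it would still need to be passed to the fit call above):

early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10,
                                              restore_best_weights=True)
# pass callbacks=[early_stop] to model.fit to enable it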

from tensorflow.keras import Sequential
from tensorflow.keras.layers import (Input, ZeroPadding2D, Conv2D,
                                     BatchNormalization, MaxPool2D, Dropout)

model = Sequential([
    Input(shape=(96, 96, 3)),
    ZeroPadding2D((5, 5)),
    Conv2D(32, 3, activation='relu'),
    BatchNormalization(),
    MaxPool2D(pool_size=(2, 2)),
    Dropout(0.3),
    Conv2D(32, 3, activation='relu'),
    MaxPool2D(pool_size=(2, 2)),
    Dropout(0.3),
])  # the gist preview truncates here; deeper layers and the head are not shown

import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image

def custom_predictions(path):
    img = image.load_img(path, target_size=(64, 64))
    plt.imshow(img)
    img = image.img_to_array(img) / 255.0  # same 1./255 rescaling as the training generator
    img = np.expand_dims(img, axis=0)      # add a batch dimension: (64, 64, 3) -> (1, 64, 64, 3)
    prediction = np.argmax(model.predict(img))
    plt.title(labels[prediction])
    plt.show()

labels = ["00-damage", "01-whole"]
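
A quick usage sketch; the image path below is hypothetical:

custom_predictions('data1a/validation/00-damage/0001.JPEG')  # hypothetical example path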

it = iter(testing_set)
batch = next(it)  # gets a batch of 16 test images (and their labels)

fig, axes = plt.subplots(3, 3, figsize=(10, 10))
fig.tight_layout()
fig.subplots_adjust(hspace=.25)
for i in range(3):
    for j in range(3):
        # the preview truncates inside this loop; the body is a hedged reconstruction
        axes[i, j].imshow(batch[0][i * 3 + j])
        axes[i, j].axis('off')
plt.show()

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(64, 64, 3)),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Conv2D(32, 3, activation='relu'),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Conv2D(32, 3, activation='relu'),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Dropout(0.3),
])  # the gist preview truncates here; later layers are not shown

from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_dir = 'data1a/training'
test_dir = 'data1a/validation'

train_data = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
# Training set: images are resized to 64x64, batched 32 at a time, and labels are one-hot ('categorical').
training_set = train_data.flow_from_directory(train_dir, batch_size=32, target_size=(64, 64), class_mode='categorical')
# The test generator gets only the same 1./255 rescaling; augmentation is skipped so no information leaks into evaluation.
test_data = ImageDataGenerator(rescale=1./255)
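
The preview never shows the test generator being attached to test_dir. A minimal sketch of that wiring (an assumption, not from the gist; batch_size=16 is chosen to match the "batch of 16" comment in the plotting snippet above):

testing_set = test_data.flow_from_directory(test_dir, batch_size=16, target_size=(64, 64),
                                            class_mode='categorical', shuffle=False)  # assumed wiring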

import random

test_pred = model.predict(test_images)
test_pred = np.argmax(test_pred, axis=1)
test_actual = np.argmax(test_labels, axis=1)
rnd_idx = random.sample(range(0, 400), 8)  # 8 random test indices
class_labels = {i: class_name for (class_name, i) in class_name_labels.items()}

for i, idx in enumerate(rnd_idx):
    plt.subplot(2, 4, i + 1)  # the preview truncates in this loop; the layout below is a hedged reconstruction
    plt.imshow(test_images[idx])
    plt.title(class_labels[test_pred[idx]])
    plt.axis('off')
plt.show()

def show_final_history(history):
    plt.style.use("ggplot")
    fig, ax = plt.subplots(1, 2, figsize=(15, 5))
    ax[0].set_title('Loss')
    ax[1].set_title('Accuracy')
    ax[0].plot(history.history['loss'], label='Train Loss')
    ax[0].plot(history.history['val_loss'], label='Validation Loss')
    ax[1].plot(history.history['accuracy'], label='Train Accuracy')
    ax[1].plot(history.history['val_accuracy'], label='Validation Accuracy')
    ax[0].legend()
    ax[1].legend()
    plt.show()
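
Called with the history object returned by the fit call below:

show_final_history(history)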

epochs = 50
batch_size = 16

history = model.fit(train_images, train_labels,
                    batch_size=batch_size,
                    steps_per_epoch=len(train_images) // batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(val_images, val_labels),
                    validation_steps=len(val_images) // batch_size)
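
A quick held-out check after training; a sketch that assumes the model was compiled with an accuracy metric and that test_images/test_labels are prepared as in the earlier snippets:

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=0)  # assumes metrics=['accuracy'] at compile time
print(f"test accuracy: {test_acc:.3f}")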