import tensorflow as tf

# A simple fully connected classifier for the gesture data: 600 input features,
# two hidden ReLU layers, and a softmax output with one unit per gesture class.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(16, activation="relu", input_shape=(600,)))
model.add(tf.keras.layers.Dense(8, activation="relu"))
model.add(tf.keras.layers.Dense(4, activation="relu"))
model.add(tf.keras.layers.Dense(len(gestures), activation="softmax"))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
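The snippet above only defines and compiles the network; the training call is not shown. A minimal training sketch is given below, using hypothetical stand-in arrays X_train / y_train (random data, not from the gist) in place of the real gesture features and one-hot labels.

import numpy as np

# Hypothetical stand-in data to illustrate the fit call; replace with the real
# gesture features (600 values per sample) and one-hot encoded labels.
X_train = np.random.rand(100, 600).astype("float32")
y_train = tf.keras.utils.to_categorical(
    np.random.randint(len(gestures), size=100), num_classes=len(gestures))

history = model.fit(X_train, y_train, epochs=20, batch_size=32, validation_split=0.2)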
from tensorflow import keras
from tensorflow.keras import layers, preprocessing

# Loading the dataset
path = 'fire/'
input_shape = (256, 256, 3)  # default input shape while loading the images
batch = 64

# The train, validation and test datasets
print("Train dataset")
train_ds = preprocessing.image_dataset_from_directory(path+'Train', batch_size=batch, label_mode='binary')
print("Validation dataset")
val_ds = preprocessing.image_dataset_from_directory(path+'Val', batch_size=batch, label_mode='binary')  # directory name assumed
print("Test dataset")
test_ds = preprocessing.image_dataset_from_directory(path+'Test', batch_size=batch, label_mode='binary')  # directory name assumed
class_names = train_ds.class_names  # keep the label names before the datasets are mapped

# Normalizing the pixel values for faster convergence
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))
test_ds = test_ds.map(lambda x, y: (normalization_layer(x), y))

# Cache and prefetch the datasets for a faster input pipeline
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)
# Transfer learning: use InceptionV3 pretrained on ImageNet as a frozen feature extractor
base_model = keras.applications.InceptionV3(weights='imagenet', input_shape=input_shape, include_top=False)  # include_top=False: drop the original classification layer
base_model.trainable = False  # freeze the pretrained weights
inputs = tf.keras.Input(shape=input_shape)
x = base_model(inputs, training=False)  # keep batch-norm layers in inference mode
x = keras.layers.GlobalAveragePooling2D()(x)
outputs = keras.layers.Dense(1, activation='sigmoid')(x)  # add our own binary classification layer
model = keras.Model(inputs, outputs)
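The model is used for prediction further down, but the compile and fit step in between is not shown in the gist. A minimal sketch of that step, assuming binary cross-entropy and an arbitrary epoch count:

# Assumed training step (hyperparameters are illustrative, not from the gist):
# train only the new classification head while the backbone stays frozen.
model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_ds, validation_data=val_ds, epochs=10)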
import random
import matplotlib.pyplot as plt

# pick a random test data sample from one batch
x = random.randint(0, batch - 1)
for img, label in test_ds.as_numpy_iterator():
    plt.axis('off')  # remove axes
    plt.imshow(img[x])  # shape from (batch, 256, 256, 3) --> (256, 256, 3)
    output = model.predict(np.expand_dims(img[x], 0))[0][0]  # input shape (256, 256, 3) --> (1, 256, 256, 3)
    pred = int(output > 0.5)
    print("Predicted: ", class_names[pred], '(', output, '-->', pred, ')')  # picking the label from class_names based on the model output
    break  # only inspect the first batch
# Loading the dataset
path = 'weather/'
input_shape = (256, 256, 3)  # default input shape while loading the images
batch = 64

# The train and test datasets
print("Train dataset")
train_ds = preprocessing.image_dataset_from_directory(path+'train', batch_size=batch, label_mode='binary')
print("Test dataset")
test_ds = preprocessing.image_dataset_from_directory(path+'test', batch_size=batch, label_mode='binary')  # directory name assumed
class_names = train_ds.class_names  # keep the label names before the datasets are mapped

# Normalizing the pixel values for faster convergence
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
test_ds = test_ds.map(lambda x, y: (normalization_layer(x), y))
# Transfer learning: use DenseNet121 pretrained on ImageNet as a frozen feature extractor
base_model = keras.applications.DenseNet121(weights='imagenet', input_shape=input_shape, include_top=False)  # include_top=False: drop the original classification layer
base_model.trainable = False  # freeze the pretrained weights
inputs = tf.keras.Input(shape=input_shape)
x = base_model(inputs, training=False)  # keep batch-norm layers in inference mode
x = keras.layers.GlobalAveragePooling2D()(x)
outputs = keras.layers.Dense(1, activation='sigmoid')(x)  # add our own binary classification layer
model = keras.Model(inputs, outputs)
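As with the fire model, the gist jumps straight to prediction. A hedged sketch of the missing training step, plus an optional fine-tuning pass that unfreezes the backbone with a much lower learning rate, could look like this (hyperparameters are assumptions):

# Assumed training step: fit only the new classification head first.
model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),
              loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_ds, epochs=10)

# Optional fine-tuning: unfreeze the DenseNet backbone and continue training
# with a small learning rate so the pretrained weights change only slightly.
base_model.trainable = True
model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-5),
              loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_ds, epochs=5)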
# pick a random test data sample from one batch
x = random.randint(0, batch - 1)
for img, label in test_ds.as_numpy_iterator():
    plt.axis('off')  # remove axes
    plt.imshow(img[x])  # shape from (batch, 256, 256, 3) --> (256, 256, 3)
    output = model.predict(np.expand_dims(img[x], 0))[0][0]  # input shape (256, 256, 3) --> (1, 256, 256, 3)
    pred = int(output > 0.5)
    print("Predicted: ", class_names[pred], '(', output, '-->', pred, ')')  # picking the label from class_names based on the model output
    break  # only inspect the first batch