Keybase proof

I hereby claim:

  • I am aryanmisra on github.
  • I am malteasy (https://keybase.io/malteasy) on keybase.
  • I have a public key ASATpYrHN8vMcQtXOCMOP4RjRn8yXTaWD3mtt9vpeOOtTwo

To claim this, I am signing this object:

encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them
# during inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
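With the training model defined, compiling and fitting follows the standard Keras pattern. A minimal sketch, assuming `encoder_input_data`, `decoder_input_data`, and `decoder_target_data` are the one-hot arrays built from the text pairs loaded below (those arrays are not shown in this gist):

# Categorical crossentropy matches the softmax over num_decoder_tokens.
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.2)
# Save the trained weights for the inference step.
model.save('s2s.h5')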
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, 'r', encoding='utf-8') as f:
    lines = f.read().split('\n')
for line in lines[: min(num_samples, len(lines) - 1)]:
    input_text, target_text = line.split('\t')
    # We use "tab" as the "start sequence" character
    # for the targets, and "\n" as "end sequence" character.
    target_text = '\t' + target_text + '\n'
    input_texts.append(input_text)
    target_texts.append(target_text)
    for char in input_text:
        if char not in input_characters:
            input_characters.add(char)
    for char in target_text:
        if char not in target_characters:
            target_characters.add(char)
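From these lists, the token counts and one-hot arrays used by the model above can be derived. A sketch of the standard vectorization step, with variable names following the Keras seq2seq example this gist is based on:

input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])

input_token_index = dict((char, i) for i, char in enumerate(input_characters))
target_token_index = dict((char, i) for i, char in enumerate(target_characters))

encoder_input_data = np.zeros(
    (len(input_texts), max_encoder_seq_length, num_encoder_tokens), dtype='float32')
decoder_input_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32')
decoder_target_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32')

for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.
    for t, char in enumerate(target_text):
        decoder_input_data[i, t, target_token_index[char]] = 1.
        if t > 0:
            # decoder_target_data is ahead of decoder_input_data by one timestep.
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.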
from __future__ import print_function
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np

batch_size = 64      # Batch size for training.
epochs = 100         # Number of epochs to train for.
latent_dim = 256     # Latent dimensionality of the encoding space.
num_samples = 10000  # Number of samples to train on.
data_path = 'fra-eng/fra.txt'  # Path to the data txt file on disk.
filepath = "model.h5"
# Declare a checkpoint to save the best version of the model
checkpoint = ModelCheckpoint(filepath, monitor='val_top_3_accuracy', verbose=1,
                             save_best_only=True, mode='max')
# Reduce the learning rate as the learning stagnates
reduce_lr = ReduceLROnPlateau(monitor='val_top_3_accuracy', factor=0.5, patience=2,
                              verbose=1, mode='max', min_lr=0.00001)
# Define Top2 and Top3 Accuracy
from keras.metrics import categorical_accuracy, top_k_categorical_accuracy

def top_3_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=3)

def top_2_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=2)
# Create a MobileNet model
mobile = keras.applications.mobilenet.MobileNet()
# Modify the model
# Choose the 6th layer from the last
x = mobile.layers[-6].output
# Add a dropout and dense layer for predictions
x = Dropout(0.25)(x)
predictions = Dense(7, activation='softmax')(x)
model = Model(inputs=mobile.input, outputs=predictions)
# Compile the model
model.compile(Adam(lr=0.01), loss='categorical_crossentropy',
              metrics=[categorical_accuracy, top_2_accuracy, top_3_accuracy])
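The gist does not show whether any base layers are frozen before training. When fine-tuning MobileNet on a small dataset, a common option is to freeze all but the last few layers so only the new head and the top of the base network are trained. A sketch of that optional step (the cutoff of 23 layers is an illustrative choice, not from the gist):

# Freeze everything except the last 23 layers (illustrative cutoff).
for layer in model.layers[:-23]:
    layer.trainable = False
# Changing layer.trainable only takes effect after compiling again.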
# The paths for the training and validation images
train_path = 'base_dir/train_dir'
valid_path = 'base_dir/val_dir'
# Set up generators
train_batches = ImageDataGenerator(
    preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
    train_path,  # pointing to the path where the data is stored
    target_size=(image_size, image_size),  # the target size of the image output
    batch_size=train_batch_size)  # size of the batches
valid_batches = ImageDataGenerator(
    preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
    valid_path,
    target_size=(image_size, image_size),
    batch_size=val_batch_size)
# Declare a few useful values
num_train_samples = 9013  # number of images in the training set
num_val_samples = 1002    # number of images in the validation set
# The values below are not shown in the gist but are needed by the
# generators above; they are assumed here.
train_batch_size = 10     # assumed training batch size
val_batch_size = 10       # assumed validation batch size
image_size = 224          # MobileNet's default input resolution
# Import the libraries
import numpy as np
import keras
from keras import backend as K
from keras.layers.core import Dense, Dropout
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import confusion_matrix
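Putting the pieces above together, training would follow the usual fit_generator pattern, with the checkpoint and learning-rate callbacks attached. A minimal sketch, assuming the generators, callbacks, and sample counts defined earlier (the epoch count of 30 is illustrative):

# Steps per epoch so each epoch covers roughly the full dataset once.
train_steps = int(np.ceil(num_train_samples / float(train_batch_size)))
val_steps = int(np.ceil(num_val_samples / float(val_batch_size)))

history = model.fit_generator(
    train_batches,
    steps_per_epoch=train_steps,
    validation_data=valid_batches,
    validation_steps=val_steps,
    epochs=30,  # illustrative value
    verbose=1,
    callbacks=[checkpoint, reduce_lr])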