Skip to content

Instantly share code, notes, and snippets.

@danyashorokh
Last active October 9, 2020 13:55
Show Gist options
  • Save danyashorokh/6420a9b2c1dc80cc4c78ff8e8cc085e7 to your computer and use it in GitHub Desktop.
[KERAS] ImageDataGenerator images and masks
import os

import imgaug as ia
import imgaug.augmenters as iaa
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
# Fix the imgaug global random seed so augmentation runs are reproducible.
ia.seed(1)
# Pre-serialized train/validation splits: images and their segmentation
# masks as numpy arrays (shape/dtype not visible here; presumably
# (N, H, W, C) image stacks -- TODO confirm against the export script).
train_imgs = np.load('train_imgs.npy')
train_masks = np.load('train_masks.npy')
val_imgs = np.load('val_imgs.npy')
val_masks = np.load('val_masks.npy')
# Augmentation pipeline: for each input, apply a random subset (from none up
# to all) of the augmenters listed below.
# NOTE(review): colour/intensity augmenters (AddToHue, AdditiveGaussianNoise,
# GammaContrast) are part of this pipeline, and the same pipeline is later
# attached to the MASK generator too -- that would corrupt label values;
# verify masks should only receive the geometric transforms.
seq = iaa.SomeOf((0, None), [
# iaa.CropAndPad(percent=(-0.25, 0.25))
iaa.HorizontalFlip(1), # flip horizontally (always, when this augmenter is selected)
iaa.VerticalFlip(1), # flip vertically (always, when selected)
iaa.Rot90([1]), # rotate 90 degrees
iaa.Rot90([2]), # rotate 180 degrees
# iaa.Rot90([3]), # rotate 270 degrees
# iaa.Clouds(),
# iaa.Fog(),
iaa.Crop(px=(5, 16)), # crop 5 to 16 px from each side (randomly chosen)
iaa.Affine(
scale={"x": (1, 1.4), "y": (1, 1.4)},
# translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-5, 5),
shear=(-10, 10)
),
iaa.AddToHue((-20, 20)),
iaa.AdditiveGaussianNoise(scale=0.02 * 255),
iaa.GammaContrast((0.6, 1.7)),
])
# Freeze the augmenter's random state so images and masks receive matching
# transform parameters when augment_image is called on each.
# NOTE(review): to_deterministic() is called only ONCE, so every image in
# every batch and epoch receives the identical transform parameters; the
# usual imgaug pattern re-freezes per batch -- confirm this is intended.
seq_det = seq.to_deterministic()
# Training images: scale to [0, 1] and augment.
image_datagen_train = ImageDataGenerator(rescale=1./255, preprocessing_function=seq_det.augment_image)
# Training masks: augmented but NOT rescaled (kept in their original range).
mask_datagen_train = ImageDataGenerator(preprocessing_function=seq_det.augment_image)
# Validation: no augmentation; images only rescaled, masks untouched.
image_datagen_val = ImageDataGenerator(rescale=1./255)
mask_datagen_val = ImageDataGenerator()
# Disabled alternative: Keras-native augmentation config, superseded by the
# imgaug pipeline above. Kept for reference.
'''
data_gen_args = dict(rescale=1./255,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2,
rotation_range=5.,
horizontal_flip=True,
vertical_flip=True
)
'''
# image_datagen_train = ImageDataGenerator(**data_gen_args)
# mask_datagen_train = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
# so the image and mask generators stay in lockstep.
seed = 42
# fit() only computes data-dependent statistics (featurewise options), none
# of which are enabled here, so these calls are effectively no-ops; kept to
# preserve the original workflow.
image_datagen_train.fit(train_imgs, augment=True, seed=seed)
mask_datagen_train.fit(train_masks, augment=True, seed=seed)
# BUG FIX: the original called fit() on undefined names test_imgs/test_masks
# (only val_imgs/val_masks are loaded above), raising NameError.
image_datagen_val.fit(val_imgs, augment=True, seed=seed)
mask_datagen_val.fit(val_masks, augment=True, seed=seed)
train_batch_size = 32
val_batch_size = 32
# Training generators: identical seed + shuffle so each image batch lines up
# with its mask batch.
image_generator_train = image_datagen_train.flow(train_imgs, batch_size=train_batch_size, shuffle=True, seed=seed)
mask_generator_train = mask_datagen_train.flow(train_masks, batch_size=train_batch_size, shuffle=True, seed=seed)
# Combine generators into one which yields (image_batch, mask_batch) pairs.
train_generator = zip(image_generator_train, mask_generator_train)
# BUG FIX: the original used undefined names test_imgs/test_masks here; the
# validation arrays loaded above are val_imgs/val_masks.
image_generator_val = image_datagen_val.flow(val_imgs, batch_size=val_batch_size, shuffle=False)
mask_generator_val = mask_datagen_val.flow(val_masks, batch_size=val_batch_size, shuffle=False)
val_generator = zip(image_generator_val, mask_generator_val)
model = some_model()
# Ensure the checkpoint directory exists before training writes to it.
# FIX: was a placeholder-free f-string; also replaced the exists/makedirs
# pair with the race-free makedirs(exist_ok=True).
path = 'snapshots/'
os.makedirs(path, exist_ok=True)
# Checkpoint filename encodes epoch, validation loss and IoU metrics.
# NOTE(review): the 'iou'/'val_metric' placeholders require metrics with
# exactly those names compiled into the model -- confirm, otherwise
# ModelCheckpoint fails when formatting the filename.
filepath = path + 'e{epoch:02d}_b64_val{val_loss:.4f}_iou{iou:.4f}_iouv{val_metric:.4f}.h5'
# FIX: the original also built an unused `earlystopper = EarlyStopping(...)`
# that was never registered; the dead local is removed (early stopping stays
# disabled, matching the commented-out entry below).
callbacks = [
    # Halve the LR after 10 epochs without val_loss improvement.
    # NOTE(review): 'epsilon' was renamed 'min_delta' in newer Keras;
    # keep or rename depending on the installed version.
    ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_lr=1e-9, epsilon=0.00001, verbose=1, mode='min'),
    # EarlyStopping(monitor='val_loss', patience=patience, verbose=0),
    ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True, verbose=0),
]
results = model.fit_generator(
    train_generator,
    epochs=100,
    steps_per_epoch=train_imgs.shape[0] // train_batch_size,
    validation_steps=val_imgs.shape[0] // val_batch_size,
    validation_data=val_generator,
    verbose=1,
    callbacks=callbacks,
)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment