@spokendotcpp
Created March 26, 2019 15:14
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon 18 Feb 2019
@author: Julien Marcellesi
"""
import os

import numpy as np
import cv2
from keras.models import Sequential, Model
from keras.layers import (Dense, Dropout, Activation, Conv2D, Flatten,
                          MaxPooling2D, Input, BatchNormalization)
from keras.optimizers import SGD

taille_image = 200 * 200  # flattened size of one 200x200 grayscale image


def rgbaToGrey(image):
    """Convert an (H, W, 3) or (H, W, 4) image array to grayscale
    using the ITU-R BT.601 luma weights."""
    r, g, b = image[:, :, 0], image[:, :, 1], image[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
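

# rgbaToGrey() is never called below. A hypothetical usage sketch (the file
# name is a placeholder, not part of the original gist):
#
#   from matplotlib.image import imread
#   rgba = imread("frame.png")  # float array, shape (H, W, 4) for RGBA PNGs
#   gray = rgbaToGrey(rgba)     # float array, shape (H, W)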


def dataSet():
    path_train = "sequences/4665440_200x200"
    path_valid = ""
    datas = []
    labels = []
    if not os.path.isdir(path_train):
        print(path_train, "is not a valid directory.")
    # Read every image found in the given directory.
    for file in os.listdir(path_train):
        if file.lower().endswith(('.jpg', '.jpeg', '.png')):
            # cv2.imread() returns a BGR array, so convert with
            # COLOR_BGR2GRAY (COLOR_RGBA2GRAY does not match its output).
            img = cv2.imread(os.path.join(path_train, file))
            datas.append(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
            labels.append(1)
    # Optional negative samples and validation set (disabled):
    """
    for i in range(10000):
        datas.append(np.random.randint(255, size=(200, 200)))
        labels.append(0)
    datas_valid = []
    labels_valid = []
    for directory in os.listdir(path_valid):
        for filename in os.listdir(os.path.join(path_valid, directory)):
            img = cv2.imread(os.path.join(path_valid, directory, filename))
            datas_valid.append(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
            labels_valid.append(1)
    for i in range(1000):
        datas_valid.append(np.random.randint(255, size=(200, 200)))
        labels_valid.append(0)
    """
    # Flatten each 200x200 image into one row vector.
    return np.array(datas).reshape(len(datas), -1), np.array(labels)


def generator():
    model = Sequential()
    model.add(Dense(20, input_dim=20))
    model.add(BatchNormalization())
    model.add(Dropout(0.1))
    model.add(Dense(100))
    model.add(BatchNormalization())
    model.add(Dropout(0.1))
    model.add(Dense(400))
    model.add(BatchNormalization())
    model.add(Dropout(0.1))
    # Output layer: one sigmoid unit per pixel of a 200x200 image.
    model.add(Dense(taille_image))
    model.add(BatchNormalization())
    model.add(Dropout(0.1))
    model.add(Activation('sigmoid'))
    return model


def discriminator():
    model = Sequential()
    model.add(Dense(512, input_dim=taille_image))
    model.add(BatchNormalization())
    model.add(Dropout(0.1))
    model.add(Dense(256))
    model.add(BatchNormalization())
    model.add(Dropout(0.1))
    model.add(Dense(128))
    model.add(BatchNormalization())
    model.add(Dropout(0.1))
    model.add(Dense(units=1))
    model.add(Activation('sigmoid'))
    optimizer = SGD(lr=0.1)
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['acc'])
    return model


def gan(gen, discr):
    # Freezing here only affects models compiled afterwards: Keras captures
    # `trainable` at compile time, so the standalone discriminator (already
    # compiled in discriminator()) keeps learning, while this stacked model
    # only updates the generator's weights.
    discr.trainable = False
    model = Sequential()
    model.add(gen)
    model.add(discr)
    optimizer = SGD(lr=0.1)
    model.compile(loss='binary_crossentropy', optimizer=optimizer)
    return model


def model_superviser():
    inputs = Input(shape=(200, 200, 1), name="input")
    model = Conv2D(32, kernel_size=(64, 64), activation='relu')(inputs)
    model = Conv2D(32, kernel_size=(32, 32), activation='relu')(model)
    # model = Conv2D(128, kernel_size=(10, 10), activation='relu')(model)
    model = MaxPooling2D(pool_size=(7, 7))(model)
    model = Flatten()(model)
    model = Dense(256, activation="relu")(model)
    model = Dropout(0.2)(model)
    # A single-unit softmax is constant (it always outputs 1); sigmoid is the
    # correct activation for binary_crossentropy.
    output = Dense(units=1, activation="sigmoid")(model)
    model = Model(inputs=inputs, outputs=output)
    model.compile(loss='binary_crossentropy', optimizer='Adagrad', metrics=['acc'])
    return model
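

# Hypothetical helper (not in the original gist): train the otherwise unused
# supervised CNN above on the flattened dataset returned by dataSet().
# The [0, 1] scaling and the fit() hyper-parameters are assumptions.
def train_superviser(datas, labels, epochs=5, batch_size=32):
    cnn = model_superviser()
    # Restore the flattened rows to (N, 200, 200, 1) and scale to [0, 1].
    X = datas.reshape(-1, 200, 200, 1).astype("float32") / 255.0
    cnn.fit(X, np.asarray(labels), epochs=epochs, batch_size=batch_size)
    return cnn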


if __name__ == '__main__':
    datas_train, labels_train = dataSet()
    gen = generator()
    dis = discriminator()
    gan_model = gan(gen, dis)  # distinct name so the gan() function is not shadowed
    print(datas_train.shape)
    gan_model.summary()
    epochs = 1000
    batch_size = 1000
    input_size = 20
    num_batches = int(datas_train.shape[0] / batch_size)
    gan_loss = []
    discriminator_loss = []
    for epoch in range(epochs):
        for index in range(num_batches):
            # Generated (fake) data
            noise = np.random.uniform(0, 1, size=[batch_size, input_size])
            generated_data = gen.predict_on_batch(noise)
            # Real training data taken from the image directory
            training_data = datas_train[index * batch_size: (index + 1) * batch_size]
            X = np.vstack((generated_data, training_data))
            # Flipped label convention: generated samples get 1, real get 0.
            y = np.zeros(2 * batch_size)
            y[:batch_size] = 1
            # Train the discriminator
            lossD = dis.train_on_batch(x=X, y=y)
            # Train the generator through the stacked model: the discriminator
            # is frozen there, so only the generator's weights are updated.
            # The target 0 means "real" under the flipped convention.
            noise = np.random.uniform(0, 1, size=[batch_size, input_size])
            y = np.zeros(batch_size)
            lossG = gan_model.train_on_batch(x=noise, y=y)
            discriminator_loss.append(lossD)
            gan_loss.append(lossG)
        if epoch % 10 == 0:
            print(epoch, " lossD=", np.mean(discriminator_loss), " lossG=", np.mean(gan_loss))
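
    # Hypothetical post-training step (not in the original gist): write a few
    # generated samples to disk to inspect the generator's output. The output
    # directory and the [0, 255] rescaling are assumptions.
    os.makedirs("generated", exist_ok=True)
    samples = gen.predict_on_batch(np.random.uniform(0, 1, size=[5, input_size]))
    for i, sample in enumerate(samples):
        img = (sample.reshape(200, 200) * 255.0).astype(np.uint8)
        cv2.imwrite("generated/sample_%d.png" % i, img)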