Michel Kana (michelkana)
import pandas as pd
import os
from keras.optimizers import Adam
# load celebrity images attributes
df_celeb = pd.read_csv('list_attr_celeba.csv')
TOTAL_SAMPLES = df_celeb.shape[0]
# we will downscale the images to SPATIAL_DIM x SPATIAL_DIM pixels
SPATIAL_DIM = 64
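A minimal sketch of how one CelebA image could be loaded and downscaled to SPATIAL_DIM x SPATIAL_DIM; the folder name img_align_celeba, the image_id column, and the use of PIL are assumptions, not part of the original gist.

import os
import numpy as np
from PIL import Image

# assumed helper: open one image, downscale it, and rescale pixels to [-1, 1]
def load_image(filename, folder='img_align_celeba'):
    img = Image.open(os.path.join(folder, filename))
    img = img.resize((SPATIAL_DIM, SPATIAL_DIM))
    return np.asarray(img, dtype=np.float32) / 127.5 - 1.0

# example: load the image from the first row of the attributes table
sample = load_image(df_celeb.image_id.iloc[0])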
from keras.layers import Deconvolution2D, Reshape, BatchNormalization, Input, Dense
from keras.layers.advanced_activations import LeakyReLU

def build_generator(start_filters, filter_size, latent_dim):
    # function for building a CNN block for upsampling the image
    def add_generator_block(x, filters, filter_size):
        x = Deconvolution2D(filters, filter_size, strides=2, padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.3)(x)
        return x
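    # --- hedged sketch: the gist preview truncates here; one plausible way to
    # finish the generator using the block above (the 4x4 starting grid and the
    # filter multipliers are assumptions, not the original values) ---
    from keras.models import Model
    inp = Input(shape=(latent_dim,))
    x = Dense(4 * 4 * start_filters)(inp)   # project latent vector onto a small grid
    x = BatchNormalization()(x)
    x = Reshape((4, 4, start_filters))(x)
    x = add_generator_block(x, start_filters * 4, filter_size)   # 4x4 -> 8x8
    x = add_generator_block(x, start_filters * 2, filter_size)   # 8x8 -> 16x16
    x = add_generator_block(x, start_filters, filter_size)       # 16x16 -> 32x32
    # final upsampling to 64x64 RGB; tanh matches pixels rescaled to [-1, 1]
    x = Deconvolution2D(3, filter_size, strides=2, padding='same', activation='tanh')(x)
    return Model(inputs=inp, outputs=x)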
from keras.layers import Conv2D, BatchNormalization, Input, GlobalAveragePooling2D, Dense
from keras.models import Model
from keras.layers.advanced_activations import LeakyReLU

# function for building the discriminator layers
def build_discriminator(start_filters, spatial_dim, filter_size):
    # function for building a CNN block for downsampling the image
    def add_discriminator_block(x, filters, filter_size):
        x = Conv2D(filters, filter_size, padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.3)(x)
        return x
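    # --- hedged sketch: the gist preview truncates here; one plausible way to
    # finish the discriminator (the number of blocks and the filter growth are
    # assumptions, not the original values) ---
    inp = Input(shape=(spatial_dim, spatial_dim, 3))
    x = add_discriminator_block(inp, start_filters, filter_size)
    x = add_discriminator_block(x, start_filters * 2, filter_size)
    x = add_discriminator_block(x, start_filters * 4, filter_size)
    x = GlobalAveragePooling2D()(x)            # collapse the spatial dimensions
    out = Dense(1, activation='sigmoid')(x)    # real/fake probability
    return Model(inputs=inp, outputs=out)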
from random import randint
import matplotlib.pylab as plt
import numpy as np
from keras.models import Model

# return the activations of a given layer for one input image,
# as an array of feature maps in channels-first order
def get_feature_maps(model, layer_id, input_image):
    model_ = Model(inputs=[model.input],
                   outputs=[model.layers[layer_id].output])
    return model_.predict(np.expand_dims(input_image,
                                         axis=0))[0,:,:,:].transpose((2,0,1))
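A hedged usage sketch: pick a random image, extract the maps of one layer, and plot the first few. The images array, the trained model, and the layer index are assumptions.

# usage sketch (assumes an array `images` of preprocessed samples and a trained `model`)
img = images[randint(0, len(images) - 1)]
maps = get_feature_maps(model, layer_id=2, input_image=img)
fig, axes = plt.subplots(1, 4, figsize=(12, 3))
for i, ax in enumerate(axes):
    ax.imshow(maps[i], cmap='viridis')  # one feature map per panel
    ax.axis('off')
plt.show()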
# Create letter to token dictionary (index 0 is reserved for padding)
chars = sorted(list(set(' '.join(df.yb))))
letter2idx = dict((c, i+1) for i, c in enumerate(chars))
# Create token to letter dictionary
idx2letter = dict((i, c) for c, i in letter2idx.items())
vocabulary_size = len(letter2idx)+1  # +1 accounts for the padding index
print("Vocabulary size: ", vocabulary_size)
from keras.preprocessing import sequence
from keras.utils import to_categorical

X = []
Y = []
max_len = 0
# collect token sequences and labels, tracking the longest word
for i, r in df.iterrows():
    word_vector = [letter2idx[c] for c in r.yb]
    label = r.word_type
    if len(word_vector) > max_len:
        max_len = len(word_vector)
    X.append(word_vector)
    Y.append(label)
# pad all token sequences with zeros up to the longest word
X = sequence.pad_sequences(X, maxlen=max_len)
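A hedged sketch for turning the string labels into one-hot vectors with the to_categorical import above; the label2idx mapping is an assumed helper, not part of the original gist.

# map string labels to integer ids, then one-hot encode them
label2idx = {l: i for i, l in enumerate(sorted(df.word_type.unique()))}
Y = to_categorical([label2idx[l] for l in Y], num_classes=len(label2idx))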
from keras.models import Sequential
from keras.layers import Dense, LSTM, SimpleRNN, Flatten
from keras.layers.embeddings import Embedding

# Build a 1-layer LSTM with 100 units on top of an embedding layer
embedding_dim = 8
model_lstm = Sequential()
model_lstm.add(Embedding(vocabulary_size, embedding_dim, input_length=max_len))
model_lstm.add(LSTM(100))
model_lstm.add(Dense(nb_labels, activation='softmax'))  # softmax for multi-class word-type prediction
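A hedged training sketch for the LSTM, assuming the padded X and one-hot Y from above; the split ratio and hyperparameters are illustrative, not the original settings.

from sklearn.model_selection import train_test_split

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
model_lstm.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model_lstm.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=20, batch_size=32)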
from keras.models import Sequential
from keras.layers import Dense, LSTM, SimpleRNN, Flatten
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D, MaxPooling1D
import pandas as pd

# Fully connected neural network baseline
model_ffn = Sequential()
# input is a fixed-length vector of max_review_length token ids
model_ffn.add(Dense(250, activation='relu', input_dim=max_review_length))
model_ffn.add(Dense(1, activation='sigmoid'))
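A minimal sketch for compiling and inspecting the baseline; binary crossentropy is assumed here to match the single sigmoid output.

model_ffn.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model_ffn.summary()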
max_word_len = df.yb.str.len().max()
max_word_len_utf8 = df.yb_utf8.str.len().max()
nb_labels = len(df.word_type.unique())
nb_words = df.shape[0]
print("Number of words: ", nb_words)
print("Number of labels: ", nb_labels)
print("Max word length: {} characters and {} bytes".format(max_word_len, max_word_len_utf8))
import pandas as pd
import numpy as np
# load the English-Yemba dictionary from a CSV file
df = pd.read_csv('https://gist.githubusercontent.com/michelkana/37ccb5c68b3c72148c2b490c917b13aa/raw/9badee0c1811fa03e8b981763e51ddc8ee56513b/english_yemba_dictionary.csv')
# display a few word pairs from a random 10% sample
df.sample(frac=.1).head(15)
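The stats cell above references a yb_utf8 column; a hedged sketch of how it could be derived, assuming it simply holds the UTF-8 encoding of each Yemba spelling.

# assumed derivation of the yb_utf8 column referenced in the stats cell
df['yb_utf8'] = df.yb.apply(lambda w: w.encode('utf-8'))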