#Compiling the model with the Adam optimizer and categorical crossentropy loss
model.compile(loss=categorical_crossentropy,
              optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7),
              metrics=['accuracy'])
#training the model
model.fit(np.array(X_train), np.array(y_train),
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(np.array(X_test), np.array(y_test)))  # validating on the held-out split (assumed; the original call is truncated here)
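After training, a quick check on the held-out split is the natural follow-up. A minimal sketch using Keras's evaluate (the score order follows compile: loss first, then accuracy):

scores = model.evaluate(np.array(X_test), np.array(y_test), verbose=0)
print("Test loss:", scores[0])       # categorical crossentropy on the test split
print("Test accuracy:", scores[1])   # fraction of correctly classified samples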
#designing the CNN
model = Sequential()
model.add(Conv2D(num_features, kernel_size=(3, 3), activation='relu', input_shape=(width, height, 1), data_format='channels_last', kernel_regularizer=l2(0.01)))
model.add(Conv2D(num_features, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))
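The architecture fragment cuts off after the third Conv2D. A plausible continuation for this fer2013 setup, sketched here with assumed layer sizes (only the 7-way output is fixed, since fer2013 has 7 emotion classes):

model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())                              # collapse feature maps for the dense head
model.add(Dense(2*num_features, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(7, activation='softmax'))         # 7 emotion classes in fer2013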
import sys, os
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from keras.regularizers import l2
import warnings
warnings.filterwarnings("ignore")
data = pd.read_csv('./fer2013.csv')
width, height = 48, 48
datapoints = data['pixels'].tolist()
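The split further below uses X and y, which are never built in this excerpt. A minimal sketch of the usual fer2013 preprocessing, assuming the standard space-separated pixel strings (the [0, 1] scaling is also an assumption):

X = np.array([np.array(p.split(' '), dtype='float32').reshape(width, height, 1)
              for p in datapoints])            # parse each pixel string into a 48x48x1 image
X /= 255.0                                     # scale pixel values to [0, 1] (assumed)
y = pd.get_dummies(data['emotion']).values     # one-hot encode the 7 emotion labels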
pip3 install tensorflow
pip3 install keras
pip3 install numpy
pip3 install scikit-learn
pip3 install pandas
pip3 install opencv-python
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print("\nX_train:\n")
print(X_train.head())
print(X_train.shape)
print("\nX_test:\n")
print(X_test.head())
print(X_test.shape)
Total Epochs: 15
Epoch 0 loss: 3726815.363067627
Epoch 1 loss: 851461.7670974731
Epoch 2 loss: 419299.2051668167
Epoch 3 loss: 219893.16816997528
Epoch 4 loss: 151192.65248513222
Epoch 5 loss: 106766.79851920903
Epoch 6 loss: 84161.97843265533
Epoch 7 loss: 84587.66163202096
Epoch 8 loss: 80072.91707116365
import tensorflow as tf   # TF1-style API (tf.Session, placeholders)

def train(x):
    prediction = nnmodel(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    epochs = 15
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
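        # Sketch (assumed): epoch loop that would produce the per-epoch loss log above.
        # The 784-wide input suggests the TF1 MNIST tutorial data, loaded here as an assumption:
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
        for epoch in range(epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch)):
                epoch_x, epoch_y = mnist.train.next_batch(batch)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'loss:', epoch_loss)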
x = tf.placeholder('float', [None, 784])   # 784 = 28x28 flattened input image
y = tf.placeholder('float')                # one-hot labels
def nnmodel(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, hl1])),
                      'biases': tf.Variable(tf.random_normal([hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([hl1, hl2])),
                      'biases': tf.Variable(tf.random_normal([hl2]))}
hl1 = 1000    # units in hidden layer 1
hl2 = 1000    # units in hidden layer 2
hl3 = 1000    # units in hidden layer 3
outputl = 10  # output classes (10 suggests MNIST digits)
batch = 100   # mini-batch size
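nnmodel above stops after the second hidden layer. A sketch of how the remaining layers and forward pass typically look in this TF1 style; the name nnmodel_full is hypothetical and the ReLU activations are an assumption:

def nnmodel_full(data):
    # Hypothetical completion of nnmodel: three hidden layers plus output layer
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, hl1])),
                      'biases': tf.Variable(tf.random_normal([hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([hl1, hl2])),
                      'biases': tf.Variable(tf.random_normal([hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([hl2, hl3])),
                      'biases': tf.Variable(tf.random_normal([hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([hl3, outputl])),
                    'biases': tf.Variable(tf.random_normal([outputl]))}
    l1 = tf.nn.relu(tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases']))
    l2 = tf.nn.relu(tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases']))
    l3 = tf.nn.relu(tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases']))
    # return raw logits; softmax_cross_entropy_with_logits applies the softmax itself
    return tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])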