'''
AlexNet feature extraction on traffic-sign data (gist by @tawnkramer, June 2017).
From the CarND-Alexnet-Feature-Extraction project:
https://github.com/udacity/CarND-Alexnet-Feature-Extraction
Modified to set the number of classes dynamically from the loaded data.
'''
import pickle
import time
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import numpy as np
import math
from alexnet import AlexNet
nb_classes = 43  # default for the 43-class German traffic-sign set; overridden below from the loaded data
epochs = 30
batch_size = 128
with open('./train.p', 'rb') as f:
    data = pickle.load(f)
y_train = data['labels']
sign_classes, class_indices, class_counts = np.unique(y_train, return_index=True, return_counts=True)
nb_classes = class_counts.shape[0]
print('nb_classes', nb_classes)
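# For reference, np.unique returns (values, first indices, counts); on toy labels:
#   np.unique([0, 1, 1, 2], return_index=True, return_counts=True)
#   -> (array([0, 1, 2]), array([0, 1, 3]), array([1, 2, 1]))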
# plt.bar( np.arange( nb_classes ), class_counts, align='center' )
# plt.xlabel('Class')
# plt.ylabel('Number of training examples')
# plt.xlim([-1, nb_classes])
# plt.show()
# imgs = data['features']
# print('labels', y_train.shape, y_train.dtype)
# print('features', imgs.shape, imgs.dtype)
# for i in range(1):
#     print(y_train[i])
#     img = imgs[i]
#     imgplot = plt.imshow(img)
#     plt.show()
X_train, X_val, y_train, y_val = train_test_split(data['features'], data['labels'], test_size=0.33, random_state=0)
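# Optional sanity check (not in the original gist): report the 67/33 split sizes.
print('train/val sizes:', X_train.shape[0], X_val.shape[0])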
features = tf.placeholder(tf.float32, (None, 32, 32, 3))
labels = tf.placeholder(tf.int64, None)
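# AlexNet expects 227x227x3 inputs, so the 32x32 sign images are upsampled first.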
resized = tf.image.resize_images(features, (227, 227))
# Returns the second final layer of the AlexNet model,
# this allows us to redo the last layer for the traffic signs
# model.
fc7 = AlexNet(resized, feature_extract=True)
fc7 = tf.stop_gradient(fc7)
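# fc7 is the 4096-wide second-to-last AlexNet activation; stop_gradient freezes
# everything below it, leaving only the new fc8 layer (defined next) trainable.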
# New fully connected output layer sized for the traffic-sign classes.
shape = (fc7.get_shape().as_list()[-1], nb_classes)  # (4096, nb_classes)
fc8W = tf.Variable(tf.truncated_normal(shape, stddev=1e-2))
fc8b = tf.Variable(tf.zeros(nb_classes))
logits = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
# testing: clip logits to keep the loss finite; note the 1e-10 floor also
# forces logits to be non-negative.
logits = tf.clip_by_value(logits, 1e-10, 100.0)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
loss_op = tf.reduce_mean(cross_entropy)
opt = tf.train.AdamOptimizer()
train_op = opt.minimize(loss_op, var_list=[fc8W, fc8b])
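# Passing var_list=[fc8W, fc8b] trains only the new layer; the pretrained
# AlexNet weights stay fixed. A sketch of the alternative (full fine-tuning,
# not what this gist does): drop tf.stop_gradient above and call
# opt.minimize(loss_op) with no var_list.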
init_op = tf.global_variables_initializer()
preds = tf.argmax(logits, 1)
accuracy_op = tf.reduce_mean(tf.cast(tf.equal(preds, labels), tf.float32))
def eval_on_data(X, y, sess):
    """Return mean loss and accuracy over X, y, evaluated in mini-batches."""
    total_acc = 0
    total_loss = 0
    for offset in range(0, X.shape[0], batch_size):
        end = offset + batch_size
        X_batch = X[offset:end]
        y_batch = y[offset:end]
        loss, acc = sess.run([loss_op, accuracy_op],
                             feed_dict={features: X_batch, labels: y_batch})
        if math.isnan(loss):
            # Skip batches whose loss went NaN; note they still count in the
            # denominator below, slightly understating loss and accuracy.
            continue
        total_loss += (loss * X_batch.shape[0])
        total_acc += (acc * X_batch.shape[0])
    return total_loss / X.shape[0], total_acc / X.shape[0]
with tf.Session() as sess:
    sess.run(init_op)
    for i in range(epochs):
        # training
        X_train, y_train = shuffle(X_train, y_train)
        t0 = time.time()
        for offset in range(0, X_train.shape[0], batch_size):
            end = offset + batch_size
            sess.run(train_op, feed_dict={features: X_train[offset:end], labels: y_train[offset:end]})

        val_loss, val_acc = eval_on_data(X_val, y_val, sess)
        print("Epoch", i+1)
        print("Time: %.3f seconds" % (time.time() - t0))
        print("Validation Loss =", val_loss)
        print("Validation Accuracy =", val_acc)
        print("")