@Goddard · Created October 15, 2017 17:15
Trying to use a pre-existing model plus my own additions: save it, restore it, then run it on video.
import os.path
import warnings

import cv2
import tensorflow as tf
from distutils.version import LooseVersion

import helper
# Check TensorFlow version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), \
    'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))

# Check for a GPU
if not tf.test.gpu_device_name():
    warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))

def load_ss(saver, sess, save_file):
    """Restore previously saved variables into the current session."""
    saver.restore(sess, save_file)
    # test_accuracy = sess.run(
    #     accuracy,
    #     feed_dict={features: mnist.test.images, labels: mnist.test.labels})
    # print('Test Accuracy: {}'.format(test_accuracy))

def out_layer(sess, vgg_path):
    """Load the saved VGG model and return the keep_prob and output tensors."""
    vgg_tag = 'vgg16'
    # vgg_input_tensor_name = 'image_input:0'
    vgg_keep_prob_tensor_name = 'keep_prob:0'
    # vgg_layer3_out_tensor_name = 'layer3_out:0'
    # vgg_layer4_out_tensor_name = 'layer4_out:0'
    vgg_layer_output_tensor_name = 'my_output/conv2d_transpose:0'

    tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
    graph = tf.get_default_graph()
    # input_w = graph.get_tensor_by_name(vgg_input_tensor_name)
    probabilities = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
    # layer3 = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
    # layer4 = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
    output_layer = graph.get_tensor_by_name(vgg_layer_output_tensor_name)
    return probabilities, output_layer

def load_graph(graph_file, use_xla=False):
    """Load a frozen GraphDef from disk, optionally with XLA JIT enabled."""
    jit_level = 0
    config = tf.ConfigProto()
    if use_xla:
        jit_level = tf.OptimizerOptions.ON_1
        config.graph_options.optimizer_options.global_jit_level = jit_level

    with tf.Session(graph=tf.Graph(), config=config) as sess:
        gd = tf.GraphDef()
        with tf.gfile.Open(graph_file, 'rb') as f:
            gd.ParseFromString(f.read())
        tf.import_graph_def(gd, name='')
        ops = sess.graph.get_operations()
    return sess.graph, ops

def run():
    with tf.Session() as sess:
        runs_dir = './runs'
        data_dir = './data'
        save_path = os.path.join(runs_dir, '')
        video_path = os.path.join(data_dir, 'driving.mp4')
        video_size = (1280, 720)
        # keep_prob = 0.3

        # Restore the graph definition and the latest checkpoint weights.
        model_saver = tf.train.import_meta_graph(save_path + 'model.ckpt.meta')
        model_saver.restore(sess, tf.train.latest_checkpoint(save_path))
        # load_graph(save_path + 'frozen_model.pb')
        # for i in tf.get_default_graph().get_operations():
        #     print(i.name)

        # Pull the tensors we need back out of the restored graph.
        graph = tf.get_default_graph()
        image_input = graph.get_tensor_by_name('image_input:0')
        probabilities = graph.get_tensor_by_name('keep_prob:0')
        output_layer = graph.get_tensor_by_name('my_output/conv2d_transpose:0')
        logits = tf.reshape(output_layer, (-1, 2))
        # sess.run(tf.global_variables_initializer())

        # clip = VideoFileClip(video_path)
        cap = cv2.VideoCapture(video_path)
        while not cap.isOpened():
            cap = cv2.VideoCapture(video_path)
            cv2.waitKey(1000)
            print("Wait for the header")

        pos_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        while True:
            flag, frame = cap.read()
            if flag:
                # The frame is ready and already captured.
                image = helper.gen_test_video(sess, logits, probabilities, image_input, frame, video_size)
                cv2.imshow('video', image)
                pos_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
                print(str(pos_frame) + " frames")
            else:
                # The next frame is not ready, so rewind and try to read it again.
                cap.set(cv2.CAP_PROP_POS_FRAMES, pos_frame - 1)
                print("frame is not ready")
                # Give the decoder a moment before retrying.
                cv2.waitKey(1000)

            if cv2.waitKey(10) == 27:  # Esc to quit
                break
            if cap.get(cv2.CAP_PROP_POS_FRAMES) == cap.get(cv2.CAP_PROP_FRAME_COUNT):
                # Stop once every frame has been read.
                break

        # new_clip = clip.fl_image(image)
        # new_clip.write_videofile("result.mp4")


if __name__ == '__main__':
    run()
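
The commented-out VideoFileClip lines hint at a moviepy alternative to the OpenCV display loop. A minimal sketch of that route, assuming the session and tensors set up in run() are in scope and that helper.gen_test_video accepts RGB frames (moviepy supplies RGB, whereas cv2.VideoCapture yields BGR); note that fl_image takes a function, not a frame:

from moviepy.editor import VideoFileClip

def process_frame(frame):
    # moviepy hands fl_image one RGB ndarray at a time and expects an
    # RGB ndarray back; gen_test_video is assumed to cope with RGB here.
    return helper.gen_test_video(sess, logits, probabilities, image_input, frame, video_size)

clip = VideoFileClip(video_path)
new_clip = clip.fl_image(process_frame)  # fl_image wants a function, not a frame
new_clip.write_videofile("result.mp4")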
@Goddard (Author) commented Oct 15, 2017
Getting this error:

InvalidArgumentError (see above for traceback): Incompatible shapes: [1,46,80,512] vs. [1,45,80,512]
    [[Node: Add = Add[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](conv2d_transpose/BiasAdd, layer4_out)]]
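
The shapes give the cause away: with 1280x720 frames, layer4_out (stride 16) has height 720/16 = 45, but VGG's five 'SAME' poolings round 720/32 up to 23, and the conv2d_transpose that doubles it back produces 46, so the skip-connection Add gets 46 vs 45. A hedged fix, assuming the network was trained at a multiple-of-32 resolution (576x160 is a common choice for this project, but that size is an assumption here), is to resize each frame before inference:

import cv2

# target_size is an assumption: any (width, height) where both are
# multiples of 32 keeps the pooling/upsampling shapes consistent.
target_size = (576, 160)  # cv2.resize takes (width, height)

def prepare_frame(frame):
    # Resize the raw 1280x720 frame so the conv2d_transpose output
    # matches layer4_out in the skip connection.
    return cv2.resize(frame, target_size, interpolation=cv2.INTER_LINEAR)

Feeding prepare_frame(frame) into gen_test_video instead of the raw frame should make the Add node's operand shapes agree.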
