import numpy as np
import tensorflow as tf

from time import time

num_iter = 1000
batch_size = 64
height = 256
width = 256
channels = 3
total_images = num_iter * batch_size

seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)

# Batch of random uint8 RGB images, reused on every loop iteration.
imgs = np.random.randint(256, size=(batch_size, height, width, channels), dtype=np.uint8)
def body(dev):
    # Build a while_loop body that runs adjust_hue on the requested device.
    def inner(x, y):
        with tf.device(dev):
            adjusted = tf.image.adjust_hue(y, 0.1)
            return (tf.add(x, 1), adjusted)
    return inner
# Loop condition: iterate num_iter times.
cond = lambda x, y: tf.less(x, num_iter)

# Loop counter.
x = tf.Variable(tf.constant(0))

init_op = tf.global_variables_initializer()
init_op_2 = tf.local_variables_initializer()
with tf.Session() as sess:

    sess.run(init_op)
    sess.run(init_op_2)

    def time_it(dev_type, dev_number):
        # Time num_iter adjust_hue calls on the given device (e.g. "/gpu:0").
        # The measurement includes building the while_loop graph as well as
        # running it, plus any one-time device warm-up.
        dev = "/" + dev_type + ":" + str(dev_number)
        start = time()
        result = tf.while_loop(cond, body(dev), [x, imgs])
        sess.run(result)
        total_time = time() - start
        time_per_image = total_time / total_images
        images_per_sec = 1.0 / time_per_image
        print("\n\nDevice: %s" % dev)
        print("Total images: %d" % total_images)
        print("Total time: %f seconds\n" % total_time)
        print("Time per image (us): %.16f" % (time_per_image * 1e6))
        print("Images/sec: %.2f\n" % images_per_sec)
        return images_per_sec

    im_sec_gpu = time_it("gpu", 0)
    im_sec_cpu = time_it("cpu", 0)

    # Ratio > 1 means the GPU kernel is faster.
    print("\n\nCPU/GPU acceleration factor: %.2f\n\n" % (im_sec_gpu / im_sec_cpu))
    hue_delta = 0.1

    # Numeric test: run the same adjust_hue op on CPU and GPU and compare results.
    with tf.device("/cpu:0"):
        cpu_result = tf.image.adjust_hue(imgs, hue_delta).eval()

    with tf.device("/gpu:0"):
        gpu_result = tf.image.adjust_hue(imgs, hue_delta).eval()

    print("\nCPU result (hue delta: %.2f):\n\n%s\n" % (hue_delta, cpu_result[0:10, 0:2, 0]))
    print("\nGPU result (hue delta: %.2f):\n\n%s\n" % (hue_delta, gpu_result[0:10, 0:2, 0]))
print("\nDifference:\n\n%s\n" % (cpu_result[0:10, 0:2, 0] - gpu_result[0:10, 0:2, 0]))
print("\nreduce_sum of difference (all pixels):\n\n%s\n" % (tf.reduce_sum(cpu_result - gpu_result).eval()))
print("max difference (all pixels): %s\n\n" % (tf.reduce_max(cpu_result - gpu_result).eval()))
    new_delta = 0.5

    # Random hue adjustment: the hue delta is drawn independently for each call,
    # so CPU and GPU outputs are compared against the original images rather
    # than against each other.
    with tf.device("/cpu:0"):
        cpu_result = tf.image.random_hue(imgs, new_delta).eval()

    with tf.device("/gpu:0"):
        gpu_result = tf.image.random_hue(imgs, new_delta).eval()

    # Signed casts again avoid uint8 wrap-around in the differences.
    rand_diff_cpu = imgs.astype(np.int32) - cpu_result.astype(np.int32)
    rand_diff_gpu = imgs.astype(np.int32) - gpu_result.astype(np.int32)

    print("CPU random hue vs original:\n%s\n\n" % rand_diff_cpu[0:5, 0:5, 0])
    print("GPU random hue vs original:\n%s\n\n" % rand_diff_gpu[0:5, 0:5, 0])