TensorFlow single GPU example
'''
Basic single GPU computation example using the TensorFlow library
(adapted from the multi GPU example).
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/

This script requires your machine to have 1 GPU.
"/cpu:0": the CPU of your machine.
"/gpu:0": the first GPU of your machine.
'''
from __future__ import print_function
import numpy as np
import tensorflow as tf
import time
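
# Optional sanity check: to confirm that "/gpu:0" is actually visible, you can
# list the local devices with the TensorFlow 1.x device_lib utility, e.g.:
#
#   from tensorflow.python.client import device_lib
#   print([d.name for d in device_lib.list_local_devices()])
#
# Note that listing devices may itself initialize the GPUs, so do this in a
# separate process if you want allow_growth (set below) to take full effect.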
matrix_size = 1000
num_iterations = 10
extra_wait = True  # keep the process alive after the run (e.g. to inspect GPU memory usage)

config = tf.ConfigProto()
config.log_device_placement = True      # log which device each op is placed on
config.gpu_options.allow_growth = True  # allocate GPU memory on demand instead of up front
'''
Example: compute A^n + B^n on a single GPU.
Reference timings from the original multi GPU example (8 CPU cores, 2x GTX-980):
 * Single GPU computation time: 0:00:11.277449
 * Multi GPU computation time: 0:00:07.131701
'''
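
# With the settings above (matrix_size = 1000, num_iterations = 10), the graph
# built below evaluates A^10 + B^10 for two random 1000x1000 float32 matrices.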
A = np.random.rand(matrix_size, matrix_size).astype('float32')
B = np.random.rand(matrix_size, matrix_size).astype('float32')
# Collect the matrix-power ops so they can be summed on the CPU afterwards.
c1 = []
def matpow(M, n):
    # Base case: the first power of M is M itself.
    if n <= 1:
        return M
    # Recursive case: M^n = M * M^(n-1), built as a chain of matmul ops.
    return tf.matmul(M, matpow(M, n - 1))
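
# For example, matpow(a, 3) builds tf.matmul(a, tf.matmul(a, a)), i.e. a chain
# of n - 1 matmul ops whose result is the n-th matrix power.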
# Build the matrix powers on the first GPU.
with tf.device('/gpu:0'):
    a = tf.placeholder(tf.float32, [matrix_size, matrix_size])
    b = tf.placeholder(tf.float32, [matrix_size, matrix_size])
    c1.append(matpow(a, num_iterations))
    c1.append(matpow(b, num_iterations))

# Sum the two results on the CPU.
with tf.device('/cpu:0'):
    total = tf.add_n(c1)

t1_1 = time.monotonic()
with tf.Session(config=config) as sess:
    sess.run(total, {a: A, b: B})
t2_1 = time.monotonic()
print("Computation time: " + str(t2_1 - t1_1))

if extra_wait:
    # Keep the process (and its GPU memory allocations) alive until interrupted.
    try:
        time.sleep(10000)
    except KeyboardInterrupt:
        pass
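
# While the script sleeps (extra_wait = True), the GPU memory footprint can be
# inspected from another shell, e.g. with `nvidia-smi`, to see the effect of
# gpu_options.allow_growth. Press Ctrl-C to exit.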