Gist by @DEKHTIARJonathan (last active November 13, 2018)
# Command used to launch the test script:
python tensorflow_test.py

# Command used to monitor GPU memory usage (`nvidia-smi` refreshed every 0.3 s):
watch -n 0.3 nvidia-smi
Tue Nov 13 17:04:18 2018
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 410.73       Driver Version: 410.73       CUDA Version: 10.0     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|===============================+======================+======================|
|   0  Quadro GP100        Off  | 00000000:17:00.0 Off |                  Off |
| 53%   70C    P0   218W / 235W |   2441MiB / 16278MiB |     97%      Default |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                       GPU Memory |
|  GPU       PID   Type   Process name                             Usage      |
|=============================================================================|
|    0     26139      C   python                                      2429MiB |
+-----------------------------------------------------------------------------+
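
The GPU only holds about 2.4 GiB of the 16 GiB available because the script below creates its session with `allow_growth = True`. For context, a minimal sketch of the usual TF 1.x ways to control this (the 0.25 fraction is an arbitrary value chosen purely for illustration):

import tensorflow as tf

# Default: TensorFlow maps nearly all free GPU memory when the session is created.
default_config = tf.ConfigProto()

# Grow the allocation on demand (this is what the script below uses).
growth_config = tf.ConfigProto()
growth_config.gpu_options.allow_growth = True

# Hard cap: never map more than a fixed fraction of the total GPU memory.
capped_config = tf.ConfigProto()
capped_config.gpu_options.per_process_gpu_memory_fraction = 0.25

sess = tf.Session(config=growth_config)

The full test script (tensorflow_test.py) follows: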
import os
import warnings

warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning)

# Set before importing TensorFlow so the flags are picked up at import time.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # or any of {'0', '1', '2'}
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import time

import numpy as np
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.DEBUG)


def dense_layer(
    inputs,
    n_units=100,
    act=None,
    W_init=tf.truncated_normal_initializer(stddev=0.1),
    b_init=tf.constant_initializer(value=0.0),
    name='dense'
):
    tf.logging.debug(
        "[DUMMY MODEL] DenseLayer %s: n units: %d, act: %s" %
        (name, n_units, act.__name__ if act is not None else 'No Activation')
    )

    if inputs.get_shape().ndims != 2:
        raise AssertionError("The input dimension must be rank 2, please reshape or flatten it")

    n_in = int(inputs.get_shape()[-1])

    with tf.variable_scope(name):
        weight_var = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=inputs.dtype)
        outputs = tf.matmul(inputs, weight_var)

        if b_init is not None:
            try:
                bias_var = tf.get_variable(name='b', shape=(n_units,), initializer=b_init, dtype=inputs.dtype)
            except Exception:  # If the initializer is a constant tensor, the shape must not be specified.
                bias_var = tf.get_variable(name='b', initializer=b_init, dtype=inputs.dtype)

            outputs = tf.nn.bias_add(outputs, bias_var, name='bias_add')

        if act is not None:
            outputs = act(outputs)

    return outputs


def build_model(input_plh):
    """
    Build a dummy stack of fully-connected layers ending in a softmax.
    :param input_plh: input placeholder of shape [batch, 500]
    """
    tf.logging.debug("[DUMMY MODEL] Model is building ...")
    start_time = time.time()

    net = dense_layer(input_plh, n_units=32, act=tf.nn.relu, name="dense_1")
    net = dense_layer(net, n_units=64, act=tf.nn.relu, name="dense_2")
    net = dense_layer(net, n_units=128, act=tf.nn.relu, name="dense_3")
    net = dense_layer(net, n_units=256, act=tf.nn.relu, name="dense_4")
    net = dense_layer(net, n_units=512, act=tf.nn.relu, name="dense_5")
    net = dense_layer(net, n_units=1024, act=tf.nn.relu, name="dense_6")
    net = dense_layer(net, n_units=2048, act=tf.nn.relu, name="dense_7")
    net = dense_layer(net, n_units=4096, act=tf.nn.relu, name="dense_8")
    net = dense_layer(net, n_units=8192, act=tf.nn.relu, name="dense_9_1")
    net = dense_layer(net, n_units=8192, act=tf.nn.relu, name="dense_9_2")
    net = dense_layer(net, n_units=8192, act=tf.nn.relu, name="dense_9_3")
    net = dense_layer(net, n_units=8192, act=tf.nn.relu, name="dense_9_4")
    net = dense_layer(net, n_units=8192, act=tf.nn.relu, name="dense_9_5")
    net = dense_layer(net, n_units=8192, act=tf.nn.relu, name="dense_9_6")

    logits = dense_layer(net, n_units=1000, act=None, name="dense_10")
    probs = tf.nn.softmax(logits, name="probs")

    tf.logging.debug("[DUMMY MODEL] Model building finished in %03d msecs" % ((time.time() - start_time) * 1000))

    return probs


if __name__ == "__main__":

    BATCH_SIZE = 2000

    # Dummy input: a batch of 2000 vectors of 500 floats.
    input_plh = tf.placeholder(tf.float32, [BATCH_SIZE, 500])
    model = build_model(input_plh)

    # Only map GPU memory as the allocator actually needs it.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        synthetic_data = np.random.random((BATCH_SIZE, 500))

        for step in range(1000):
            if (step + 1) % 10 == 0:
                tf.logging.debug("[DUMMY MODEL] Step %d" % (step + 1))

            _ = sess.run(model, feed_dict={input_plh: synthetic_data})
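
As a rough sanity check on the 2441 MiB reported by nvidia-smi, here is a back-of-the-envelope sketch, using only the layer widths hard-coded in build_model above, of how much of that footprint is plain float32 weights:

# Layer widths as declared in build_model (the input placeholder is 500-dimensional).
widths = [500, 32, 64, 128, 256, 512, 1024, 2048, 4096,
          8192, 8192, 8192, 8192, 8192, 8192, 1000]

# Each layer holds a weight matrix W of shape (n_in, n_out) and a bias of shape (n_out,).
n_params = sum(n_in * n_out + n_out for n_in, n_out in zip(widths[:-1], widths[1:]))

print("parameters      : %.1fM" % (n_params / 1e6))            # ~388.5M parameters
print("float32 weights : %.2f GiB" % (n_params * 4 / 2 ** 30))  # ~1.45 GiB

The weights alone account for roughly 1.45 GiB; the remainder of the reported usage is presumably the CUDA context plus the activations for the batch of 2000 samples.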