-
-
Save ryancheunggit/37cf2c84473168c60a3ea64445907e27 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# chunk 0 | |
import inspect | |
import time | |
import numpy as np | |
import tensorflow as tf | |
from pprint import pprint | |
# Reproducible synthetic data for a 5-feature linear regression.
print(tf.__version__)
tf.random.set_seed(42)
np.random.seed(42)
# Ground-truth weights as a column vector: [[0.], [1.], [2.], [3.], [4.]]
true_weights = tf.constant([0, 1, 2, 3, 4], dtype=tf.float32)[:, tf.newaxis]
x = tf.constant(tf.random.uniform((32, 5)), dtype=tf.float32)
y = tf.constant(x @ true_weights, dtype=tf.float32)
# chunk 1
def f(a, b, power=2, d=3):
    """Toy polynomial a ** power + d * b, used to demo autograph conversion."""
    return d * b + tf.pow(a, power)

# autograph rewrites f into graph-compatible Python; print that generated source.
converted_f = tf.autograph.to_graph(f)
print(inspect.getsource(converted_f))
# chunk 2
def cube(x):
    """Return x cubed via repeated multiplication.

    The explicit loop (instead of x ** 3) is deliberate: it gives
    autograph a Python loop to rewrite in the conversion below.
    """
    result = x
    for _ in (0, 1):
        result = result * x
    return result
# Convert cube() to its graph-mode equivalent and print the generated source;
# the interesting part is how autograph rewrites the Python for-loop.
converted_cube = tf.autograph.to_graph(cube)
print(inspect.getsource(converted_cube))
# chunk 3
def g(x):
    """Square x when any entry is negative; otherwise return x unchanged.

    The data-dependent `if` on a tensor is exactly the kind of control
    flow autograph must rewrite for graph mode.
    """
    if tf.reduce_any(x < 0):
        return tf.square(x)
    else:
        return x

converted_g = tf.autograph.to_graph(g)
print(inspect.getsource(converted_g))
# chunk 4
# Wrap each callable in tf.function; autograph=False skips source rewriting.
tf_func_f = tf.function(autograph=False)(f)
tf_func_g = tf.function(autograph=False)(converted_g)
tf_func_g2 = tf.function(autograph=True)(g)
# Each tf.function keeps a handle to the exact Python callable it wraps.
for wrapped, original in ((tf_func_f, f), (tf_func_g, converted_g), (tf_func_g2, g)):
    print(wrapped.python_function is original)
# chunk 5
# Trace a concrete function for float32 vectors of length 3.
concrete_g = tf_func_g.get_concrete_function(x=tf.TensorSpec(shape=[3], dtype=tf.float32))
print(concrete_g)
# chunk 6
# Calling the concrete function and the polymorphic wrapper gives the same result.
sample = tf.constant([-1, 1, -2], dtype=tf.float32)
pprint(concrete_g(sample))
pprint(tf_func_g(sample))
# chunk 7
# Trace f for two scalar float32 tensors; power and d keep their defaults.
float_spec = tf.TensorSpec(shape=[1], dtype=tf.float32)
concrete_f = tf_func_f.get_concrete_function(a=float_spec, b=float_spec)
print(concrete_f)
pprint(concrete_f(tf.constant(1.), tf.constant(2.)))
# NOTE: each distinct combination of Python-value arguments below triggers
# a fresh trace — the count is inspected in chunk 8, so keep the call order.
pprint(tf_func_f(1., 2.))
pprint(tf_func_f(a=tf.constant(1., dtype=tf.float32), b=2, power=2.))
pprint(tf_func_f(a=tf.constant(1., dtype=tf.float32), b=2., d=3))
pprint(tf_func_f(a=tf.constant(1., dtype=tf.float32), b=2., d=3., power=3.))
# chunk 8
# How many times tf_func_f has been traced so far (private API; one trace
# per distinct input signature seen in chunk 7).
print(tf_func_f._get_tracing_count())
# chunk 9
# BUG FIX: the original loop used `f` as its loop variable, silently
# rebinding the module-level function f defined in chunk 1. Use a
# dedicated name so `f` survives the loop.
for i, concrete_fn in enumerate(tf_func_f._list_all_concrete_functions_for_serialization()):
    print(i, concrete_fn.structured_input_signature)
# chunk 10
# Decorator form: wrap `square` in a tf.function at definition time.
@tf.function(autograph=False)
def square(x):
    return x * x
# Equivalent call form: define a plain function, then wrap it explicitly.
# NOTE: this second definition rebinds the name `square`, replacing the
# decorated version above — the two forms are interchangeable.
def square(x): return x * x
square = tf.function(autograph=False)(square)
# chunk 11
# Eager-mode training loop for the linear model data built in chunk 0,
# timed for comparison against the tf.function version in chunk 12.
t0 = time.time()
# BUG FIX: `weights` was never initialized before this loop anywhere in the
# file (chunk 12 creates its own copy only afterwards), so the original
# script raised a NameError here. Initialize it the same way chunk 12 does.
weights = tf.Variable(tf.random.uniform((5, 1)), dtype=tf.float32)
for iteration in range(1001):
    with tf.GradientTape() as tape:
        y_hat = tf.linalg.matmul(x, weights)
        loss = tf.reduce_mean(tf.square(y - y_hat))
    if not (iteration % 200):
        # float() makes the scalar tensor safe for the {:5.4f} format spec
        # across TF versions.
        print('mean squared loss at iteration {:4d} is {:5.4f}'.format(iteration, float(loss)))
    gradients = tape.gradient(loss, weights)
    weights.assign_add(-0.05 * gradients)
pprint(weights)
print('time took: {} seconds'.format(time.time() - t0))
# chunk 12
# Same training loop as chunk 11, but the update step is compiled with
# tf.function so the whole step runs as a graph.
start = time.time()
weights = tf.Variable(tf.random.uniform((5, 1)), dtype=tf.float32)

@tf.function
def train_step():
    """Run one gradient-descent step and return the mean-squared loss."""
    with tf.GradientTape() as tape:
        predictions = tf.linalg.matmul(x, weights)
        loss = tf.reduce_mean(tf.square(y - predictions))
    gradients = tape.gradient(loss, weights)
    weights.assign_add(-0.05 * gradients)
    return loss

for iteration in range(1001):
    loss = train_step()
    if iteration % 200 == 0:
        print('mean squared loss at iteration {:4d} is {:5.4f}'.format(iteration, loss))
pprint(weights)
print('time took: {} seconds'.format(time.time() - start))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment