Last active
June 19, 2020 09:13
-
-
Save gngdb/aa0766293b1e689d06fbde8d430a014c to your computer and use it in GitHub Desktop.
How to use autograd inside TensorFlow
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import tensorflow as tf
import autograd.numpy as np
from autograd import grad
from tensorflow.python.framework import function

# Fixed seed so the printed gradients are reproducible across runs.
rng = np.random.RandomState(42)
# float32 to match the output dtype declared for tf.py_func below.
x_np = rng.randn(4, 4).astype(np.float32)

# Pin to CPU — py_func ops execute in the Python client process and have no GPU kernel.
with tf.device('/cpu:0'):
    x = tf.Variable(x_np)
def tf_loss(a):
    """Sum-of-squares loss, sum(a**2), built from native TensorFlow ops."""
    return tf.reduce_sum(tf.square(a))
def np_loss(a):
    """Numpy sum-of-squares loss scaled by 2: 2 * sum(a**2).

    The extra factor of 2 makes this loss — and its gradient — visibly
    different from ``tf_loss`` when the two are compared in the output.
    Uses autograd's numpy wrapper so ``grad`` can differentiate it.
    """
    return np.array(2.).astype(np.float32) * np.square(a).sum()
# Autograd-derived gradient of np_loss: d/da [2 * sum(a^2)] = 4a.
grad_np_loss = grad(np_loss)

# Reference values via TensorFlow's own autodiff: d/dx sum(x^2) = 2x.
l = tf_loss(x)
g = tf.gradients(l, x)

# Wrap the numpy loss and gradient as graph ops; tf.py_func requires the
# output dtype to be declared up front (float32 here).
with tf.device('/cpu:0'):
    np_in_tf = tf.py_func(np_loss, [x], tf.float32)
    npgrad_in_tf = tf.py_func(grad_np_loss, [x], tf.float32)
@function.Defun()
def op_grad(x, grad):
    """Custom gradient function: return autograd's gradient of np_loss.

    NOTE(review): the incoming upstream ``grad`` argument is discarded
    rather than multiplied in, so this is only correct when the wrapped
    op is the final node being differentiated — confirm for other uses.
    """
    return [tf.py_func(grad_np_loss, [x], tf.float32)]
@function.Defun(grad_func=op_grad)
def tf_replaced_grad_loss(a):
    """Same forward computation as tf_loss, but with the backward pass
    overridden by op_grad (i.e. autograd's gradient of np_loss)."""
    return tf_loss(a)
with tf.device('/cpu:0'):
    # Differentiating tf_replaced_grad_loss triggers op_grad, so this node
    # yields the autograd-computed gradient from inside the TF graph.
    tf_np_grad = tf.gradients(tf_replaced_grad_loss(x), x)

# Run everything and print the four gradients for comparison.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("Tensorflow gradient:\n")
    print(sess.run(g)[0])
    print("\nNumpy gradient (should be 2 times tf version):\n")
    print(grad_np_loss(x_np))
    print("\nNumpy gradient evaluated in Tensorflow:\n")
    print(sess.run(npgrad_in_tf))
    print("\nNumpy gradient put in Tensorflow graph:\n")
    print(sess.run(tf_np_grad)[0])
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.