Skip to content

Instantly share code, notes, and snippets.

What would you like to do?
def tf_pca(x):
    """Compute PCA over the bottom two dimensions of `x`.

    Assumes dims = [..., observations, features]; any leading dimensions
    are treated as batch dimensions.

    Args:
        x: tensor of shape [..., observations, features].

    Returns:
        Tensor of the same shape as `x`: the centered observations
        projected onto the principal axes (U * S), with per-component
        signs chosen to match sklearn's PCA (its `svd_flip` helper).
    """
    # Center the data along the observations axis.
    x -= tf.reduce_mean(x, -2, keepdims=True)

    # Currently, the GPU implementation of SVD is awful.
    # It is slower than moving data back to CPU to SVD there.
    with tf.device('/cpu:0'):
        ss, us, vs = tf.svd(x, full_matrices=False, compute_uv=True)

    # Scale the left singular vectors by the singular values: U * S is
    # the data expressed in the principal-component basis.
    ss = tf.expand_dims(ss, -2)
    projected_data = us * ss

    # Selection of sign of axes is arbitrary.
    # This replicates sklearn's PCA by duplicating svd_flip: for each
    # component, flip so its largest-magnitude entry is positive.
    r = projected_data
    abs_r = tf.abs(r)
    # Mask selecting the max-|value| entry of each component column.
    m = tf.equal(abs_r, tf.reduce_max(abs_r, axis=-2, keepdims=True))
    signs = tf.sign(tf.reduce_sum(r * tf.cast(m, r.dtype), axis=-2, keepdims=True))
    result = r * signs
    return result
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.