@dkohlsdorf
Created March 12, 2020 11:26
Recursive Auto Encoder in TensorFlow 2.0
{
"cells": [
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:Layer concatenate_34 is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.\n",
"\n",
"If you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n",
"\n",
"To change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n",
"\n",
"tf.Tensor(7271.5244, shape=(), dtype=float32)\n",
"tf.Tensor(7068.2646, shape=(), dtype=float32)\n",
"tf.Tensor(6871.754, shape=(), dtype=float32)\n",
"tf.Tensor(6681.8955, shape=(), dtype=float32)\n",
"tf.Tensor(6498.5864, shape=(), dtype=float32)\n",
"tf.Tensor(6321.701, shape=(), dtype=float32)\n",
"tf.Tensor(6151.1147, shape=(), dtype=float32)\n",
"tf.Tensor(11264.176, shape=(), dtype=float32)\n",
"tf.Tensor(10979.557, shape=(), dtype=float32)\n",
"tf.Tensor(8979.219, shape=(), dtype=float32)\n",
"tf.Tensor(8713.383, shape=(), dtype=float32)\n",
"tf.Tensor(8445.977, shape=(), dtype=float32)\n",
"tf.Tensor(8181.042, shape=(), dtype=float32)\n",
"tf.Tensor(7921.3525, shape=(), dtype=float32)\n",
"tf.Tensor(7668.8037, shape=(), dtype=float32)\n",
"tf.Tensor(7424.6533, shape=(), dtype=float32)\n",
"tf.Tensor(7189.6924, shape=(), dtype=float32)\n",
"tf.Tensor(6964.3525, shape=(), dtype=float32)\n",
"tf.Tensor(6748.806, shape=(), dtype=float32)\n",
"tf.Tensor(6543.033, shape=(), dtype=float32)\n",
"tf.Tensor(6346.8706, shape=(), dtype=float32)\n",
"tf.Tensor(6160.0586, shape=(), dtype=float32)\n",
"tf.Tensor(5982.2686, shape=(), dtype=float32)\n",
"tf.Tensor(5083.8223, shape=(), dtype=float32)\n",
"tf.Tensor(4974.8525, shape=(), dtype=float32)\n",
"tf.Tensor(4868.525, shape=(), dtype=float32)\n",
"tf.Tensor(4765.2173, shape=(), dtype=float32)\n",
"tf.Tensor(4665.1934, shape=(), dtype=float32)\n",
"tf.Tensor(4568.6294, shape=(), dtype=float32)\n",
"tf.Tensor(4475.6265, shape=(), dtype=float32)\n",
"tf.Tensor(4386.221, shape=(), dtype=float32)\n",
"tf.Tensor(4300.3975, shape=(), dtype=float32)\n",
"tf.Tensor(4218.086, shape=(), dtype=float32)\n",
"tf.Tensor(4139.177, shape=(), dtype=float32)\n",
"tf.Tensor(4063.5273, shape=(), dtype=float32)\n",
"tf.Tensor(4452.015, shape=(), dtype=float32)\n",
"tf.Tensor(4372.8394, shape=(), dtype=float32)\n",
"tf.Tensor(4294.2334, shape=(), dtype=float32)\n",
"tf.Tensor(4216.547, shape=(), dtype=float32)\n",
"tf.Tensor(4140.0522, shape=(), dtype=float32)\n",
"tf.Tensor(4064.9604, shape=(), dtype=float32)\n",
"tf.Tensor(3991.4253, shape=(), dtype=float32)\n"
]
}
],
"source": [
"def merge_encoder(n_in):\n",
" a = tf.keras.layers.Input(n_in)\n",
" b = tf.keras.layers.Input(n_in)\n",
" c = tf.keras.layers.Concatenate()([a,b])\n",
" h = tf.keras.layers.Dense(n_in)(c)\n",
" o = tf.keras.layers.Dense(n_in * 2)(h)\n",
" merge = tf.keras.models.Model(inputs=[a, b], outputs=[h, c, o])\n",
" return merge\n",
"\n",
"\n",
"class Node:\n",
" \n",
" def __init__(self, i, embedding, score, l = None, r = None):\n",
" self.i = i\n",
" self.score = score\n",
" self.embedding = embedding\n",
" self.left = l\n",
" self.right = r\n",
" \n",
" def print(self, offset=\"\"):\n",
" print(\"{} {} {} {}\".format(offset, self.i, self.score, np.mean(self.embeding)))\n",
" if self.left is not None and self.right is not None:\n",
" self.left.print(offset + \"\\t\")\n",
" self.right.print(offset + \"\\t\")\n",
"\n",
" def merge(self, other, merger):\n",
" merged = merger([self.embedding, other.embedding])\n",
" h = merged[0]\n",
" c = merged[1]\n",
" y = merged[2]\n",
" score = tf.nn.l2_loss(y - c) + self.score + other.score\n",
" return Node(-1, h, score, self, other)\n",
" \n",
" \n",
"def ts2leafs(ts):\n",
" return [Node(i,ts[i], tf.constant(0.0)) for i in range(0, len(ts))]\n",
" \n",
" \n",
"def merge(x, m):\n",
" if len(x) > 1:\n",
" min_loss = float('inf')\n",
" min_node = None\n",
" min_i = 0\n",
" min_j = 0\n",
" for i in range(len(x)):\n",
" for j in range(len(x)):\n",
" if i < j:\n",
" node = x[i].merge(x[j], m)\n",
" if node.score < min_loss:\n",
" min_node = node\n",
" min_loss = node.score\n",
" min_i = i\n",
" min_j = j\n",
" next_x = x.copy()\n",
" next_x[min_i] = min_node\n",
" next_x = [next_x[idx] for idx in range(0, len(x)) if idx != min_j]\n",
" return merge(next_x, m)\n",
" else:\n",
" return x[0]\n",
" \n",
"\n",
"m = merge_encoder(10)\n",
"x = ts2leafs([np.ones((1, 10)) * i for i in range(0, 10)] + [np.ones((1, 10)) * i for i in range(0, 10)])\n",
"optimizer = tf.keras.optimizers.Adam()\n",
"for i in range(0, 100):\n",
" with tf.GradientTape(watch_accessed_variables=True) as tape:\n",
" tape.watch(m.variables) \n",
" node = merge(x, m)\n",
" print(node.score)\n",
" g = tape.gradient(node.score, m.variables)\n",
" optimizer.apply_gradients(zip(g, m.variables))\n",
"m.save('merger.h5')"
]
},
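{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal usage sketch, assuming the merger trained above was saved as `merger.h5`: reload it with `tf.keras.models.load_model` and apply it to any pair of `(1, 10)` vectors; the first of its three outputs is the merged embedding. The two example vectors below are placeholders, not data from the training run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: reload the trained merger and embed a new pair of vectors.\n",
"merger = tf.keras.models.load_model('merger.h5')\n",
"a = np.ones((1, 10), dtype=np.float32)   # placeholder input\n",
"b = np.zeros((1, 10), dtype=np.float32)  # placeholder input\n",
"h, c, o = merger([a, b])  # h: merged embedding, c: concatenation, o: reconstruction of c\n",
"print(h.shape)  # (1, 10)"
]
},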
{
"cell_type": "code",
"execution_count": 48,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" -1 31.54754066467285\n",
"\t -1 26.951189041137695\n",
"\t\t -1 24.700815200805664\n",
"\t\t\t -1 23.471729278564453\n",
"\t\t\t\t -1 22.11617088317871\n",
"\t\t\t\t\t -1 19.61811637878418\n",
"\t\t\t\t\t\t -1 15.229199409484863\n",
"\t\t\t\t\t\t\t -1 9.397011756896973\n",
"\t\t\t\t\t\t\t\t -1 3.7285311222076416\n",
"\t\t\t\t\t\t\t\t\t -1 0.11553169786930084\n",
"\t\t\t\t\t\t\t\t\t\t 0 0.0\n",
"\t\t\t\t\t\t\t\t\t\t 10 0.0\n",
"\t\t\t\t\t\t\t\t\t -1 0.08206614851951599\n",
"\t\t\t\t\t\t\t\t\t\t 1 0.0\n",
"\t\t\t\t\t\t\t\t\t\t 11 0.0\n",
"\t\t\t\t\t\t\t\t -1 0.06779717653989792\n",
"\t\t\t\t\t\t\t\t\t 2 0.0\n",
"\t\t\t\t\t\t\t\t\t 12 0.0\n",
"\t\t\t\t\t\t\t -1 0.07272468507289886\n",
"\t\t\t\t\t\t\t\t 3 0.0\n",
"\t\t\t\t\t\t\t\t 13 0.0\n",
"\t\t\t\t\t\t -1 0.09684870392084122\n",
"\t\t\t\t\t\t\t 4 0.0\n",
"\t\t\t\t\t\t\t 14 0.0\n",
"\t\t\t\t\t -1 0.14016912877559662\n",
"\t\t\t\t\t\t 5 0.0\n",
"\t\t\t\t\t\t 15 0.0\n",
"\t\t\t\t -1 0.20268720388412476\n",
"\t\t\t\t\t 6 0.0\n",
"\t\t\t\t\t 16 0.0\n",
"\t\t\t -1 0.28439754247665405\n",
"\t\t\t\t 7 0.0\n",
"\t\t\t\t 17 0.0\n",
"\t\t -1 0.3853088319301605\n",
"\t\t\t 8 0.0\n",
"\t\t\t 18 0.0\n",
"\t -1 0.5054189562797546\n",
"\t\t 9 0.0\n",
"\t\t 19 0.0\n"
]
}
],
"source": [
"node.print()"
]
},
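{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small sketch for reading the tree, assuming `node` is the root returned by `merge` above: collecting the leaf indices from left to right recovers the merge structure; in the run above every series `i` ends up as the sibling of its identical copy `i + 10`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def leaves(n):\n",
"    # Collect the leaf indices of the merge tree from left to right.\n",
"    if n.left is None and n.right is None:\n",
"        return [n.i]\n",
"    return leaves(n.left) + leaves(n.right)\n",
"\n",
"print(leaves(node))"
]
},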
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
}
},
"nbformat": 4,
"nbformat_minor": 4
}