@atamborrino
Last active June 18, 2018 08:03
Example of using the TensorFlow Core API to train a linear regression model with standard (z-score) normalization of the data baked into the TF computational graph
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1.8.0\n"
]
}
],
"source": [
"import numpy as np\n",
"import tensorflow as tf\n",
"\n",
"print(tf.__version__)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Describe TF Graph: standard normalization of inputs and ouputs (z-score) + linear regression\n",
"\n",
"batch_size = 100\n",
"\n",
"a = tf.get_variable(\"a\", [1], dtype=tf.float32)\n",
"b = tf.get_variable(\"b\", [1], dtype=tf.float32)\n",
"\n",
"x = tf.placeholder(tf.float32, shape=[batch_size])\n",
"x_mean = tf.placeholder(tf.float32, shape=[1])\n",
"x_std = tf.placeholder(tf.float32, shape=[1])\n",
"x_norm = (x - x_mean) / x_std\n",
"\n",
"y = a * x_norm + b\n",
"\n",
"y_true = tf.placeholder(tf.float32, shape=[batch_size])\n",
"y_mean = tf.placeholder(tf.float32, shape=[1])\n",
"y_std = tf.placeholder(tf.float32, shape=[1])\n",
"y_true_norm = (y_true - y_mean) / y_std\n",
"\n",
"loss = tf.losses.mean_squared_error(labels=y_true_norm, predictions=y)\n",
"\n",
"optimizer = tf.train.GradientDescentOptimizer(0.01)\n",
"train = optimizer.minimize(loss)\n",
"\n",
"# note: this part of the graph will be used only at inference time\n",
"y_denorm = y * y_std + y_mean\n",
"\n",
"# write Graph to TensorBoard\n",
"writer = tf.summary.FileWriter('.')\n",
"writer.add_graph(tf.get_default_graph())"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Loss: 3.5527137e-13\n"
]
},
{
"data": {
"text/plain": [
"array([ 0.9941406, 2.9941406, 4.9941406, 6.9941406, 8.994141 ,\n",
" 10.994141 , 12.993164 , 14.993164 , 16.993164 , 18.99414 ,\n",
" 20.99414 , 22.99414 , 24.99414 , 26.99414 , 28.99414 ,\n",
" 30.99414 , 32.993164 , 34.993164 , 36.993164 , 38.99414 ,\n",
" 40.99414 , 42.99414 , 44.99414 , 46.99414 , 48.99414 ,\n",
" 50.99414 , 52.993164 , 54.993164 , 56.993164 , 58.99414 ,\n",
" 60.99414 , 62.99414 , 64.99414 , 66.99414 , 68.99414 ,\n",
" 70.99414 , 72.993164 , 74.993164 , 76.993164 , 78.99414 ,\n",
" 80.99414 , 82.99414 , 84.99414 , 86.99414 , 88.99414 ,\n",
" 90.99414 , 92.993164 , 94.993164 , 96.99414 , 98.99414 ,\n",
" 100.99414 , 102.99414 , 104.99414 , 106.99414 , 108.99414 ,\n",
" 110.99414 , 112.993164 , 114.993164 , 116.99414 , 118.99414 ,\n",
" 120.99414 , 122.99414 , 124.99414 , 126.99414 , 128.99414 ,\n",
" 130.99414 , 132.99316 , 134.99316 , 136.99414 , 138.99414 ,\n",
" 140.99414 , 142.99414 , 144.99414 , 146.99414 , 148.99414 ,\n",
" 150.99316 , 152.99316 , 154.99316 , 156.99414 , 158.99414 ,\n",
" 160.99414 , 162.99414 , 164.99414 , 166.99414 , 168.99414 ,\n",
" 170.99316 , 172.99316 , 174.99316 , 176.99414 , 178.99414 ,\n",
" 180.99414 , 182.99414 , 184.99414 , 186.99414 , 188.99414 ,\n",
" 190.99316 , 192.99316 , 194.99316 , 196.99414 , 198.99414 ],\n",
" dtype=float32)"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Data\n",
"\n",
"x_values = np.arange(10000)\n",
"y_true_values = x_values * 2 + 1\n",
"\n",
"epoch = 100\n",
"\n",
"training_data_moments = {\n",
" x_mean:[np.mean(x_values)],\n",
" x_std:[np.std(x_values)],\n",
" y_mean:[np.mean(y_true_values)],\n",
" y_std:[np.std(y_true_values)]\n",
"}\n",
"\n",
"# Run training\n",
"sess = tf.Session()\n",
"\n",
"init = tf.global_variables_initializer()\n",
"sess.run(init)\n",
"\n",
"for _ in range(epoch):\n",
" for i in range(0, len(x_values), batch_size):\n",
" x_batch = x_values[i:i+batch_size]\n",
" y_batch = y_true_values[i:i+batch_size]\n",
" _, current_loss = sess.run((train, loss), feed_dict={x:x_batch, y_true:y_batch, **training_data_moments})\n",
" \n",
"print(\"Loss: \" + str(current_loss))\n",
" \n",
"# Inference (note: x normalization and y denormalization has been serialized into TF Graph)\n",
"y_test = sess.run(y_denorm, feed_dict={x:x_values[0:batch_size], **training_data_moments})\n",
"\n",
"y_test"
]
}
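,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Follow-up sketch (not part of the original gist).** Because the z-score normalization of `x` and the denormalization of `y` live inside the graph, inference on unseen inputs only needs the raw values plus the stored training moments. The batch `x_new` below is a hypothetical example; the placeholders expect exactly `batch_size` values, and the cell assumes `sess` from the training cell is still open."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical inference on unseen x values (assumes the session above is still open)\n",
"# The graph z-scores x, applies the linear model, and denormalizes y in a single run\n",
"x_new = np.arange(20000, 20000 + batch_size)\n",
"y_new = sess.run(y_denorm, feed_dict={x: x_new, **training_data_moments})\n",
"y_new"
]
}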
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}