{
"cells": [
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"from __future__ import print_function\n",
"import tensorflow as tf\n",
"from tensorflow.contrib.learn.python.learn.utils import export\n",
"from tensorflow.contrib.learn.python.learn.utils import input_fn_utils\n",
"from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils\n",
"from tensorflow.contrib.session_bundle import manifest_pb2\n",
"from tensorflow.contrib.session_bundle import exporter\n",
"import pandas as pd\n",
"\n",
"tf.logging.set_verbosity(tf.logging.DEBUG)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>id</th>\n",
" <th>first_name</th>\n",
" <th>last_name</th>\n",
" <th>email</th>\n",
" <th>age</th>\n",
" <th>gender</th>\n",
" <th>state</th>\n",
" <th>purchase_recency</th>\n",
" <th>purchase_frequency</th>\n",
" <th>monetary_value</th>\n",
" <th>interaction_recency</th>\n",
" <th>interaction_frequency</th>\n",
" <th>target</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>88838</td>\n",
" <td>Sarina</td>\n",
" <td>Friesen</td>\n",
" <td>sarina_friesen@example.org</td>\n",
" <td>20</td>\n",
" <td>n/a</td>\n",
" <td>DE-BE</td>\n",
" <td>1</td>\n",
" <td>5</td>\n",
" <td>9</td>\n",
" <td>10</td>\n",
" <td>5</td>\n",
" <td>False</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>31524</td>\n",
" <td>Nella</td>\n",
" <td>Heller</td>\n",
" <td>heller_nella@example.org</td>\n",
" <td>10</td>\n",
" <td>male</td>\n",
" <td>n/a</td>\n",
" <td>8</td>\n",
" <td>8</td>\n",
" <td>10</td>\n",
" <td>2</td>\n",
" <td>3</td>\n",
" <td>True</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>3118</td>\n",
" <td>Alisa</td>\n",
" <td>Deckow</td>\n",
" <td>alisa.deckow@example.org</td>\n",
" <td>40</td>\n",
" <td>female</td>\n",
" <td>DE-NW</td>\n",
" <td>0</td>\n",
" <td>9</td>\n",
" <td>7</td>\n",
" <td>6</td>\n",
" <td>9</td>\n",
" <td>False</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>93388</td>\n",
" <td>Edmond</td>\n",
" <td>Zemlak</td>\n",
" <td>zemlak_edmond@example.net</td>\n",
" <td>25</td>\n",
" <td>n/a</td>\n",
" <td>DE-MV</td>\n",
" <td>4</td>\n",
" <td>10</td>\n",
" <td>7</td>\n",
" <td>4</td>\n",
" <td>9</td>\n",
" <td>True</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>32469</td>\n",
" <td>Garry</td>\n",
" <td>Moen</td>\n",
" <td>moen.garry@example.net</td>\n",
" <td>40</td>\n",
" <td>female</td>\n",
" <td>DE-BE</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>8</td>\n",
" <td>False</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" id first_name last_name email age gender state \\\n",
"0 88838 Sarina Friesen sarina_friesen@example.org 20 n/a DE-BE \n",
"1 31524 Nella Heller heller_nella@example.org 10 male n/a \n",
"2 3118 Alisa Deckow alisa.deckow@example.org 40 female DE-NW \n",
"3 93388 Edmond Zemlak zemlak_edmond@example.net 25 n/a DE-MV \n",
"4 32469 Garry Moen moen.garry@example.net 40 female DE-BE \n",
"\n",
" purchase_recency purchase_frequency monetary_value interaction_recency \\\n",
"0 1 5 9 10 \n",
"1 8 8 10 2 \n",
"2 0 9 7 6 \n",
"3 4 10 7 4 \n",
"4 3 3 3 3 \n",
"\n",
" interaction_frequency target \n",
"0 5 False \n",
"1 3 True \n",
"2 9 False \n",
"3 9 True \n",
"4 8 False "
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model_dir = \"/models/targeting\"\n",
"\n",
"train_file = \"data/customers-rfmi.train.csv\"\n",
"test_file = \"data/customers-rfmi.test.csv\"\n",
"\n",
"LABEL_COLUMN = \"target\"\n",
"COLUMNS = [\"id\", \"gender\", \"age\", LABEL_COLUMN]\n",
"CATEGORICAL_COLUMNS = [\"gender\", \"state\"]\n",
"CONTINUOUS_COLUMNS = [\"age\",\n",
" \"purchase_recency\", \"purchase_frequency\", \"monetary_value\",\n",
" \"interaction_recency\", \"interaction_frequency\"]\n",
"\n",
"def load_df(filename):\n",
" df = pd.read_csv(filename, skipinitialspace=True)\n",
" df['gender'] = df['gender'].fillna('n/a')\n",
" df['state'] = df['state'].fillna('n/a')\n",
" return df\n",
" \n",
"df_train, df_test = load_df(train_file), load_df(test_file)\n",
"df_train.head()"
]
},
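{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional sanity check before training: the `target` label is boolean, so its mean is the positive rate. A minimal, illustrative sketch over the `df_train` and `df_test` frames loaded above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Quick, illustrative look at the data before training.\n",
"print(\"train rows: %d, test rows: %d\" % (len(df_train), len(df_test)))\n",
"# Fraction of positive targets (the boolean column's mean).\n",
"print(\"train positive rate: %.4f\" % df_train[LABEL_COLUMN].mean())\n",
"print(\"test positive rate:  %.4f\" % df_test[LABEL_COLUMN].mean())"
]
},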
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def input_fn(df):\n",
" # Creates a dictionary mapping from each continuous feature column name (k) to\n",
" # the values of that column stored in a constant Tensor.\n",
" continuous_cols = {k: tf.constant(df[k].values)\n",
" for k in CONTINUOUS_COLUMNS}\n",
"\n",
" # Creates a dictionary mapping from each categorical feature column name (k)\n",
" # to the values of that column stored in a tf.SparseTensor.\n",
" categorical_cols = {k: tf.SparseTensor(\n",
" indices=[[i, 0] for i in range(df[k].size)],\n",
" values=df[k].values,\n",
" shape=[df[k].size, 1])\n",
" for k in CATEGORICAL_COLUMNS}\n",
" \n",
" # Merges the two dictionaries into one.\n",
" feature_cols = dict(continuous_cols.items() + categorical_cols.items())\n",
" # Converts the label column into a constant Tensor.\n",
" label = tf.constant(df[LABEL_COLUMN].values)\n",
" # Returns the feature columns and the label.\n",
" return feature_cols, label"
]
},
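{
"cell_type": "markdown",
"metadata": {},
"source": [
"To make the shapes concrete, `input_fn` can be exercised on a tiny hand-made `DataFrame`: continuous columns come back as dense constant `Tensor`s, categorical columns as `SparseTensor`s, and the label as a boolean `Tensor`. A minimal sketch with invented sample values:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustrative check: run input_fn on a tiny, made-up DataFrame and inspect\n",
"# what it yields (values below are invented, not from the real CSVs).\n",
"sample_df = pd.DataFrame({\n",
"    \"age\": [20, 40],\n",
"    \"purchase_recency\": [1, 3],\n",
"    \"purchase_frequency\": [5, 3],\n",
"    \"monetary_value\": [9, 3],\n",
"    \"interaction_recency\": [10, 3],\n",
"    \"interaction_frequency\": [5, 8],\n",
"    \"gender\": [\"n/a\", \"female\"],\n",
"    \"state\": [\"DE-BE\", \"DE-NW\"],\n",
"    LABEL_COLUMN: [False, True],\n",
"})\n",
"\n",
"features, label = input_fn(sample_df)\n",
"with tf.Session() as sess:\n",
"    print(sess.run(features[\"age\"]))     # dense int64 Tensor of ages\n",
"    print(sess.run(features[\"gender\"]))  # SparseTensorValue of strings\n",
"    print(sess.run(label))               # boolean targets"
]
},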
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sparse base columns.\n",
"gender = tf.contrib.layers.sparse_column_with_keys(column_name=\"gender\",\n",
" keys=[\"female\", \"male\"],\n",
" combiner=\"sum\")\n",
"state = tf.contrib.layers.sparse_column_with_hash_bucket(\"state\", hash_bucket_size=100, combiner=\"sum\")\n",
"\n",
"\n",
"# Continuous base columns.\n",
"age = tf.contrib.layers.real_valued_column(\"age\")\n",
"purchase_recency = tf.contrib.layers.real_valued_column(\"purchase_recency\")\n",
"purchase_frequency = tf.contrib.layers.real_valued_column(\"purchase_frequency\")\n",
"monetary_value = tf.contrib.layers.real_valued_column(\"monetary_value\")\n",
"interaction_recency = tf.contrib.layers.real_valued_column(\"interaction_recency\")\n",
"interaction_frequency = tf.contrib.layers.real_valued_column(\"interaction_frequency\")\n",
"\n",
"# Transformations.\n",
"age_buckets = tf.contrib.layers.bucketized_column(age,\n",
" boundaries=[\n",
" 10, 14, 18, 21, \n",
" 25, 30, 35, 40,\n",
" 50, 55, 60, 65\n",
" ])\n",
"age_x_gender = tf.contrib.layers.crossed_column([age_buckets, gender], hash_bucket_size=int(1e4), combiner='sum')\n",
"\n",
"# embeddings\n",
"gender_embedding = tf.contrib.layers.embedding_column(gender, dimension=8, combiner=\"mean\")\n",
"state_embedding = tf.contrib.layers.embedding_column(state, dimension=8, combiner=\"mean\")"
]
},
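{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a rough illustration (not the TF op itself) of how the `age_buckets` boundaries partition ages into left-inclusive buckets, `numpy.digitize` gives the same bucket indices:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Rough stand-in for the bucketization performed by age_buckets (illustration only).\n",
"import numpy as np\n",
"\n",
"boundaries = [10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65]\n",
"sample_ages = [8, 20, 40, 70]\n",
"# np.digitize assigns i such that boundaries[i-1] <= x < boundaries[i], which\n",
"# mirrors the left-inclusive buckets of bucketized_column (bucket 0 is x < 10).\n",
"print(list(zip(sample_ages, np.digitize(sample_ages, boundaries))))"
]
},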
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO:tensorflow:Using default config.\n",
"INFO:tensorflow:Using config: {'save_summary_steps': 100, '_num_ps_replicas': 0, '_task_type': None, '_environment': 'local', '_is_chief': True, 'save_checkpoints_secs': 600, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7ff0c9f7aa10>, 'tf_config': gpu_options {\n",
" per_process_gpu_memory_fraction: 1\n",
"}\n",
", '_task_id': 0, 'tf_random_seed': None, 'keep_checkpoint_every_n_hours': 10000, '_evaluation_master': '', 'save_checkpoints_steps': None, '_master': '', 'keep_checkpoint_max': 5}\n",
"INFO:tensorflow:Using default config.\n",
"INFO:tensorflow:Using config: {'save_summary_steps': 100, '_num_ps_replicas': 0, '_task_type': None, '_environment': 'local', '_is_chief': True, 'save_checkpoints_secs': 600, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7ff0f473df90>, 'tf_config': gpu_options {\n",
" per_process_gpu_memory_fraction: 1\n",
"}\n",
", '_task_id': 0, 'tf_random_seed': None, 'keep_checkpoint_every_n_hours': 10000, '_evaluation_master': '', 'save_checkpoints_steps': None, '_master': '', 'keep_checkpoint_max': 5}\n",
"INFO:tensorflow:Using default config.\n",
"INFO:tensorflow:Using config: {'save_summary_steps': 100, '_num_ps_replicas': 0, '_task_type': None, '_environment': 'local', '_is_chief': True, 'save_checkpoints_secs': 600, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7ff0c8c94490>, 'tf_config': gpu_options {\n",
" per_process_gpu_memory_fraction: 1\n",
"}\n",
", '_task_id': 0, 'tf_random_seed': None, 'keep_checkpoint_every_n_hours': 10000, '_evaluation_master': '', 'save_checkpoints_steps': None, '_master': '', 'keep_checkpoint_max': 5}\n"
]
}
],
"source": [
"wide_columns = [\n",
" gender, state,\n",
" purchase_recency, purchase_frequency, monetary_value,\n",
" interaction_recency, interaction_frequency,\n",
" age_buckets, age_x_gender\n",
"]\n",
"\n",
"deep_columns = [\n",
" gender_embedding, state_embedding,\n",
" purchase_recency, purchase_frequency, monetary_value,\n",
" interaction_recency, interaction_frequency\n",
"]\n",
"\n",
"wide = tf.contrib.learn.LinearClassifier(model_dir=model_dir + '_linear',\n",
" feature_columns=wide_columns,\n",
" enable_centered_bias=True)\n",
"\n",
"deep = tf.contrib.learn.DNNClassifier(model_dir=model_dir + '_deep',\n",
" feature_columns=deep_columns,\n",
" hidden_units=[100, 50])\n",
"\n",
"hybrid = tf.contrib.learn.DNNLinearCombinedClassifier(\n",
" model_dir=model_dir + '_hybrid',\n",
" linear_feature_columns=wide_columns,\n",
" dnn_feature_columns=deep_columns,\n",
" dnn_hidden_units=[100, 50])"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": false,
"scrolled": false
},
"outputs": [],
"source": [
"def fix_feature_spec(feature_spec):\n",
" for key, feature in feature_spec.items():\n",
" if isinstance(feature, tf.VarLenFeature):\n",
" feature_spec[key] = tf.FixedLenFeature(shape=[1], dtype=feature.dtype, default_value=None)\n",
"\n",
"def train_and_export(model, feature_columns):\n",
" model.fit(input_fn=lambda: input_fn(df_train), steps=1000)\n",
" results = model.evaluate(input_fn=lambda: input_fn(df_test), steps=1)\n",
" for key in sorted(results):\n",
" print(\"%s: %s\" % (key, results[key]))\n",
" \n",
" feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(feature_columns)\n",
" fix_feature_spec(feature_spec)\n",
"\n",
" export_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)\n",
"\n",
" export_path = model.export_savedmodel(model.model_dir, export_input_fn)\n",
" return export_path "
]
},
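{
"cell_type": "markdown",
"metadata": {},
"source": [
"Because `train_and_export` uses `build_parsing_serving_input_fn`, the serving signature of each exported SavedModel expects serialized `tf.Example` protos keyed by the names in the parsing spec. A minimal sketch of how a single request could be encoded, with made-up feature values:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch: encode one made-up customer as a serialized tf.Example, matching the\n",
"# parsing feature spec that build_parsing_serving_input_fn expects at serving time.\n",
"example = tf.train.Example(features=tf.train.Features(feature={\n",
"    \"age\": tf.train.Feature(float_list=tf.train.FloatList(value=[40.0])),\n",
"    \"purchase_recency\": tf.train.Feature(float_list=tf.train.FloatList(value=[3.0])),\n",
"    \"purchase_frequency\": tf.train.Feature(float_list=tf.train.FloatList(value=[3.0])),\n",
"    \"monetary_value\": tf.train.Feature(float_list=tf.train.FloatList(value=[3.0])),\n",
"    \"interaction_recency\": tf.train.Feature(float_list=tf.train.FloatList(value=[3.0])),\n",
"    \"interaction_frequency\": tf.train.Feature(float_list=tf.train.FloatList(value=[8.0])),\n",
"    \"gender\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b\"female\"])),\n",
"    \"state\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b\"DE-BE\"]))\n",
"}))\n",
"serialized = example.SerializeToString()\n",
"print(len(serialized))"
]
},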
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"collapsed": false,
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:450 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:450 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:450 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"DEBUG:tensorflow:Setting feature info for mode train to {'purchase_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False), 'gender': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'age': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False), 'interaction_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False), 'interaction_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False), 'state': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'monetary_value': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False), 'purchase_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False)}.\n",
"DEBUG:tensorflow:Setting labels info for mode train to TensorSignature(dtype=tf.bool, shape=TensorShape([Dimension(80000)]), is_sparse=False)\n",
"DEBUG:tensorflow:Transforming feature_column _BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65))\n",
"DEBUG:tensorflow:Transforming feature_column _CrossedColumn(columns=(_BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65)), _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)), hash_bucket_size=10000, hash_key=None, combiner='sum', ckpt_to_load_from=None, tensor_name_in_ckpt=None)\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/layers/python/layers/feature_column.py:1751 in insert_transformed_feature.: calling sparse_feature_cross (from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op) with hash_key=None is deprecated and will be removed after 2016-11-20.\n",
"Instructions for updating:\n",
"The default behavior of sparse_feature_cross is changing, the default\n",
"value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n",
"From that point on sparse_feature_cross will always use FingerprintCat64\n",
"to concatenate the feature fingerprints. And the underlying\n",
"_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n",
"as deprecated.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)\n",
"DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string)\n",
"INFO:tensorflow:Create CheckpointSaverHook.\n",
"INFO:tensorflow:loss = 0.671348, step = 7001\n",
"INFO:tensorflow:Saving checkpoints for 7001 into /models/targeting_linear/model.ckpt.\n",
"WARNING:tensorflow:*******************************************************\n",
"WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
"WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
"WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
"WARNING:tensorflow:now on by default.\n",
"WARNING:tensorflow:*******************************************************\n",
"INFO:tensorflow:loss = 0.670908, step = 7101\n",
"INFO:tensorflow:global_step/sec: 16.8636\n",
"INFO:tensorflow:loss = 0.670837, step = 7201\n",
"INFO:tensorflow:global_step/sec: 21.518\n",
"INFO:tensorflow:loss = 0.670809, step = 7301\n",
"INFO:tensorflow:global_step/sec: 21.7738\n",
"INFO:tensorflow:loss = 0.670792, step = 7401\n",
"INFO:tensorflow:global_step/sec: 21.8282\n",
"INFO:tensorflow:loss = 0.670781, step = 7501\n",
"INFO:tensorflow:global_step/sec: 20.6731\n",
"INFO:tensorflow:loss = 0.670775, step = 7601\n",
"INFO:tensorflow:global_step/sec: 20.3416\n",
"INFO:tensorflow:loss = 0.670768, step = 7701\n",
"INFO:tensorflow:global_step/sec: 21.4195\n",
"INFO:tensorflow:loss = 0.670762, step = 7801\n",
"INFO:tensorflow:global_step/sec: 21.845\n",
"INFO:tensorflow:loss = 0.670759, step = 7901\n",
"INFO:tensorflow:global_step/sec: 21.8376\n",
"INFO:tensorflow:Saving checkpoints for 8000 into /models/targeting_linear/model.ckpt.\n",
"WARNING:tensorflow:*******************************************************\n",
"WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
"WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
"WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
"WARNING:tensorflow:now on by default.\n",
"WARNING:tensorflow:*******************************************************\n",
"INFO:tensorflow:Loss for final step: 0.670755.\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:458 in evaluate.: calling evaluate (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:458 in evaluate.: calling evaluate (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:458 in evaluate.: calling evaluate (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"DEBUG:tensorflow:Setting feature info for mode eval to {'purchase_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False), 'gender': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'age': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False), 'interaction_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False), 'interaction_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False), 'state': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'monetary_value': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False), 'purchase_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False)}.\n",
"DEBUG:tensorflow:Setting labels info for mode eval to TensorSignature(dtype=tf.bool, shape=TensorShape([Dimension(20000)]), is_sparse=False)\n",
"DEBUG:tensorflow:Transforming feature_column _BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65))\n",
"DEBUG:tensorflow:Transforming feature_column _CrossedColumn(columns=(_BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65)), _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)), hash_bucket_size=10000, hash_key=None, combiner='sum', ckpt_to_load_from=None, tensor_name_in_ckpt=None)\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/layers/python/layers/feature_column.py:1751 in insert_transformed_feature.: calling sparse_feature_cross (from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op) with hash_key=None is deprecated and will be removed after 2016-11-20.\n",
"Instructions for updating:\n",
"The default behavior of sparse_feature_cross is changing, the default\n",
"value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n",
"From that point on sparse_feature_cross will always use FingerprintCat64\n",
"to concatenate the feature fingerprints. And the underlying\n",
"_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n",
"as deprecated.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)\n",
"DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string)\n",
"INFO:tensorflow:Restored model from /models/targeting_linear\n",
"INFO:tensorflow:Eval steps [0,1) for training step 8000.\n",
"INFO:tensorflow:Saving evaluation summary for step 8000: accuracy = 0.57405, accuracy/baseline_label_mean = 0.4257, accuracy/threshold_0.500000_mean = 0.57405, auc = 0.589328, labels/actual_label_mean = 0.4257, labels/prediction_mean = 0.429727, loss = 0.670349, precision/positive_threshold_0.500000_mean = 0.499326, recall/positive_threshold_0.500000_mean = 0.217642\n",
"accuracy: 0.57405\n",
"accuracy/baseline_label_mean: 0.4257\n",
"accuracy/threshold_0.500000_mean: 0.57405\n",
"auc: 0.589328\n",
"global_step: 8000\n",
"labels/actual_label_mean: 0.4257\n",
"labels/prediction_mean: 0.429727\n",
"loss: 0.670349\n",
"precision/positive_threshold_0.500000_mean: 0.499326\n",
"recall/positive_threshold_0.500000_mean: 0.217642\n",
"WARNING:tensorflow:export_savedmodel (from tensorflow.contrib.learn.python.learn.estimators.linear) is experimental and may change or be removed at any time, and without warning.\n",
"WARNING:tensorflow:export_savedmodel (from tensorflow.contrib.learn.python.learn.estimators.estimator) is experimental and may change or be removed at any time, and without warning.\n",
"DEBUG:tensorflow:Transforming feature_column _BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65))\n",
"DEBUG:tensorflow:Transforming feature_column _CrossedColumn(columns=(_BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65)), _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)), hash_bucket_size=10000, hash_key=None, combiner='sum', ckpt_to_load_from=None, tensor_name_in_ckpt=None)\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/layers/python/layers/feature_column.py:1751 in insert_transformed_feature.: calling sparse_feature_cross (from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op) with hash_key=None is deprecated and will be removed after 2016-11-20.\n",
"Instructions for updating:\n",
"The default behavior of sparse_feature_cross is changing, the default\n",
"value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n",
"From that point on sparse_feature_cross will always use FingerprintCat64\n",
"to concatenate the feature fingerprints. And the underlying\n",
"_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n",
"as deprecated.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)\n",
"DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string)\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py:1203 in export_savedmodel.: initialize_local_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\n",
"Instructions for updating:\n",
"Use `tf.local_variables_initializer` instead.\n",
"INFO:tensorflow:Assets added to graph.\n",
"INFO:tensorflow:No assets to write.\n",
"INFO:tensorflow:SavedModel written to: /models/targeting_linear/1481461170903/saved_model.pb\n"
]
},
{
"data": {
"text/plain": [
"'/models/targeting_linear/1481461170903'"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train_and_export(wide, wide_columns)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": false,
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn.py:340 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn.py:340 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn.py:340 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"DEBUG:tensorflow:Setting feature info for mode train to {'purchase_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'gender': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'age': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'interaction_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'interaction_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'state': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'monetary_value': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'purchase_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False)}.\n",
"DEBUG:tensorflow:Setting labels info for mode train to TensorSignature(dtype=tf.bool, shape=TensorShape([Dimension(100000)]), is_sparse=False)\n",
"DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f20563876e0>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
"DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f2056387500>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"INFO:tensorflow:Create CheckpointSaverHook.\n",
"INFO:tensorflow:loss = 1.43177, step = 2001\n",
"INFO:tensorflow:Saving checkpoints for 2001 into /models/targeting_deep/model.ckpt.\n",
"WARNING:tensorflow:*******************************************************\n",
"WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
"WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
"WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
"WARNING:tensorflow:now on by default.\n",
"WARNING:tensorflow:*******************************************************\n",
"INFO:tensorflow:loss = 0.667609, step = 2101\n",
"INFO:tensorflow:global_step/sec: 1.84492\n",
"INFO:tensorflow:loss = 0.665916, step = 2201\n",
"INFO:tensorflow:global_step/sec: 1.73343\n",
"INFO:tensorflow:loss = 0.664725, step = 2301\n",
"INFO:tensorflow:global_step/sec: 1.62747\n",
"INFO:tensorflow:loss = 0.665076, step = 2401\n",
"INFO:tensorflow:global_step/sec: 1.24979\n",
"INFO:tensorflow:loss = 0.663669, step = 2501\n",
"INFO:tensorflow:global_step/sec: 1.50628\n",
"INFO:tensorflow:loss = 0.66299, step = 2601\n",
"INFO:tensorflow:global_step/sec: 1.34333\n",
"INFO:tensorflow:loss = 0.66246, step = 2701\n",
"INFO:tensorflow:global_step/sec: 1.23336\n",
"INFO:tensorflow:loss = 0.661846, step = 2801\n",
"INFO:tensorflow:global_step/sec: 1.50963\n",
"INFO:tensorflow:Saving checkpoints for 2867 into /models/targeting_deep/model.ckpt.\n",
"WARNING:tensorflow:*******************************************************\n",
"WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
"WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
"WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
"WARNING:tensorflow:now on by default.\n",
"WARNING:tensorflow:*******************************************************\n",
"INFO:tensorflow:loss = 0.661311, step = 2901\n",
"INFO:tensorflow:global_step/sec: 1.07092\n",
"INFO:tensorflow:Saving checkpoints for 3000 into /models/targeting_deep/model.ckpt.\n",
"WARNING:tensorflow:*******************************************************\n",
"WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
"WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
"WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
"WARNING:tensorflow:now on by default.\n",
"WARNING:tensorflow:*******************************************************\n",
"INFO:tensorflow:Loss for final step: 0.660779.\n",
"WARNING:tensorflow:export_savedmodel (from tensorflow.contrib.learn.python.learn.estimators.dnn) is experimental and may change or be removed at any time, and without warning.\n",
"WARNING:tensorflow:export_savedmodel (from tensorflow.contrib.learn.python.learn.estimators.estimator) is experimental and may change or be removed at any time, and without warning.\n",
"DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f20563876e0>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
"DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f2056387500>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py:1203 in export_savedmodel.: initialize_local_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\n",
"Instructions for updating:\n",
"Use `tf.local_variables_initializer` instead.\n",
"INFO:tensorflow:Assets added to graph.\n",
"INFO:tensorflow:No assets to write.\n",
"INFO:tensorflow:SavedModel written to: /models/targeting_deep/1481293854053/saved_model.pb\n"
]
},
{
"data": {
"text/plain": [
"'/models/targeting_deep/1481293854053'"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train_and_export(deep, deep_columns)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"collapsed": false,
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:751 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:751 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:751 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.\n",
"Instructions for updating:\n",
"Estimator is decoupled from Scikit Learn interface by moving into\n",
"separate class SKCompat. Arguments x, y and batch_size are only\n",
"available in the SKCompat class, Estimator will only accept input_fn.\n",
"Example conversion:\n",
" est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
"DEBUG:tensorflow:Setting feature info for mode train to {'purchase_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'gender': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'age': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'interaction_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'interaction_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'state': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'monetary_value': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'purchase_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False)}.\n",
"DEBUG:tensorflow:Setting labels info for mode train to TensorSignature(dtype=tf.bool, shape=TensorShape([Dimension(100000)]), is_sparse=False)\n",
"DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f20563876e0>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
"DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f2056387500>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:366 in _add_hidden_layer_summary.: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
"Instructions for updating:\n",
"Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:367 in _add_hidden_layer_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
"Instructions for updating:\n",
"Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:366 in _add_hidden_layer_summary.: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
"Instructions for updating:\n",
"Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:367 in _add_hidden_layer_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
"Instructions for updating:\n",
"Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:366 in _add_hidden_layer_summary.: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
"Instructions for updating:\n",
"Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:367 in _add_hidden_layer_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
"Instructions for updating:\n",
"Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.\n",
"DEBUG:tensorflow:Transforming feature_column _BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65))\n",
"DEBUG:tensorflow:Transforming feature_column _CrossedColumn(columns=(_BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65)), _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)), hash_bucket_size=10000, hash_key=None, combiner='sum', ckpt_to_load_from=None, tensor_name_in_ckpt=None)\n",
"WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/layers/python/layers/feature_column.py:1751 in insert_transformed_feature.: calling sparse_feature_cross (from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op) with hash_key=None is deprecated and will be removed after 2016-11-20.\n",
"Instructions for updating:\n",
"The default behavior of sparse_feature_cross is changing, the default\n",
"value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n",
"From that point on sparse_feature_cross will always use FingerprintCat64\n",
"to concatenate the feature fingerprints. And the underlying\n",
"_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n",
"as deprecated.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
"WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
"DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)\n",
"DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string)\n",
"INFO:tensorflow:Create CheckpointSaverHook.\n",
"INFO:tensorflow:loss = 1.36845, step = 1006\n",
"INFO:tensorflow:Saving checkpoints for 1006 into /models/targeting_hybrid/model.ckpt.\n",
"WARNING:tensorflow:*******************************************************\n",
"WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
"WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
"WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
"WARNING:tensorflow:now on by default.\n",
"WARNING:tensorflow:*******************************************************\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0mTraceback (most recent call last)",
"\u001b[0;32m<ipython-input-11-273a6ca1390a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtrain_and_export\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhybrid\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwide_columns\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mdeep_columns\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-8-f2badeb6cc49>\u001b[0m in \u001b[0;36mtrain_and_export\u001b[0;34m(model, feature_columns)\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtrain_and_export\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeature_columns\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_fn\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mlambda\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0minput_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf_train\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msteps\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1000\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0mfeature_spec\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontrib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcreate_feature_spec_for_parsing\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfeature_columns\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.pyc\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, input_fn, steps, batch_size, monitors, max_steps)\u001b[0m\n\u001b[1;32m 749\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 750\u001b[0m \u001b[0mmonitors\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhooks\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 751\u001b[0;31m max_steps=max_steps)\n\u001b[0m\u001b[1;32m 752\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 753\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/util/deprecation.pyc\u001b[0m in \u001b[0;36mnew_func\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 245\u001b[0m \u001b[0m_call_location\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdecorator_utils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_qualified_name\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 246\u001b[0m func.__module__, arg_name, date, instructions)\n\u001b[0;32m--> 247\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 248\u001b[0m new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(\n\u001b[1;32m 249\u001b[0m func.__doc__, date, instructions)\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, input_fn, steps, batch_size, monitors, max_steps)\u001b[0m\n\u001b[1;32m 362\u001b[0m \u001b[0msteps\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msteps\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 363\u001b[0m \u001b[0mmonitors\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmonitors\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 364\u001b[0;31m max_steps=max_steps)\n\u001b[0m\u001b[1;32m 365\u001b[0m \u001b[0mlogging\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Loss for final step: %s.'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 366\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc\u001b[0m in \u001b[0;36m_train_model\u001b[0;34m(self, input_fn, steps, feed_fn, init_op, init_feed_fn, init_fn, device_fn, monitors, log_every_steps, fail_on_nan_loss, max_steps)\u001b[0m\n\u001b[1;32m 739\u001b[0m \u001b[0mfail_on_nan_loss\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfail_on_nan_loss\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 740\u001b[0m \u001b[0mhooks\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhooks\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 741\u001b[0;31m max_steps=max_steps)\n\u001b[0m\u001b[1;32m 742\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 743\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extract_metric_update_ops\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0meval_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/graph_actions.pyc\u001b[0m in \u001b[0;36m_monitored_train\u001b[0;34m(graph, output_dir, train_op, loss_op, global_step_tensor, init_op, init_feed_dict, init_fn, log_every_steps, supervisor_is_chief, supervisor_master, supervisor_save_model_secs, supervisor_save_model_steps, keep_checkpoint_max, supervisor_save_summaries_secs, supervisor_save_summaries_steps, feed_fn, steps, fail_on_nan_loss, hooks, max_steps)\u001b[0m\n\u001b[1;32m 299\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0msuper_sess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshould_stop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 300\u001b[0m _, loss = super_sess.run([train_op, loss_op], feed_fn() if feed_fn else\n\u001b[0;32m--> 301\u001b[0;31m None)\n\u001b[0m\u001b[1;32m 302\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 303\u001b[0m \u001b[0msummary_io\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSummaryWriterCache\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/monitored_session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 471\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 472\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 473\u001b[0;31m run_metadata=run_metadata)\n\u001b[0m\u001b[1;32m 474\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 475\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mshould_stop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/monitored_session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 626\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 627\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 628\u001b[0;31m run_metadata=run_metadata)\n\u001b[0m\u001b[1;32m 629\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAbortedError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 630\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/monitored_session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 593\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 594\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 595\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_sess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 596\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 597\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/monitored_session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 727\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 728\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 729\u001b[0;31m run_metadata=run_metadata)\n\u001b[0m\u001b[1;32m 730\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 731\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_hooks\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/monitored_session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 593\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 594\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 595\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_sess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 596\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 597\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 765\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 766\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 767\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 768\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 769\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 963\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 964\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m--> 965\u001b[0;31m feed_dict_string, options, run_metadata)\n\u001b[0m\u001b[1;32m 966\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 967\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1013\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1014\u001b[0m return self._do_call(_run_fn, self._session, feed_dict, fetch_list,\n\u001b[0;32m-> 1015\u001b[0;31m target_list, options, run_metadata)\n\u001b[0m\u001b[1;32m 1016\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1017\u001b[0m return self._do_call(_prun_fn, self._session, handle, feed_dict,\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1020\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1021\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1022\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1023\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1024\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(session, feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1002\u001b[0m return tf_session.TF_Run(session, options,\n\u001b[1;32m 1003\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1004\u001b[0;31m status, run_metadata)\n\u001b[0m\u001b[1;32m 1005\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1006\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msession\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"train_and_export(hybrid, wide_columns + deep_columns)"
]
},
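{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `sparse_feature_cross` deprecation warning above is triggered because the crossed `age` x `gender` column was built with `hash_key=None`. Below is a minimal sketch of silencing it, assuming the column definitions match the debug log and that this TensorFlow version's `tf.contrib.layers.crossed_column` accepts a `hash_key` argument: passing an explicit key keeps behaviour stable when the library default changes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Hedged sketch (not executed in this run): rebuild the crossed column with an\n",
"# explicit hash_key so sparse_feature_cross stops warning about the upcoming\n",
"# default change. Column names, keys and boundaries are copied from the debug\n",
"# log above; the hash value itself is arbitrary -- any fixed integer works.\n",
"age = tf.contrib.layers.real_valued_column(\"age\")\n",
"age_buckets = tf.contrib.layers.bucketized_column(\n",
"    age, boundaries=[10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65])\n",
"gender = tf.contrib.layers.sparse_column_with_keys(\n",
"    column_name=\"gender\", keys=[\"female\", \"male\"])\n",
"age_x_gender = tf.contrib.layers.crossed_column(\n",
"    [age_buckets, gender],\n",
"    hash_bucket_size=10000,\n",
"    hash_key=0xDECAFCAFFE)  # assumed value; any fixed uint64 silences the warning"
]
},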
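{
"cell_type": "markdown",
"metadata": {},
"source": [
"The checkpoint banner above suggests switching to the V2 saver format. The contrib.learn estimator builds its `Saver` internally, so the cell below is only a standalone, hedged sketch of the suggested call on a toy graph; it is not wired into `train_and_export`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Hedged, standalone sketch of the V2 checkpoint format named in the warning\n",
"# above; it saves a toy variable, not the estimator's weights. The checkpoint\n",
"# path is hypothetical.\n",
"toy_graph = tf.Graph()\n",
"with toy_graph.as_default():\n",
"    toy_var = tf.Variable([1.0, 2.0], name=\"toy_var\")\n",
"    init_op = tf.initialize_all_variables()\n",
"    saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)\n",
"    with tf.Session(graph=toy_graph) as sess:\n",
"        sess.run(init_op)\n",
"        saver.save(sess, \"/tmp/toy_v2_checkpoint.ckpt\")"
]
},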
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 1
}