@fonnesbeck
Created January 11, 2016
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import skflow as sf\n",
"import tensorflow as tf\n",
"from sklearn.cross_validation import train_test_split\n",
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.metrics import accuracy_score"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"np.random.seed(42)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>PassengerId</th>\n",
" <th>Survived</th>\n",
" <th>Pclass</th>\n",
" <th>Name</th>\n",
" <th>Sex</th>\n",
" <th>Age</th>\n",
" <th>SibSp</th>\n",
" <th>Parch</th>\n",
" <th>Ticket</th>\n",
" <th>Fare</th>\n",
" <th>Cabin</th>\n",
" <th>Embarked</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>3</td>\n",
" <td>Braund, Mr. Owen Harris</td>\n",
" <td>male</td>\n",
" <td>22</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>A/5 21171</td>\n",
" <td>7.2500</td>\n",
" <td>NaN</td>\n",
" <td>S</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>2</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>Cumings, Mrs. John Bradley (Florence Briggs Th...</td>\n",
" <td>female</td>\n",
" <td>38</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>PC 17599</td>\n",
" <td>71.2833</td>\n",
" <td>C85</td>\n",
" <td>C</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>3</td>\n",
" <td>1</td>\n",
" <td>3</td>\n",
" <td>Heikkinen, Miss. Laina</td>\n",
" <td>female</td>\n",
" <td>26</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>STON/O2. 3101282</td>\n",
" <td>7.9250</td>\n",
" <td>NaN</td>\n",
" <td>S</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>4</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>Futrelle, Mrs. Jacques Heath (Lily May Peel)</td>\n",
" <td>female</td>\n",
" <td>35</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>113803</td>\n",
" <td>53.1000</td>\n",
" <td>C123</td>\n",
" <td>S</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>5</td>\n",
" <td>0</td>\n",
" <td>3</td>\n",
" <td>Allen, Mr. William Henry</td>\n",
" <td>male</td>\n",
" <td>35</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>373450</td>\n",
" <td>8.0500</td>\n",
" <td>NaN</td>\n",
" <td>S</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" PassengerId Survived Pclass \\\n",
"0 1 0 3 \n",
"1 2 1 1 \n",
"2 3 1 3 \n",
"3 4 1 1 \n",
"4 5 0 3 \n",
"\n",
" Name Sex Age SibSp \\\n",
"0 Braund, Mr. Owen Harris male 22 1 \n",
"1 Cumings, Mrs. John Bradley (Florence Briggs Th... female 38 1 \n",
"2 Heikkinen, Miss. Laina female 26 0 \n",
"3 Futrelle, Mrs. Jacques Heath (Lily May Peel) female 35 1 \n",
"4 Allen, Mr. William Henry male 35 0 \n",
"\n",
" Parch Ticket Fare Cabin Embarked \n",
"0 0 A/5 21171 7.2500 NaN S \n",
"1 0 PC 17599 71.2833 C85 C \n",
"2 0 STON/O2. 3101282 7.9250 NaN S \n",
"3 0 113803 53.1000 C123 S \n",
"4 0 373450 8.0500 NaN S "
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data = pd.read_csv('tf_examples/data/titanic_train.csv')\n",
"data.head()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"predictors = ['Age', 'SibSp', 'Fare', 'Parch', 'Pclass', 'Sex']"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"X_train, X_test, y_train, y_test = train_test_split(data[predictors].replace({'Sex':{'male':0, \n",
" 'female':1}}).fillna(data.mean()), \n",
" data.Survived)"
]
},
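{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check, added for illustration (not part of the original run): mean imputation should leave no missing values, and every predictor should now be numeric."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Verify the preprocessing: no NaNs remain and all columns are numeric\n",
"assert features.isnull().sum().sum() == 0\n",
"features.dtypes"
]
},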
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"0.79372197309417036"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"lr = LogisticRegression()\n",
"lr.fit(X_train, y_train)\n",
"lr.score(X_test, y_test)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"classifier = sf.TensorFlowLinearClassifier(n_classes=2, \n",
" batch_size=128, \n",
" steps=500, \n",
" learning_rate=0.05)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Step #1, avg. loss: 5.86508\n",
"Step #51, avg. loss: 2.95665\n",
"Step #101, avg. loss: 2.78301\n",
"Step #151, avg. loss: 2.82941\n",
"Step #201, avg. loss: 2.80895\n",
"Step #251, avg. loss: 2.85910\n",
"Step #301, avg. loss: 2.74752\n",
"Step #351, avg. loss: 2.73762\n",
"Step #401, avg. loss: 2.79703\n",
"Step #451, avg. loss: 2.81233\n"
]
},
{
"data": {
"text/plain": [
"TensorFlowLinearClassifier(batch_size=128, continue_training=False,\n",
" early_stopping_rounds=None,\n",
" keep_checkpoint_every_n_hours=10000, learning_rate=0.05,\n",
" max_to_keep=5, n_classes=2, optimizer='SGD', steps=500,\n",
" tf_master='', tf_random_seed=42, verbose=1)"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"classifier.fit(X_train, y_train)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"0.60089686098654704"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"classifier.score(X_test, y_test)"
]
},
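{
"cell_type": "markdown",
"metadata": {},
"source": [
"The SGD-trained linear model scores well below scikit-learn's `LogisticRegression` (about 0.60 vs. 0.79 above). One likely reason is feature scale: `Fare` spans a much wider range than the other predictors, which hurts SGD convergence. Below is a minimal sketch, added for illustration and not part of the original run, of standardizing the features before fitting; the exact score will vary."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"from sklearn.preprocessing import StandardScaler\n",
"\n",
"# Rescale each feature to zero mean and unit variance, using training-set statistics\n",
"scaler = StandardScaler().fit(X_train)\n",
"scaled_classifier = sf.TensorFlowLinearClassifier(n_classes=2,\n",
"                                                  batch_size=128,\n",
"                                                  steps=500,\n",
"                                                  learning_rate=0.05)\n",
"scaled_classifier.fit(scaler.transform(X_train), y_train)\n",
"scaled_classifier.score(scaler.transform(X_test), y_test)"
]
},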
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"classifier = sf.TensorFlowDNNClassifier(\n",
" hidden_units=[10, 20, 20, 10], \n",
" n_classes=2, \n",
" batch_size=128, \n",
" steps=20000, \n",
" learning_rate=0.05)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Step #1, avg. loss: 5.08049\n",
"Step #2001, avg. loss: 0.58594\n",
"Step #4001, avg. loss: 0.51789\n",
"Step #6001, avg. loss: 0.47891\n",
"Step #8001, avg. loss: 0.45563\n",
"Step #10001, avg. loss: 0.43729\n",
"Step #12001, avg. loss: 0.41228\n",
"Step #14001, avg. loss: 0.39740\n",
"Step #16001, avg. loss: 0.39062\n",
"Step #18001, avg. loss: 0.38242\n"
]
},
{
"data": {
"text/plain": [
"TensorFlowDNNClassifier(batch_size=128, continue_training=False,\n",
" early_stopping_rounds=None, hidden_units=[10, 20, 20, 10],\n",
" keep_checkpoint_every_n_hours=10000, learning_rate=0.05,\n",
" max_to_keep=5, n_classes=2, optimizer='SGD', steps=20000,\n",
" tf_master='', tf_random_seed=42, verbose=1)"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"classifier.fit(X_train, y_train)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"0.76681614349775784"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"classifier.score(X_test, y_test)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"from tensorflow import tanh\n",
"\n",
"def dnn_tanh(X, y):\n",
" layers = sf.ops.dnn(X, [10, 20, 10], tanh)\n",
" return sf.models.logistic_regression(layers, y)\n",
"\n",
"classifier = sf.TensorFlowEstimator(\n",
" model_fn=dnn_tanh, \n",
" n_classes=2,\n",
" batch_size=128,\n",
" steps=500,\n",
" learning_rate=0.05)"
]
},
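{
"cell_type": "markdown",
"metadata": {},
"source": [
"`TensorFlowEstimator` accepts any `model_fn(X, y)` that returns predictions and a loss, so the same pattern composes arbitrary skflow ops. As a minimal sketch (assuming the same skflow API used above), plain logistic regression can be written as a custom `model_fn` too:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def logreg_model(X, y):\n",
"    # No hidden layers: logistic regression directly on the input features\n",
"    return sf.models.logistic_regression(X, y)\n",
"\n",
"linear_via_estimator = sf.TensorFlowEstimator(\n",
"    model_fn=logreg_model,\n",
"    n_classes=2,\n",
"    batch_size=128,\n",
"    steps=500,\n",
"    learning_rate=0.05)"
]
},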
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Step #1, avg. loss: 0.67051\n",
"Step #51, avg. loss: 0.62905\n",
"Step #101, avg. loss: 0.60878\n",
"Step #151, avg. loss: 0.60869\n",
"Step #201, avg. loss: 0.59843\n",
"Step #251, avg. loss: 0.61128\n",
"Step #301, avg. loss: 0.59994\n",
"Step #351, avg. loss: 0.59463\n",
"Step #401, avg. loss: 0.59706\n",
"Step #451, avg. loss: 0.59240\n"
]
},
{
"data": {
"text/plain": [
"TensorFlowEstimator(batch_size=128, continue_training=False,\n",
" early_stopping_rounds=None, keep_checkpoint_every_n_hours=10000,\n",
" learning_rate=0.05, max_to_keep=5,\n",
" model_fn=<function dnn_tanh at 0x10bbc22f0>, n_classes=2,\n",
" num_cores=4, optimizer='SGD', steps=500, tf_master='',\n",
" tf_random_seed=42, verbose=1)"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"classifier.fit(X_train, y_train)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"0.68161434977578472"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"score = accuracy_score(classifier.predict(X_test), y_test)\n",
"score"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Digit recognition"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"from sklearn import datasets\n",
"digits = datasets.load_digits()"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"X = digits.images\n",
"y = digits.target\n",
"\n",
"X_train, X_test, y_train, y_test = train_test_split(X, y,\n",
" test_size=0.2, random_state=42)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This function creates a 2-dimensional convolutional layer with max pooling."
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def conv_model(X, y):\n",
" X = tf.expand_dims(X, 3)\n",
" features = tf.reduce_max(sf.ops.conv2d(X, 12, [3, 3]), [1, 2])\n",
" features = tf.reshape(features, [-1, 12])\n",
" return sf.models.logistic_regression(features, y)"
]
},
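{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick shape check, added for illustration: each digit is an 8×8 grayscale image, so `expand_dims` yields `(batch, 8, 8, 1)`, the convolution yields `(batch, 8, 8, 12)`, and the global max over dimensions 1 and 2 leaves one feature per filter, i.e. `(batch, 12)`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"X_train.shape  # expected: (n_samples, 8, 8)"
]
},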
{
"cell_type": "code",
"execution_count": 19,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Step #1, avg. loss: 13.47284\n",
"Step #501, avg. loss: 1.53449\n",
"Step #1001, avg. loss: 0.74431\n",
"Step #1501, avg. loss: 0.70765\n",
"Step #2001, avg. loss: 0.66011\n",
"Step #2501, avg. loss: 0.66769\n",
"Step #3001, avg. loss: 0.64826\n",
"Step #3501, avg. loss: 0.62919\n",
"Step #4001, avg. loss: 0.61455\n",
"Step #4501, avg. loss: 0.58433\n"
]
},
{
"data": {
"text/plain": [
"TensorFlowEstimator(batch_size=128, continue_training=False,\n",
" early_stopping_rounds=None, keep_checkpoint_every_n_hours=10000,\n",
" learning_rate=0.05, max_to_keep=5,\n",
" model_fn=<function conv_model at 0x10c9720d0>, n_classes=10,\n",
" num_cores=4, optimizer='SGD', steps=5000, tf_master='',\n",
" tf_random_seed=42, verbose=1)"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"classifier = sf.TensorFlowEstimator(model_fn=conv_model, n_classes=10,\n",
" steps=5000, learning_rate=0.05,\n",
" batch_size=128)\n",
"classifier.fit(X_train, y_train)"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"0.72499999999999998"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"score = accuracy_score(classifier.predict(X_test), y_test)\n",
"score"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.1"
}
},
"nbformat": 4,
"nbformat_minor": 0
}