{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"#Sentdex - Deep learning with neural networks\n",
"import os\n",
"import tensorflow as tf\n",
"\n",
"x1 = tf.constant(5)\n",
"x2 = tf.constant(6)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"30\n"
]
}
],
"source": [
"#Multiply the constants\n",
"result = tf.multiply(x1,x2)\n",
"\n",
"\n",
"#Run the tensorflow session and check result\n",
"sess = tf.Session()\n",
"print(sess.run(result))\n",
"\n",
"#Close the session\n",
"sess.close()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"30\n"
]
}
],
"source": [
"#It will automatically close the session when done - better way to do it\n",
"with tf.Session() as sess:\n",
" print(sess.run(result))\n",
" "
]
},
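{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Not in the original notebook - a small illustrative sketch of placeholders,\n",
"#which the MNIST training below relies on. Unlike tf.constant, a placeholder\n",
"#has no value of its own; a value is fed in through feed_dict at run time.\n",
"#(The names a, b and product are introduced here just for illustration.)\n",
"a = tf.placeholder('float')\n",
"b = tf.placeholder('float')\n",
"product = tf.multiply(a,b)\n",
"\n",
"with tf.Session() as sess:\n",
"    #feed concrete values for the placeholders when running the graph\n",
"    print(sess.run(product, feed_dict = {a:5.0, b:6.0}))"
]
},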
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Extracting /tmp/data\\train-images-idx3-ubyte.gz\n",
"Extracting /tmp/data\\train-labels-idx1-ubyte.gz\n",
"Extracting /tmp/data\\t10k-images-idx3-ubyte.gz\n",
"Extracting /tmp/data\\t10k-labels-idx1-ubyte.gz\n"
]
}
],
"source": [
"#MNIST - 60k training examples of handwritten digits (28*28 pixels)\n",
"#10k testing examples\n",
"\n",
"#Objective : Take the examples and pass it to Neural network and output the correct the correct number\n",
"#Features - Is there anything on the pixel or not?\n",
"\n",
"#input > weight > hidden layer 1 > (activation function) > weights > hidden layer 2\n",
"#(activation function) > weights > output layer\n",
"#Feed forward NN \n",
"#Compare output to intended output > cost function\n",
"#optimization function (optimizer) > minimize cost (AdamOptimizer..., AdaGrad)\n",
"#backpropagation\n",
"#feed forward + backprop = epoch\n",
"\n",
"from tensorflow.examples.tutorials.mnist import input_data\n",
"#one_hot parameter useful for multi-class classification\n",
"mnist = input_data.read_data_sets(\"/tmp/data\",one_hot=True)\n",
"#10 classes : 0-9"
]
},
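{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Not in the original notebook - a quick sanity check of what one_hot=True\n",
"#produces: each label is a length-10 vector with a 1 at the digit's index,\n",
"#e.g. the digit 3 becomes [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]\n",
"print(mnist.train.images.shape)  #each image is flattened to 28*28 = 784 values\n",
"print(mnist.train.labels.shape)  #each label is a 10-dimensional one-hot vector\n",
"print(mnist.train.labels[0])     #first training label, shown as a one-hot vector"
]
},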
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"#Three hidden layers\n",
"n_nodes_hl1 = 1000\n",
"n_nodes_hl2 = 1000\n",
"n_nodes_hl3 = 1000\n",
"\n",
"#Number of classes\n",
"n_classes = 10\n",
"\n",
"#Will go through 100 features at a time\n",
"batch_size = 100\n",
"\n",
"#Placeholder variables (height * width)\n",
"#These are placeholders for some values in the graph\n",
"x = tf.placeholder('float',[None,784])\n",
"y = tf.placeholder('float')\n",
"\n",
"def neural_network_model(data):\n",
" \n",
" hidden_1_layer = {'weights' : tf.Variable(tf.random_normal([784,n_nodes_hl1])),\n",
" 'biases' : tf.Variable(tf.random_normal([n_nodes_hl1]))}\n",
" #bias is used to make some neurons fire even if all inputs is 0\n",
" hidden_2_layer = {'weights' : tf.Variable(tf.random_normal([n_nodes_hl1,n_nodes_hl2])),\n",
" 'biases' : tf.Variable(tf.random_normal([n_nodes_hl2]))}\n",
" hidden_3_layer = {'weights' : tf.Variable(tf.random_normal([n_nodes_hl2,n_nodes_hl3])),\n",
" 'biases' : tf.Variable(tf.random_normal([n_nodes_hl3]))}\n",
" output_layer = {'weights' : tf.Variable(tf.random_normal([n_nodes_hl3,n_classes])),\n",
" 'biases' : tf.Variable(tf.random_normal([n_classes]))}\n",
" #(input_data*weights) + biases\n",
" l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']),hidden_1_layer['biases'])\n",
" l1 = tf.nn.relu(l1)\n",
" \n",
" l2 = tf.add(tf.matmul(l1,hidden_2_layer['weights']),hidden_2_layer['biases'])\n",
" l2 = tf.nn.relu(l2)\n",
" \n",
" l3 = tf.add(tf.matmul(l2,hidden_3_layer['weights']),hidden_3_layer['biases'])\n",
" l3 = tf.nn.relu(l3)\n",
" \n",
" output = tf.matmul(l3,output_layer['weights'])+ output_layer['biases']\n",
" \n",
" return output\n",
"\n",
"#Boom - Modeled a neural network\n",
"#Done with the computation graph\n",
"#We have coded the model\n"
]
},
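{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Not in the original notebook - an alternative sketch of the same architecture\n",
"#using tf.layers.dense, which creates the weight and bias Variables internally.\n",
"#Shown only for comparison; the training below uses neural_network_model above.\n",
"def neural_network_model_layers(data):\n",
"    l1 = tf.layers.dense(data, n_nodes_hl1, activation=tf.nn.relu)\n",
"    l2 = tf.layers.dense(l1, n_nodes_hl2, activation=tf.nn.relu)\n",
"    l3 = tf.layers.dense(l2, n_nodes_hl3, activation=tf.nn.relu)\n",
"    #no activation on the output - the cost function below expects raw logits\n",
"    output = tf.layers.dense(l3, n_classes)\n",
"    return output"
]
},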
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"#Running our network\n",
"def train_neural_network(x):\n",
" prediction = neural_network_model(x)\n",
" #Cost function is cross entropy with logits\n",
" cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=y))\n",
" \n",
" #Choose the optimizer\n",
" optimizer = tf.train.AdamOptimizer().minimize(cost)\n",
" \n",
" #Cycles feed forward + backprop\n",
" hm_epochs = 10\n",
" \n",
" with tf.Session() as sess:\n",
" sess.run(tf.global_variables_initializer())\n",
" #Trainng the network\n",
" for epoch in range(hm_epochs):\n",
" epoch_loss = 0\n",
" for _ in range(int(mnist.train.num_examples/batch_size)):\n",
" epoch_x,epoch_y = mnist.train.next_batch(batch_size)\n",
" _, c = sess.run([optimizer,cost], feed_dict = {x:epoch_x,y:epoch_y})\n",
" epoch_loss += c\n",
" print('Epoch',epoch,'Completed out of',hm_epochs,'loss:',epoch_loss)\n",
" \n",
" \n",
" correct = tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))\n",
" accuracy = tf.reduce_mean(tf.cast(correct,'float'))\n",
" print('Accuracy:',accuracy.eval({x:mnist.test.images,y:mnist.test.labels}))"
]
},
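{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Not in the original notebook - a tiny illustration of how the accuracy\n",
"#calculation above works, using hard-coded values instead of the network.\n",
"#tf.argmax picks the index of the largest value (the predicted class),\n",
"#tf.equal compares predictions to labels, and the mean of the cast is accuracy.\n",
"#(fake_logits, fake_labels and the *_demo names are made up for this sketch.)\n",
"fake_logits = tf.constant([[0.1, 2.0, 0.3],\n",
"                           [1.5, 0.2, 0.1]])\n",
"fake_labels = tf.constant([[0.0, 1.0, 0.0],\n",
"                           [0.0, 0.0, 1.0]])\n",
"correct_demo = tf.equal(tf.argmax(fake_logits,1),tf.argmax(fake_labels,1))\n",
"accuracy_demo = tf.reduce_mean(tf.cast(correct_demo,'float'))\n",
"with tf.Session() as sess:\n",
"    #first row is classified correctly, second is not, so accuracy is 0.5\n",
"    print(sess.run(accuracy_demo))"
]
},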
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From <ipython-input-7-963725452600>:5: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"\n",
"Future major versions of TensorFlow will allow gradients to flow\n",
"into the labels input on backprop by default.\n",
"\n",
"See tf.nn.softmax_cross_entropy_with_logits_v2.\n",
"\n",
"Epoch 0 Completed out of 10 loss: 3758147.80841\n",
"Epoch 1 Completed out of 10 loss: 796936.213623\n",
"Epoch 2 Completed out of 10 loss: 372440.355949\n",
"Epoch 3 Completed out of 10 loss: 188040.790529\n",
"Epoch 4 Completed out of 10 loss: 114344.558451\n",
"Epoch 5 Completed out of 10 loss: 91873.0738357\n",
"Epoch 6 Completed out of 10 loss: 85191.3208058\n",
"Epoch 7 Completed out of 10 loss: 69750.7198949\n",
"Epoch 8 Completed out of 10 loss: 67209.9048491\n",
"Epoch 9 Completed out of 10 loss: 56794.7105298\n",
"Accuracy: 0.9593\n"
]
}
],
"source": [
"train_neural_network(x)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}