The TensorFlow Getting Started Tutorials
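Two scripts follow, both written against the TensorFlow 1.x API: the softmax classifier from the "MNIST For ML Beginners" tutorial, and the step-by-step walkthrough from the core Getting Started guide.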
import tensorflow as tf

# get MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# describe a placeholder for the input images (28x28 pixels flattened to 784)
x = tf.placeholder(tf.float32, [None, 784])

# define the weights and biases
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# implement the model
y = tf.nn.softmax(tf.matmul(x, W) + b)

# calculate loss using cross-entropy
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
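# note: this explicit formulation can be numerically unstable; the official
# sample applies tf.nn.softmax_cross_entropy_with_logits to the raw logits instead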
# create training step
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# initialize the session and run the training step
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# get the accuracy of the model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
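A quick sanity check, not part of the original tutorial: with the session above still open, the trained graph can be queried for individual predictions and compared against the true labels.

# hedged example: inspect the model's guesses for the first few test images
predicted = sess.run(tf.argmax(y, 1), feed_dict={x: mnist.test.images[:5]})
actual = sess.run(tf.argmax(y_, 1), feed_dict={y_: mnist.test.labels[:5]})
print("predicted:", predicted, "actual:", actual)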
The second script is the step-by-step walkthrough from the core Getting Started guide.
# Silence Warnings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
import numpy as np

# Create 2 constant nodes
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0)
print(node1, node2)
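# note: the print above shows Tensor objects (name, shape, dtype), not 3.0 and 4.0;
# concrete values only materialize once a Session runs the graph, as below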
# Evaluate nodes
sess = tf.Session()
print(sess.run([node1, node2]))

# Use add operation to add both nodes
node3 = tf.add(node1, node2)
print("node3: ", node3)
print("sess.run(node3): ", sess.run(node3))
# use placeholder values to provide input parameters
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b
print(sess.run(adder_node, {a: 3, b: 4.5}))
print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))

# placeholders can be combined to form complex operations
add_and_triple = adder_node * 3.
print(sess.run(add_and_triple, {a: 3, b: 4.5}))
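Because the placeholders were created without a fixed shape, the same graph also accepts higher-rank inputs; a minimal sketch, not in the original tutorial:

# hedged example: feeding 2-D inputs through the same graph works elementwise
print(sess.run(add_and_triple, {a: [[1, 2]], b: [[3, 4]]}))  # [[12. 18.]]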
# Make a trainable model using Variables
# (note: dtype must be passed by keyword; the second positional
# argument of tf.Variable is `trainable`, not the dtype)
W = tf.Variable([.3], dtype=tf.float32)
c = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + c

# Variables must be explicitly initialized before use
init = tf.global_variables_initializer()
sess.run(init)
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))
# evaluate the model with a loss function
y = tf.placeholder(tf.float32)
squared_deltas = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_deltas)
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
# manually assign values to W and c to get the correct output
fixW = tf.assign(W, [-1.])
fixc = tf.assign(c, [1.])
sess.run([fixW, fixc])
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
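# the loss printed here is 0.0: with W = -1 and c = 1 the model output
# [0, -1, -2, -3] matches y exactly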
# reset the variables and train the model using a gradient descent optimizer
sess.run(init)
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
for i in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print(sess.run([W, c]))
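After 1000 gradient steps, the printed values should land very close to the exact solution W = -1 and c = 1 that was assigned by hand above.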
# Reimplement linear regression using tf.contrib.learn
sess.run(init)
features = [tf.contrib.layers.real_valued_column("x", dimension=1)]
estimator = tf.contrib.learn.LinearRegressor(feature_columns=features)
x = np.array([1., 2., 3., 4.])
y = np.array([0., -1., -2., -3.])
input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x}, y, batch_size=4, num_epochs=1000)
estimator.fit(input_fn=input_fn, steps=1000)
print(estimator.evaluate(input_fn=input_fn))
# implement a custom model
sess.run(init)
def model(features, labels, mode):
    # Build a linear model and predict values
    W = tf.get_variable("W", [1], dtype=tf.float64)
    b = tf.get_variable("b", [1], dtype=tf.float64)
    y = W * features['x'] + b
    # Loss sub-graph
    loss = tf.reduce_sum(tf.square(y - labels))
    # Training sub-graph
    global_step = tf.train.get_global_step()
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = tf.group(optimizer.minimize(loss), tf.assign_add(global_step, 1))
    # Connect sub-graphs using ModelFnOps
    return tf.contrib.learn.ModelFnOps(
        mode=mode,
        predictions=y,
        loss=loss,
        train_op=train
    )

# define the data set
estimator = tf.contrib.learn.Estimator(model_fn=model)
x = np.array([1., 2., 3., 4.])
y = np.array([0., -1., -2., -3.])
input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x}, y, batch_size=4, num_epochs=1000)

# train
estimator.fit(input_fn=input_fn, steps=1000)

# evaluate our model
print(estimator.evaluate(input_fn=input_fn, steps=10))
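The dict printed by evaluate() contains the final loss and the global step. As a hedged follow-up, not part of the tutorial, individual predictions can be pulled from the trained estimator; this sketch assumes tf.contrib.learn's generator-style predict API and numpy_input_fn's shuffle flag:

# hypothetical usage: one unshuffled pass over the training inputs
predict_input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x}, num_epochs=1, shuffle=False)
for p in estimator.predict(input_fn=predict_input_fn):
    print(p)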