A few introductory TensorFlow programs: basic tensor operations, matrix multiplication on the GPU, linear regression, softmax and convolutional MNIST classifiers with TensorBoard summaries, and tf.contrib.learn estimators on a CSV dataset.
import tensorflow as tf

# Element-wise addition of two vectors
a = tf.Variable([1, 2, 3, 4, 5])
b = tf.Variable([3, 4, 5, 6, 7])
c = tf.add(a, b)

# Matrix product of a 1x5 row vector and a 5x1 column vector
z = tf.Variable([[1, 2, 3, 4, 5]])
d = tf.Variable([[1], [2], [3], [4], [5]])
e = tf.matmul(z, d)

# Cap this process at 20% of GPU memory so other jobs can share the card
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
sess.run(c)  # array([ 4,  6,  8, 10, 12], dtype=int32)
sess.run(e)  # array([[55]], dtype=int32)
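A minimal variant of the same run using the session as a context manager, so it is closed automatically on exit (reusing the graph and config defined above):

with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(c))  # [ 4  6  8 10 12]
    print(sess.run(e))  # [[55]]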
import tensorflow as tf
import numpy as np

# Multiply two 1000x1000 matrices of uniform random values
m = 1000
n = 1000
p = 1000
q = 1000
a = tf.Variable(np.random.rand(m, n))
b = tf.Variable(np.random.rand(p, q))
matmul = tf.matmul(a, b)

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
sess.run(matmul)
Sample output (values vary with the random draw):
array([[ 238.13376907,  245.3936538 ,  244.13400084, ...,  248.77741554,
         244.08743581,  245.1664966 ],
       [ 255.36406856,  261.55705232,  256.95658383, ...,  271.35774203,
         261.09122763,  260.6325724 ],
       [ 252.07361543,  266.47615965,  256.26324837, ...,  267.15410024,
         261.16308213,  260.53740344],
       ..., 
       [ 240.45078061,  248.99925244,  254.01739122, ...,  257.57329959,
         245.12092099,  251.79659716],
       [ 242.44459215,  251.46102834,  250.36697372, ...,  257.11558028,
         244.47140506,  250.76506807],
       [ 238.89541478,  242.51683293,  240.10959558, ...,  245.02397621,
         240.54384861,  237.54347636]])
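Whether the product actually runs faster on the GPU can be checked by pinning the same multiplication to each device and timing it. A rough sketch (it assumes a single visible GPU, and the timings include session startup overhead):

import time

import numpy as np
import tensorflow as tf

size = 2000
values = np.random.rand(size, size).astype(np.float32)

for device in ['/cpu:0', '/gpu:0']:
    # Build an identical graph pinned to one device
    graph = tf.Graph()
    with graph.as_default(), tf.device(device):
        a = tf.constant(values)
        product = tf.matmul(a, a)
    with tf.Session(graph=graph) as sess:
        start = time.time()
        sess.run(product)
        print('%s: %.3f s' % (device, time.time() - start))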
import tensorflow as tf
import numpy as np

# Synthetic data drawn from the line y = 0.1 * x + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Fit W and b by gradient descent on the mean squared error
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
sess = tf.Session(config=config)
sess.run(init)
for step in range(100000):
    sess.run(train)
    if step % 2000 == 0:
        print(step, sess.run(W), sess.run(b))  # W -> 0.1, b -> 0.3
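Since the data is generated exactly from y = 0.1x + 0.3, W and b should converge to roughly 0.1 and 0.3. A quick least-squares cross-check with NumPy, independent of the TensorFlow run:

import numpy as np

x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Ordinary least-squares fit of a degree-1 polynomial: slope, then intercept
slope, intercept = np.polyfit(x_data, y_data, 1)
print(slope, intercept)  # ~0.1, ~0.3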
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Softmax regression: one weight matrix and bias mapping pixels to 10 classes
with tf.name_scope("input_X"):
    x = tf.placeholder(tf.float32, [None, 784])
with tf.name_scope("Weight_and_bias"):
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
w_h = tf.summary.histogram("weight", W)
b_h = tf.summary.histogram("bias", b)
with tf.name_scope("Prediction_y"):
    y = tf.nn.softmax(tf.matmul(x, W) + b)
with tf.name_scope("Actual_y"):
    y_ = tf.placeholder(tf.float32, [None, 10])
with tf.name_scope("Cross_Entropy"):
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    tf.summary.scalar("cost_function", cross_entropy)
with tf.name_scope("GradientDescentOptimizer"):
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
with tf.name_scope("accuracy"):
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar("accuracy", accuracy)

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
merged_summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter("output", sess.graph)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    summary_val = sess.run(merged_summary_op, feed_dict={x: batch_xs, y_: batch_ys})
    writer.add_summary(summary_val, i)  # tag each summary with its step number
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
writer.close()
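The loss curve, the weight and bias histograms, and the graph written to the output directory can then be inspected with TensorBoard by running tensorboard --logdir=output and opening the address it prints in a browser.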
import tensorflow as tf
import pandas as pd
from tensorflow.contrib import learn
from sklearn.model_selection import train_test_split  # sklearn.cross_validation is deprecated

train = pd.read_csv('data/train.csv')
train = train.sample(frac=1)  # shuffle the rows
y = train['target']
# Assumes target values end in the class digit (e.g. 'Class_3' -> 3)
y = y.apply(lambda row: int(row[-1]))
x = train.drop(['id', 'target'], axis=1)

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

# Fully connected network with four hidden layers
classifier = learn.DNNClassifier(hidden_units=[45, 22, 15, 10],
    n_classes=10,
    feature_columns=learn.infer_real_valued_columns_from_input(x_train),
    optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05))
classifier.fit(x_train, y_train, batch_size=15, steps=50000)
classifier.evaluate(x=x_test, y=y_test)

# Linear (softmax regression) baseline on the same features
tflr = learn.LinearClassifier(n_classes=10,
    feature_columns=learn.infer_real_valued_columns_from_input(x_train),
    optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05))
tflr.fit(x_train, y_train, batch_size=128, steps=50000)
tflr.evaluate(x=x_test, y=y_test)
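To look at individual predictions rather than aggregate metrics, the fitted contrib.learn classifiers expose a predict method. A sketch, assuming it yields one class id per row as in this TensorFlow release:

from sklearn.metrics import accuracy_score

# predict returns an iterable of predicted class ids for x_test
predicted = list(classifier.predict(x_test))
print(accuracy_score(y_test, predicted))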
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

def weight_variable(shape):
    # Small positive noise breaks the symmetry between units
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # Stride-1 convolution, zero-padded so the output keeps the input size
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def maxpool(x):
    # 2x2 max pooling with stride 2 halves each spatial dimension
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
with tf.name_scope('Input_x'):
    x = tf.placeholder(tf.float32, shape=[None, 784])
with tf.name_scope('Output_y'):
    y = tf.placeholder(tf.float32, shape=[None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])  # flat 784 vector -> 28x28 grayscale image
with tf.name_scope('Layer_1_Weights'):
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    tf.summary.histogram('layer_1_weight', W_conv1)
    tf.summary.histogram('layer_1_bias', b_conv1)
with tf.name_scope('Layer_1'):
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = maxpool(h_conv1)
with tf.name_scope('Layer_2_Weights'):
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    tf.summary.histogram('layer_2_weight', W_conv2)
    tf.summary.histogram('layer_2_bias', b_conv2)
with tf.name_scope('Layer_2'):
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = maxpool(h_conv2)
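# Shape bookkeeping: each maxpool halves the spatial size with SAME padding,
# so 28x28 -> 14x14 after layer 1 and 14x14 -> 7x7 after layer 2; with 64
# feature maps this gives the 7*7*64 inputs flattened for the dense layer.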
with tf.name_scope('Densely_Connected_Weights'):
    w_den = weight_variable([7*7*64, 1024])
    b_den = bias_variable([1024])
    tf.summary.histogram('dense_weight', w_den)
    tf.summary.histogram('dense_bias', b_den)
with tf.name_scope('Densely_Connected_Layer'):
    h_pool_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool_flat, w_den) + b_den)
with tf.name_scope('Dropout_Layer'):
    keep_prob = tf.placeholder(tf.float32)  # 0.5 during training, 1.0 at test time
    h_drop = tf.nn.dropout(h_fc1, keep_prob)
with tf.name_scope('Readout_Layer_Weights'):
    w_read = weight_variable([1024, 10])
    b_read = bias_variable([10])
    tf.summary.histogram('read_out_weight', w_read)
    tf.summary.histogram('read_out_bias', b_read)
with tf.name_scope('Readout_Layer'):
    y_conv = tf.matmul(h_drop, w_read) + b_read  # raw logits, no softmax here
with tf.name_scope("Cross_Entropy"):
    cross_entropy = tf.reduce_mean(-tf.reduce_sum( y_conv * tf.log(y), reduction_indices=[1]))
    tf.summary.scalar("cost_function",cross_entropy)
with tf.name_scope("GradientDescentOptimizer"):
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
merged_summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter('output', sess.graph)
for i in range(2000):
    batch = mnist.train.next_batch(32)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0], y: batch[1], keep_prob: 1.0}, session=sess)
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5}, session=sess)

    #summary_val = sess.run(merged_summary_op,feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5})
    #writer.add_summary(summary_val)
print("test accuracy %g"%accuracy.eval(feed_dict={
    x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0},session=sess))
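Since the session reserves only 20% of GPU memory, evaluating all 10,000 test images in a single run can exhaust it. A sketch of evaluating in batches instead (averaging per-batch accuracy, which is exact here because the batch size divides the test set evenly):

batch_size = 1000
scores = []
for start in range(0, mnist.test.num_examples, batch_size):
    feed = {x: mnist.test.images[start:start + batch_size],
            y: mnist.test.labels[start:start + batch_size],
            keep_prob: 1.0}
    scores.append(accuracy.eval(feed_dict=feed, session=sess))
print("batched test accuracy %g" % (sum(scores) / len(scores)))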

Prepared By

  • P Ajay Rao
  • Navaneesh Kumar B
  • Siddharth Cadabam