@Madhivarman
Last active June 6, 2019 12:57
TensorFlow Code - Training a model to count how many ones are in a binary string.
#train a model to count the number of 1's present in a binary string
#import the necessary libraries
import numpy as np
from random import shuffle
import tensorflow as tf
training_data = ['{0:020b}'.format(i) for i in range(2**20)]
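#e.g. '{0:020b}'.format(5) == '00000000000000000101' (a 20-char zero-padded binary string)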
shuffle(training_data)
train_input = [list(map(int,i)) for i in training_data] #each string becomes a list of ints (Python 3 safe)
ti = [] #list to store each sequence
for i in train_input:
    temp_list = []
    for j in i:
        temp_list.append([j]) #wrap each bit so every time step is a 1-D feature
    ti.append(temp_list)
#assign ti to the training data
train_input = ti
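#e.g. [0,1,1,0,...] becomes [[0],[1],[1],[0],...], so each sample has shape (20,1)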
#build the one-hot encoded output for the above input:
#the target index is the number of 1's in the string (0..20)
train_output = []
for i in train_input:
    count = 0
    for j in i:
        if j[0] == 1:
            count += 1
    temp_list = [0]*21 #create an empty list of length 21
    temp_list[count] = 1 #encode a 1 into temp_list at position count
    train_output.append(temp_list) #append to the output list
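#e.g. a string containing seven 1's maps to a 21-length one-hot vector with a 1 at index 7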
#create the test data
number_of_samples = 100000 #first 100000 samples as training data, the remaining as test data
test_input = train_input[number_of_samples:]
test_output = train_output[number_of_samples:]
#training_data
X = train_input[:number_of_samples]
Y = train_output[:number_of_samples]
data = tf.placeholder(tf.float32,[None,20,1]) #input: batches of 20-step sequences, 1 feature per step
output = tf.placeholder(tf.float32,[None,21]) #target: one-hot count over 21 classes
num_hidden = 24 #number of hidden units in the LSTM cell
cell = tf.nn.rnn_cell.LSTMCell(num_hidden,state_is_tuple=True) #a single LSTM cell
val,state = tf.nn.dynamic_rnn(cell,data,dtype=tf.float32)
val = tf.transpose(val,[1,0,2]) #swap the batch and time axes
last = tf.gather(val,int(val.get_shape()[0])-1) #take the output at the last time step
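#shape walk-through: dynamic_rnn returns val as (batch,20,24); after the transpose
#it is (20,batch,24), so gathering index 19 yields the (batch,24) output of the
#final time step, which feeds the softmax layer below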
#declaring weights and biases for the softmax layer
weight = tf.Variable(tf.truncated_normal([num_hidden,int(output.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1,shape=[output.get_shape()[1]]))
#prediction and cross-entropy loss (the clip avoids log(0))
prediction = tf.nn.softmax(tf.matmul(last,weight)+bias)
cross_entropy = -tf.reduce_sum(output * tf.log(tf.clip_by_value(prediction,1e-10,1.0)))
#optimizer
optimizer = tf.train.AdamOptimizer()
minimizer = optimizer.minimize(cross_entropy)
#flag mismatched predictions and take their mean as the error rate
mistakes = tf.not_equal(tf.argmax(output,1),tf.argmax(prediction,1))
error_rate = tf.reduce_mean(tf.cast(mistakes,tf.float32))
#initialize the graph and start the session
run_init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(run_init_op)
#batch size
batchsize = 1000
no_of_batches = int(len(X) / batchsize) #100000 / 1000 = 100 batches per epoch
epochs = 3000
#training loop
for i in range(epochs):
    ptr = 0
    for j in range(no_of_batches):
        inp,out = X[ptr:ptr+batchsize],Y[ptr:ptr+batchsize]
        ptr += batchsize
        sess.run(minimizer,{data:inp,output:out})
    print("Epoch {} is processed".format(i))
#evaluate on the held-out test data
incorrect = sess.run(error_rate,{data:test_input,output:test_output})
print('Epoch {:2d} error {:3.1f}%'.format(i + 1, 100 * incorrect))
#save the tensorflow model (a Saver must be created before saving)
saver = tf.train.Saver()
save_path = saver.save(sess,'Count_Predict_Trained_model.ckpt')
sess.close() #close the session
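#a minimal usage sketch (an addition beyond the original gist): restore the
#checkpoint and count the ones in one example string; it assumes the graph
#defined above is still in memory, and the test value 45 is arbitrary
sess = tf.Session()
saver.restore(sess,'Count_Predict_Trained_model.ckpt')
example = '{0:020b}'.format(45) #'00000000000000101101'
example_input = [[[int(c)] for c in example]] #shape (1,20,1)
probs = sess.run(prediction,{data:example_input})
print('predicted count:',np.argmax(probs)) #45 has four 1 bits
sess.close()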