Python: training a small regression network and saving it as a TensorFlow (1.x) SavedModel with SavedModelBuilder
import tensorflow as tf
import numpy as np

# Toy regression data: 1000 samples of x drawn uniformly from [0, 1), with labels y = 2x + 1
data = np.reshape(np.random.uniform(0, 1, 1000), (1000, 1))
label = 2 * data + 1
# Setting configurations
n_nodes_hl1 = 3  # nodes in hidden layer 1
n_nodes_hl2 = 5  # nodes in hidden layer 2
n_nodes_hl3 = 3  # nodes in hidden layer 3
n_classes = 1    # a single output: this is a regression problem
batch_size = 100

x = tf.placeholder('float', [None, 1], name='input')  # named so it can be looked up after reloading
y = tf.placeholder('float', [None, 1])  # the batch dimension is not specified (it can be anything)
# Defining the computation graph - the neural network model
def neural_network_model(input):
    # weights randomly initialized from a normal distribution, size 1 x n_nodes_hl1
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([1, n_nodes_hl1])),
                      # biases randomly initialized (normal distribution), length n_nodes_hl1
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    # forward pass
    z1 = tf.add(tf.matmul(input, hidden_1_layer['weights']), hidden_1_layer['biases'])
    a1 = tf.nn.relu(z1)
    z2 = tf.add(tf.matmul(a1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    a2 = tf.nn.relu(z2)
    z3 = tf.add(tf.matmul(a2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    a3 = tf.nn.relu(z3)
    # the output op is named so the tensor can be looked up after the model is reloaded
    yhat = tf.add(tf.matmul(a3, output_layer['weights']), output_layer['biases'], name='output')
    return yhat
# defining the training
def train_neural_network(input):
    prediction = neural_network_model(input)
    cost = tf.reduce_mean(tf.square(prediction - y))  # mean squared error
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    nEpochs = 1000
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(nEpochs):
            epoch_loss = 0
            for batch in range(int(len(data) / batch_size)):
                start = batch * batch_size
                end = start + batch_size
                epoch_x = data[start:end]
                epoch_y = label[start:end]
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', nEpochs, 'loss:', epoch_loss)
        pred = sess.run(prediction, feed_dict={x: [[0.5]]})
        print("pred:", pred)  # should be close to 2 * 0.5 + 1 = 2
        save_model(sess, prediction)
        # evaluate the trained model's mean squared error over the full data set
        error = sess.run(cost, feed_dict={x: data, y: label})
        print('Error:', error)
# saving the trained model
def save_model(session, prediction):
    # (do not re-run tf.global_variables_initializer() here: that would overwrite
    # the trained weights with fresh random values before they are saved)
    signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs={'input': tf.saved_model.utils.build_tensor_info(x)},
        outputs={'output': tf.saved_model.utils.build_tensor_info(prediction)},
        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
    # the export directory must not exist yet, otherwise SavedModelBuilder raises an error
    b = tf.saved_model.builder.SavedModelBuilder('/home/mc/data/test-tensorflow/model')
    b.add_meta_graph_and_variables(
        session,
        [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature})
    b.save()
train_neural_network(x)
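To verify the export, the following sketch (assuming the same export directory as in save_model above) reloads the SavedModel into a fresh graph and reproduces the prediction for x = 0.5; the tensor names 'input:0' and 'output:0' follow from the name= arguments used when building the graph.

# Verification sketch: reload the SavedModel and run one prediction.
import tensorflow as tf

export_dir = '/home/mc/data/test-tensorflow/model'  # same path as used in save_model above

with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    input_tensor = sess.graph.get_tensor_by_name('input:0')    # from name='input' on the placeholder
    output_tensor = sess.graph.get_tensor_by_name('output:0')  # from name='output' on the output op
    print(sess.run(output_tensor, feed_dict={input_tensor: [[0.5]]}))  # should print roughly [[2.0]]

The exported signature can also be inspected from the command line with saved_model_cli (shipped with TensorFlow 1.3+), e.g. saved_model_cli show --dir /home/mc/data/test-tensorflow/model --all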