den250400/FCN
Created March 23, 2018 10:15
import tensorflow as tf
import numpy as np
import os
import struct
import cv2
from matplotlib import pyplot as plt

# Helper constructors for weights, biases, 'SAME'-padded convolutions and 2x2 max-pooling.
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Variable initialization
x = tf.placeholder(tf.float32, shape=[None, 384, 512, 3])
y_ = tf.placeholder(tf.float32, shape=[None, 48, 64])
# This reshape is a no-op (x already has this shape); kept from the original.
x_image = tf.reshape(x, [-1, 384, 512, 3])

# Network forward-pass definition
# First convolutional layer (2 convolutions)
W_conv1_1 = weight_variable([5, 5, 3, 64])
b_conv1_1 = bias_variable([64])
h_conv1_1 = tf.nn.relu(conv2d(x_image, W_conv1_1) + b_conv1_1)
W_conv1_2 = weight_variable([5, 5, 64, 64])
b_conv1_2 = bias_variable([64])
h_conv1_2 = tf.nn.relu(conv2d(h_conv1_1, W_conv1_2) + b_conv1_2)
h_pool1 = max_pool_2x2(h_conv1_2)
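# h_pool1: [None, 192, 256, 64]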

# Second convolutional layer (2 convolutions)
W_conv2_1 = weight_variable([5, 5, 64, 64])
b_conv2_1 = bias_variable([64])
h_conv2_1 = tf.nn.relu(conv2d(h_pool1, W_conv2_1) + b_conv2_1)
W_conv2_2 = weight_variable([5, 5, 64, 64])
b_conv2_2 = bias_variable([64])
h_conv2_2 = tf.nn.relu(conv2d(h_conv2_1, W_conv2_2) + b_conv2_2)
h_pool2 = max_pool_2x2(h_conv2_2)
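# h_pool2: [None, 96, 128, 64]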

# Third convolutional layer (3 convolutions)
W_conv3_1 = weight_variable([5, 5, 64, 64])
b_conv3_1 = bias_variable([64])
h_conv3_1 = tf.nn.relu(conv2d(h_pool2, W_conv3_1) + b_conv3_1)
W_conv3_2 = weight_variable([5, 5, 64, 64])
b_conv3_2 = bias_variable([64])
h_conv3_2 = tf.nn.relu(conv2d(h_conv3_1, W_conv3_2) + b_conv3_2)
W_conv3_3 = weight_variable([5, 5, 64, 64])
b_conv3_3 = bias_variable([64])
h_conv3_3 = tf.nn.relu(conv2d(h_conv3_2, W_conv3_3) + b_conv3_3)
h_pool3 = max_pool_2x2(h_conv3_3)
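# h_pool3: [None, 48, 64, 64]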

# Fourth convolutional layer (3 convolutions)
W_conv4_1 = weight_variable([5, 5, 64, 64])
b_conv4_1 = bias_variable([64])
h_conv4_1 = tf.nn.relu(conv2d(h_pool3, W_conv4_1) + b_conv4_1)
W_conv4_2 = weight_variable([5, 5, 64, 64])
b_conv4_2 = bias_variable([64])
h_conv4_2 = tf.nn.relu(conv2d(h_conv4_1, W_conv4_2) + b_conv4_2)
W_conv4_3 = weight_variable([5, 5, 64, 64])
b_conv4_3 = bias_variable([64])
h_conv4_3 = tf.nn.relu(conv2d(h_conv4_2, W_conv4_3) + b_conv4_3)
h_pool4 = max_pool_2x2(h_conv4_3)
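# h_pool4: [None, 24, 32, 64]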

# Train step definition
# NOTE (editor assumption): h_pool4 is [None, 24, 32, 64] while the labels are
# [None, 48, 64], so the original loss call could not run. A 1x1 score
# convolution and a bilinear 2x upsampling are added so the logits match the
# label grid; the softmax is then taken over the flattened spatial positions,
# treating each 48x64 label map as a heatmap.
W_score = weight_variable([1, 1, 64, 1])
b_score = bias_variable([1])
logits_map = tf.image.resize_bilinear(conv2d(h_pool4, W_score) + b_score, [48, 64])
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.reshape(y_, [-1, 48 * 64]),
    logits=tf.reshape(logits_map, [-1, 48 * 64])))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Initializing the saver to save our model after training
saver = tf.train.Saver()

# Training
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    images = np.load("dataset/images.npy")
    labels = np.load("dataset/labels.npy")
    # Cycle through the 29 training examples, one image per step.
    for i in range(2, 100):
        feed = {x: images[i % 29].reshape(1, 384, 512, 3),
                y_: labels[i % 29].reshape(1, 48, 64)}
        train_step.run(feed_dict=feed)
        print("Step: %d; Cross-entropy: %f" % (i, cross_entropy.eval(feed_dict=feed)))
    save_path = saver.save(sess, "Kernels/model")
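
# --- Minimal inference sketch (editor addition, not part of the original gist) ---
# Assumes the checkpoint written above ("Kernels/model") exists and reuses
# `logits_map`, the upsampled score tensor introduced in the loss fix, to
# visualize the predicted 48x64 heatmap for one training image.
with tf.Session() as sess:
    saver.restore(sess, "Kernels/model")
    images = np.load("dataset/images.npy")
    heatmap = sess.run(tf.nn.softmax(tf.reshape(logits_map, [-1, 48 * 64])),
                       feed_dict={x: images[0].reshape(1, 384, 512, 3)})
    plt.imshow(heatmap.reshape(48, 64))
    plt.show()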