Skip to content

Instantly share code, notes, and snippets.

@lucifermorningstar1305
Last active January 13, 2019 16:01
Show Gist options
  • Save lucifermorningstar1305/558f3e228d962816ac9abc4e983f70ff to your computer and use it in GitHub Desktop.
Save lucifermorningstar1305/558f3e228d962816ac9abc4e983f70ff to your computer and use it in GitHub Desktop.
VGG-style CNN architecture in TensorFlow (10 conv layers + 3 fully-connected layers, for 32x32x3 inputs)
def convolutional_neural_network(x):
    """Build a VGG-style convolutional network graph for 32x32x3 images.

    The architecture is: conv-conv-pool, conv-conv-pool, conv-conv-conv-pool,
    conv-conv-conv-pool, then three fully-connected layers with dropout on
    the first two. With four 2x2 max-pools, a 32x32 input is reduced to
    2x2x512 before the flatten, which matches the `W_fc1` shape below.

    Relies on names defined elsewhere in this file:
      - conv2d(x, W):   2-D convolution helper (presumably SAME padding,
                        stride 1 — TODO confirm against its definition)
      - maxpool2d(x):   max-pooling helper (presumably 2x2/stride 2 — the
                        2*2*512 flatten size only works out if so)
      - n_classes:      number of output classes
      - keep_rate:      dropout keep probability

    Args:
        x: input tensor; reshaped internally to [-1, 32, 32, 3].

    Returns:
        Tensor of shape [batch, n_classes] holding softmax class
        probabilities.
    """
    # NOTE(review): the pasted original had all indentation stripped, which
    # is a SyntaxError in Python; this restores a valid, unchanged graph.
    weights = {'W_conv1': tf.Variable(tf.random_normal([3, 3, 3, 64])),
               'W_conv2': tf.Variable(tf.random_normal([3, 3, 64, 64])),
               'W_conv3': tf.Variable(tf.random_normal([3, 3, 64, 128])),
               'W_conv4': tf.Variable(tf.random_normal([3, 3, 128, 128])),
               'W_conv5': tf.Variable(tf.random_normal([3, 3, 128, 256])),
               'W_conv6': tf.Variable(tf.random_normal([3, 3, 256, 256])),
               'W_conv7': tf.Variable(tf.random_normal([3, 3, 256, 256])),
               'W_conv8': tf.Variable(tf.random_normal([3, 3, 256, 512])),
               'W_conv9': tf.Variable(tf.random_normal([3, 3, 512, 512])),
               'W_conv10': tf.Variable(tf.random_normal([3, 3, 512, 512])),
               # Flattened 2x2x512 feature map feeds the first FC layer.
               'W_fc1': tf.Variable(tf.random_normal([2 * 2 * 512, 1024])),
               'W_fc2': tf.Variable(tf.random_normal([1024, 1024])),
               'W_fc3': tf.Variable(tf.random_normal([1024, n_classes]))}

    biases = {'b_conv1': tf.Variable(tf.random_normal([64])),
              'b_conv2': tf.Variable(tf.random_normal([64])),
              'b_conv3': tf.Variable(tf.random_normal([128])),
              'b_conv4': tf.Variable(tf.random_normal([128])),
              'b_conv5': tf.Variable(tf.random_normal([256])),
              'b_conv6': tf.Variable(tf.random_normal([256])),
              'b_conv7': tf.Variable(tf.random_normal([256])),
              'b_conv8': tf.Variable(tf.random_normal([512])),
              'b_conv9': tf.Variable(tf.random_normal([512])),
              'b_conv10': tf.Variable(tf.random_normal([512])),
              'b_fc1': tf.Variable(tf.random_normal([1024])),
              'b_fc2': tf.Variable(tf.random_normal([1024])),
              'b_fc3': tf.Variable(tf.random_normal([n_classes]))}

    x = tf.reshape(x, [-1, 32, 32, 3])

    # Block 1: two 64-channel convs, then pool (32x32 -> 16x16).
    conv1 = tf.nn.relu(conv2d(x, weights['W_conv1']) + biases['b_conv1'])
    conv2 = tf.nn.relu(conv2d(conv1, weights['W_conv2']) + biases['b_conv2'])
    conv2 = maxpool2d(conv2)

    # Block 2: two 128-channel convs, then pool (16x16 -> 8x8).
    conv3 = tf.nn.relu(conv2d(conv2, weights['W_conv3']) + biases['b_conv3'])
    conv4 = tf.nn.relu(conv2d(conv3, weights['W_conv4']) + biases['b_conv4'])
    conv4 = maxpool2d(conv4)

    # Block 3: three 256-channel convs, then pool (8x8 -> 4x4).
    conv5 = tf.nn.relu(conv2d(conv4, weights['W_conv5']) + biases['b_conv5'])
    conv6 = tf.nn.relu(conv2d(conv5, weights['W_conv6']) + biases['b_conv6'])
    conv7 = tf.nn.relu(conv2d(conv6, weights['W_conv7']) + biases['b_conv7'])
    conv7 = maxpool2d(conv7)

    # Block 4: three 512-channel convs, then pool (4x4 -> 2x2).
    conv8 = tf.nn.relu(conv2d(conv7, weights['W_conv8']) + biases['b_conv8'])
    conv9 = tf.nn.relu(conv2d(conv8, weights['W_conv9']) + biases['b_conv9'])
    conv10 = tf.nn.relu(conv2d(conv9, weights['W_conv10']) + biases['b_conv10'])
    conv10 = maxpool2d(conv10)

    # Classifier head: flatten, two FC+dropout layers, softmax output.
    fc1 = tf.reshape(conv10, [-1, 2 * 2 * 512])
    fc1 = tf.nn.relu(tf.matmul(fc1, weights['W_fc1']) + biases['b_fc1'])
    fc1 = tf.nn.dropout(fc1, keep_rate)

    fc2 = tf.nn.relu(tf.matmul(fc2 if False else fc1, weights['W_fc2']) + biases['b_fc2'])
    fc2 = tf.nn.dropout(fc2, keep_rate)

    # NOTE(review): softmax is applied here in the graph. If the training
    # loss uses tf.nn.softmax_cross_entropy_with_logits on this return
    # value, softmax is applied twice — verify against the training code
    # and, if so, return the pre-softmax logits instead.
    output = tf.nn.softmax(tf.matmul(fc2, weights['W_fc3']) + biases['b_fc3'])
    return output
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment