Created
February 2, 2018 01:00
-
-
Save schlerp/b1709686d33f1526ff76ede0e50f6ffe to your computer and use it in GitHub Desktop.
Some of the Inception v3 and v4 blocks implemented in TFLearn. I hope this helps someone!
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import tflearn | |
from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d | |
from tflearn.layers.merge_ops import merge | |
from tflearn.layers.normalization import local_response_normalization | |
def inceptionv3_block(input_block, filter_width=1, stride=1,
                      activation='relu', regularizer='L2', normalise=False):
    """GoogLeNet-style Inception block: 1x1, 3x3, 5x5 and pooled branches.

    Args:
        input_block: incoming 4-D tensor (batch, height, width, channels).
        filter_width: multiplier applied to every branch's filter count.
        stride: stride used by all convolution layers.
        activation: activation for the output conv of each branch.
        regularizer: weight regularizer passed to every conv layer.
        normalise: if True, apply local response normalization to the output.

    Returns:
        Channel-axis concatenation of the four branches.
    """
    # 1x1 convolutions
    conv1x1_a = conv_2d(input_block, 64*filter_width, 1, stride,
                        activation=activation, regularizer=regularizer)
    # 3x3 convolutions (1x1 bottleneck first, linear activation)
    conv1x1_b = conv_2d(input_block, 96*filter_width, 1, stride,
                        activation='linear', regularizer=regularizer)
    conv3x3_b = conv_2d(conv1x1_b, 128*filter_width, 3, stride,
                        activation=activation, regularizer=regularizer)
    # 5x5 convolutions (1x1 bottleneck first, linear activation)
    conv1x1_c = conv_2d(input_block, 16*filter_width, 1, stride,
                        activation='linear', regularizer=regularizer)
    conv5x5_c = conv_2d(conv1x1_c, 32*filter_width, 5, stride,
                        activation=activation, regularizer=regularizer)
    # maxpool branch: strides=1 keeps the spatial size so the channel-axis
    # concat below is valid (tflearn's max_pool_2d defaults strides to the
    # kernel size, which would downsample only this branch).
    max_pool_d = max_pool_2d(input_block, 3, strides=1)
    conv1x1_d = conv_2d(max_pool_d, 32*filter_width, 1, stride,
                        activation='linear', regularizer=regularizer)
    output = merge([conv1x1_a, conv3x3_b, conv5x5_c, conv1x1_d], 'concat', axis=-1)
    if normalise:
        output = local_response_normalization(output)
    return output
def inceptionv4_stem_block(x, filter_width=1, activation='relu'):
    """Inception-v4 stem: initial convs plus three successive split/merge stages.

    Args:
        x: incoming 4-D tensor (batch, height, width, channels).
        filter_width: multiplier applied to every conv's filter count.
        activation: activation used by every non-linear conv layer.

    Returns:
        Output of the third filter-concat stage.
    """
    # stem: three initial 3x3 convs; the first downsamples with stride 2
    stem = conv_2d(x, 32*filter_width, 3, 2,
                   padding='valid', activation=activation)
    stem = conv_2d(stem, 32*filter_width, 3, 1,
                   padding='valid', activation=activation)
    stem = conv_2d(stem, 64*filter_width, 3, 1, activation=activation)
    # branch 1 a: strided maxpool
    maxpool_1a = max_pool_2d(stem, 3, 2, padding='valid')
    # branch 1 b: strided 3x3 conv
    conv3x3_1b = conv_2d(stem, 96*filter_width, 3, 2,
                         padding='valid', activation=activation)
    # merge 1
    concat_1 = merge([maxpool_1a, conv3x3_1b], 'concat', axis=-1)
    # branch 2 a: 1x1 bottleneck then valid 3x3
    conv1x1_2a = conv_2d(concat_1, 64*filter_width, 1, 1,
                         activation=activation)
    conv3x3_2a = conv_2d(conv1x1_2a, 96*filter_width, 3, 1,
                         padding='valid', activation=activation)
    # branch 2 b: 1x1 bottleneck, factorized 7x1/1x7 pair, then valid 3x3
    conv1x1_2b = conv_2d(concat_1, 64*filter_width, 1, 1,
                         activation=activation)
    conv7x1_2b = conv_2d(conv1x1_2b, 64*filter_width, (7, 1), 1,
                         activation=activation)
    conv1x7_2b = conv_2d(conv7x1_2b, 64*filter_width, (1, 7), 1,
                         activation=activation)
    conv3x3_2b = conv_2d(conv1x7_2b, 96*filter_width, 3, 1,
                         padding='valid', activation=activation)
    # merge 2
    concat_2 = merge([conv3x3_2a, conv3x3_2b], 'concat', axis=-1)
    # branch 3 a: feeds from concat_2 with stride 2 — the original read from
    # `stem` (leaving concat_2 dead) with stride 1, which both skips merge 2
    # and produces spatial dims that cannot concat with the maxpool branch.
    conv3x3_3a = conv_2d(concat_2, 192*filter_width, 3, 2,
                         padding='valid', activation=activation)
    # branch 3 b: strided maxpool over the same merge-2 output
    maxpool_3b = max_pool_2d(concat_2, 3, 2, padding='valid')
    # merge 3
    concat_3 = merge([conv3x3_3a, maxpool_3b], 'concat', axis=-1)
    return concat_3
def inceptionv4_a_block(x, filter_width=1, activation='relu'):
    """Inception-v4 "A" block: four parallel branches merged on the channel axis.

    Args:
        x: incoming 4-D tensor (batch, height, width, channels).
        filter_width: multiplier applied to every conv's filter count.
        activation: activation used by every conv layer.

    Returns:
        Channel-axis concatenation of the four branches.
    """
    # branch a: average pool (strides=1 to preserve spatial size — tflearn's
    # avg_pool_2d defaults strides to the kernel size, which would break the
    # concat below) followed by 1x1 conv
    avgpool_a = avg_pool_2d(x, 3, strides=1)
    conv1x1_a = conv_2d(avgpool_a, 96*filter_width, 1, activation=activation)
    # branch b: plain 1x1 conv
    conv1x1_b = conv_2d(x, 96*filter_width, 1, activation=activation)
    # branch c: 1x1 bottleneck then 3x3
    conv1x1_c = conv_2d(x, 64*filter_width, 1, activation=activation)
    conv3x3_c = conv_2d(conv1x1_c, 96*filter_width, 3, activation=activation)
    # branch d: 1x1 bottleneck then two stacked 3x3 convs
    # (variables renamed from conv_3x3_d — the original merge referenced the
    # undefined name conv3x3_d, raising NameError at build time)
    conv1x1_d = conv_2d(x, 64*filter_width, 1, activation=activation)
    conv3x3_d = conv_2d(conv1x1_d, 96*filter_width, 3, activation=activation)
    conv3x3_d = conv_2d(conv3x3_d, 96*filter_width, 3, activation=activation)
    # merge
    concat_1 = merge([conv1x1_a, conv1x1_b,
                      conv3x3_c, conv3x3_d], 'concat', axis=-1)
    return concat_1
def inceptionv4_b_block(x, filter_width=1, activation='relu'):
    """Inception-v4 "B" block: factorized 7x1/1x7 branches merged channel-wise.

    Args:
        x: incoming 4-D tensor (batch, height, width, channels).
        filter_width: multiplier applied to every conv's filter count.
        activation: activation used by every conv layer.

    Returns:
        Channel-axis concatenation of the four branches.
    """
    # branch a: average pool (strides=1 to preserve spatial size — tflearn's
    # avg_pool_2d defaults strides to the kernel size, which would break the
    # concat below) followed by 1x1 conv
    avgpool_a = avg_pool_2d(x, 3, strides=1)
    conv1x1_a = conv_2d(avgpool_a, 128*filter_width, 1, activation=activation)
    # branch b: plain 1x1 conv
    conv1x1_b = conv_2d(x, 384*filter_width, 1, activation=activation)
    # branch c: 1x1 bottleneck then two 1x7 convs
    # NOTE(review): some renditions of the Inception-v4 paper's B block show a
    # 1x7 followed by a 7x1 here — confirm the two-1x7 stack is intended.
    conv1x1_c = conv_2d(x, 192*filter_width, 1, 1,
                        activation=activation)
    conv1x7_1c = conv_2d(conv1x1_c, 224*filter_width, (1, 7), 1,
                         activation=activation)
    conv1x7_2c = conv_2d(conv1x7_1c, 256*filter_width, (1, 7), 1,
                         activation=activation)
    # branch d: 1x1 bottleneck then alternating 1x7 / 7x1 convs
    conv1x1_d = conv_2d(x, 192*filter_width, 1, 1,
                        activation=activation)
    conv1x7_1d = conv_2d(conv1x1_d, 192*filter_width, (1, 7), 1,
                         activation=activation)
    conv7x1_1d = conv_2d(conv1x7_1d, 224*filter_width, (7, 1), 1,
                         activation=activation)
    conv1x7_2d = conv_2d(conv7x1_1d, 224*filter_width, (1, 7), 1,
                         activation=activation)
    conv7x1_2d = conv_2d(conv1x7_2d, 256*filter_width, (7, 1), 1,
                         activation=activation)
    # merge
    concat_1 = merge([conv1x1_a, conv1x1_b,
                      conv1x7_2c, conv7x1_2d], 'concat', axis=-1)
    return concat_1
def inceptionv4_c_block(x, filter_width=1, activation='relu'):
    """Inception-v4 "C" block: branches with split 1x3/3x1 tails, merged channel-wise.

    Args:
        x: incoming 4-D tensor (batch, height, width, channels).
        filter_width: multiplier applied to every conv's filter count.
        activation: activation used by every conv layer.

    Returns:
        Channel-axis concatenation of the six branch outputs.
    """
    # branch a: average pool (strides=1 to preserve spatial size — tflearn's
    # avg_pool_2d defaults strides to the kernel size, which would break the
    # concat below) followed by 1x1 conv
    avgpool_a = avg_pool_2d(x, 3, strides=1)
    conv1x1_a = conv_2d(avgpool_a, 256*filter_width, 1, activation=activation)
    # branch b: plain 1x1 conv
    conv1x1_b = conv_2d(x, 256*filter_width, 1, activation=activation)
    # branch c: 1x1 bottleneck that splits into parallel 1x3 and 3x1 tails
    conv1x1_c = conv_2d(x, 384*filter_width, 1, 1,
                        activation=activation)
    # branch c a
    conv1x3_c_a = conv_2d(conv1x1_c, 256*filter_width, (1, 3), 1,
                          activation=activation)
    # branch c b
    conv3x1_c_b = conv_2d(conv1x1_c, 256*filter_width, (3, 1), 1,
                          activation=activation)
    # branch d: 1x1 bottleneck, 1x3 and 3x1 convs, then parallel split tails
    conv1x1_d = conv_2d(x, 384*filter_width, 1, 1,
                        activation=activation)
    conv1x3_d = conv_2d(conv1x1_d, 448*filter_width, (1, 3), 1,
                        activation=activation)
    conv3x1_d = conv_2d(conv1x3_d, 512*filter_width, (3, 1), 1,
                        activation=activation)
    # branch d a
    conv1x3_d_a = conv_2d(conv3x1_d, 256*filter_width, (1, 3), 1,
                          activation=activation)
    # branch d b
    conv3x1_d_b = conv_2d(conv3x1_d, 256*filter_width, (3, 1), 1,
                          activation=activation)
    # merge
    concat_1 = merge([conv1x1_a, conv1x1_b, conv1x3_c_a, conv3x1_c_b,
                      conv1x3_d_a, conv3x1_d_b], 'concat', axis=-1)
    return concat_1
def inceptionv4_reduct_a_block(x, filter_width=1, activation='relu'):
    """Inception-v4 reduction-A block: three downsampling branches, concatenated.

    Args:
        x: incoming 4-D tensor (batch, height, width, channels).
        filter_width: multiplier applied to every conv's filter count.
        activation: activation used by every conv layer.

    Returns:
        Channel-axis concatenation of the three branches.
    """
    # branch a: strided maxpool downsample
    pooled = max_pool_2d(x, 3, 2, padding='valid')
    # branch b: single strided 3x3 conv downsample
    direct = conv_2d(x, 384*filter_width, 3, 2, padding='valid',
                     activation=activation)
    # branch c: 1x1 bottleneck, a same-size 3x3, then a strided 3x3 downsample
    bottleneck = conv_2d(x, 192*filter_width, 1, 1,
                         activation=activation)
    expanded = conv_2d(bottleneck, 224*filter_width, 3, 1,
                       activation=activation)
    reduced = conv_2d(expanded, 256*filter_width, 3, 2, padding='valid',
                      activation=activation)
    # join all three branches on the channel axis
    return merge([pooled, direct, reduced], 'concat', axis=-1)
def inceptionv4_reduct_b_block(x, filter_width=1, activation='relu'):
    """Inception-v4 reduction-B block: three downsampling branches, concatenated.

    Args:
        x: incoming 4-D tensor (batch, height, width, channels).
        filter_width: multiplier applied to every conv's filter count.
        activation: activation used by every conv layer.

    Returns:
        Channel-axis concatenation of the three branches.
    """
    # branch a: strided maxpool (the original passed undefined capital `X`,
    # raising NameError at build time — the parameter is lowercase `x`)
    maxpool_a = max_pool_2d(x, 3, 2, padding='valid')
    # branch b: 1x1 bottleneck then strided 3x3 downsample
    conv1x1_b = conv_2d(x, 192*filter_width, 1, 1,
                        activation=activation)
    conv3x3_b = conv_2d(conv1x1_b, 192*filter_width, 3, 2, padding='valid',
                        activation=activation)
    # branch c: 1x1 bottleneck, factorized 1x7/7x1 pair, strided 3x3 downsample
    conv1x1_c = conv_2d(x, 256*filter_width, 1, 1,
                        activation=activation)
    conv1x7_1c = conv_2d(conv1x1_c, 256*filter_width, (1, 7), 1,
                         activation=activation)
    conv7x1_2c = conv_2d(conv1x7_1c, 320*filter_width, (7, 1), 1,
                         activation=activation)
    conv3x3_c = conv_2d(conv7x1_2c, 320*filter_width, 3, 2, padding='valid',
                        activation=activation)
    # merge
    concat_1 = merge([maxpool_a, conv3x3_b, conv3x3_c], 'concat', axis=-1)
    return concat_1
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment