# Gist by @walkoncross, last active December 18, 2017
name: "FACE-SE-ResNeXt-101 (32 x 4d)"
# mean_value: 104, 117, 123
#layer {
# name: "data"
# type: "Input"
# top: "data"
# input_param: {
# shape: {
# dim: 1
# dim: 3
# dim: 224
# dim: 224
# }
# }
#}
layer {
name: "data"
type: "ImageData"
top: "data"
top: "label"
# transform_param {
# mean_value: 127.5
# mean_value: 127.5
# mean_value: 127.5
# scale: 0.0078125
# mirror: true
# }
# mean pixel / channel-wise mean instead of mean image
transform_param {
# crop_size: 227
mean_value: 104
mean_value: 117
mean_value: 123
mirror: true
}
image_data_param {
source: "/disk2/zhaoyafei/face-recog-train/train-val-lists/webface/train_list_noval_10572-ids_450833-objs_170503-213839.txt"
batch_size: 120
shuffle: true
}
}
layer {
name: "conv1/7x7_s2"
type: "Convolution"
bottom: "data"
top: "conv1/7x7_s2"
convolution_param {
num_output: 64
bias_term: false
pad: 3
kernel_size: 7
stride: 2
}
}
layer {
name: "conv1/7x7_s2/bn"
type: "BatchNorm"
bottom: "conv1/7x7_s2"
top: "conv1/7x7_s2"
batch_norm_param {
}
}
layer {
name: "conv1/7x7_s2/bn/scale"
type: "Scale"
bottom: "conv1/7x7_s2"
top: "conv1/7x7_s2"
scale_param {
bias_term: true
}
}
layer {
name: "conv1/relu_7x7_s2"
type: "ReLU"
bottom: "conv1/7x7_s2"
top: "conv1/7x7_s2"
}
layer {
name: "pool1/3x3_s2"
type: "Pooling"
bottom: "conv1/7x7_s2"
top: "pool1/3x3_s2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
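# Stage conv2 (3 blocks). Each SE-ResNeXt bottleneck follows the same pattern:
# 1x1 reduce -> 3x3 grouped conv (group: 32, i.e. cardinality 32) -> 1x1 increase,
# each convolution followed by BatchNorm + Scale + ReLU, then an SE branch and an
# Axpy merge with the block's shortcut.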
layer {
name: "conv2_1_1x1_reduce"
type: "Convolution"
bottom: "pool1/3x3_s2"
top: "conv2_1_1x1_reduce"
convolution_param {
num_output: 128
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_1_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv2_1_1x1_reduce"
top: "conv2_1_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv2_1_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv2_1_1x1_reduce"
top: "conv2_1_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv2_1_1x1_reduce/relu"
type: "ReLU"
bottom: "conv2_1_1x1_reduce"
top: "conv2_1_1x1_reduce"
}
layer {
name: "conv2_1_3x3"
type: "Convolution"
bottom: "conv2_1_1x1_reduce"
top: "conv2_1_3x3"
convolution_param {
num_output: 128
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv2_1_3x3/bn"
type: "BatchNorm"
bottom: "conv2_1_3x3"
top: "conv2_1_3x3"
batch_norm_param {
}
}
layer {
name: "conv2_1_3x3/bn/scale"
type: "Scale"
bottom: "conv2_1_3x3"
top: "conv2_1_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv2_1_3x3/relu"
type: "ReLU"
bottom: "conv2_1_3x3"
top: "conv2_1_3x3"
}
layer {
name: "conv2_1_1x1_increase"
type: "Convolution"
bottom: "conv2_1_3x3"
top: "conv2_1_1x1_increase"
convolution_param {
num_output: 256
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_1_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv2_1_1x1_increase"
top: "conv2_1_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv2_1_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv2_1_1x1_increase"
top: "conv2_1_1x1_increase"
scale_param {
bias_term: true
}
}
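# Squeeze-and-Excitation branch: global average pooling (squeeze), then two 1x1
# convolutions acting as the excitation bottleneck (reduction ratio 16: 256 -> 16 -> 256)
# with a ReLU in between, and a Sigmoid producing per-channel gates.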
layer {
name: "conv2_1_global_pool"
type: "Pooling"
bottom: "conv2_1_1x1_increase"
top: "conv2_1_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv2_1_1x1_down"
type: "Convolution"
bottom: "conv2_1_global_pool"
top: "conv2_1_1x1_down"
convolution_param {
num_output: 16
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_1_1x1_down/relu"
type: "ReLU"
bottom: "conv2_1_1x1_down"
top: "conv2_1_1x1_down"
}
layer {
name: "conv2_1_1x1_up"
type: "Convolution"
bottom: "conv2_1_1x1_down"
top: "conv2_1_1x1_up"
convolution_param {
num_output: 256
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_1_prob"
type: "Sigmoid"
bottom: "conv2_1_1x1_up"
top: "conv2_1_1x1_up"
}
layer {
name: "conv2_1_1x1_proj"
type: "Convolution"
bottom: "pool1/3x3_s2"
top: "conv2_1_1x1_proj"
convolution_param {
num_output: 256
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_1_1x1_proj/bn"
type: "BatchNorm"
bottom: "conv2_1_1x1_proj"
top: "conv2_1_1x1_proj"
batch_norm_param {
}
}
layer {
name: "conv2_1_1x1_proj/bn/scale"
type: "Scale"
bottom: "conv2_1_1x1_proj"
top: "conv2_1_1x1_proj"
scale_param {
bias_term: true
}
}
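# Note: Axpy is a custom layer (not in stock Caffe); as used in the SENet reference
# code it computes top = bottom[0] * bottom[1] + bottom[2], i.e. the sigmoid gates
# rescale the residual branch channel-wise before it is added to the shortcut
# (here the 1x1 projection, since this is the first block of the stage).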
layer {
name: "conv2_1"
type: "Axpy"
bottom: "conv2_1_1x1_up"
bottom: "conv2_1_1x1_increase"
bottom: "conv2_1_1x1_proj"
top: "conv2_1"
}
layer {
name: "conv2_1/relu"
type: "ReLU"
bottom: "conv2_1"
top: "conv2_1"
}
layer {
name: "conv2_2_1x1_reduce"
type: "Convolution"
bottom: "conv2_1"
top: "conv2_2_1x1_reduce"
convolution_param {
num_output: 128
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_2_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv2_2_1x1_reduce"
top: "conv2_2_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv2_2_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv2_2_1x1_reduce"
top: "conv2_2_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv2_2_1x1_reduce/relu"
type: "ReLU"
bottom: "conv2_2_1x1_reduce"
top: "conv2_2_1x1_reduce"
}
layer {
name: "conv2_2_3x3"
type: "Convolution"
bottom: "conv2_2_1x1_reduce"
top: "conv2_2_3x3"
convolution_param {
num_output: 128
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv2_2_3x3/bn"
type: "BatchNorm"
bottom: "conv2_2_3x3"
top: "conv2_2_3x3"
batch_norm_param {
}
}
layer {
name: "conv2_2_3x3/bn/scale"
type: "Scale"
bottom: "conv2_2_3x3"
top: "conv2_2_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv2_2_3x3/relu"
type: "ReLU"
bottom: "conv2_2_3x3"
top: "conv2_2_3x3"
}
layer {
name: "conv2_2_1x1_increase"
type: "Convolution"
bottom: "conv2_2_3x3"
top: "conv2_2_1x1_increase"
convolution_param {
num_output: 256
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_2_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv2_2_1x1_increase"
top: "conv2_2_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv2_2_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv2_2_1x1_increase"
top: "conv2_2_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv2_2_global_pool"
type: "Pooling"
bottom: "conv2_2_1x1_increase"
top: "conv2_2_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv2_2_1x1_down"
type: "Convolution"
bottom: "conv2_2_global_pool"
top: "conv2_2_1x1_down"
convolution_param {
num_output: 16
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_2_1x1_down/relu"
type: "ReLU"
bottom: "conv2_2_1x1_down"
top: "conv2_2_1x1_down"
}
layer {
name: "conv2_2_1x1_up"
type: "Convolution"
bottom: "conv2_2_1x1_down"
top: "conv2_2_1x1_up"
convolution_param {
num_output: 256
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_2_prob"
type: "Sigmoid"
bottom: "conv2_2_1x1_up"
top: "conv2_2_1x1_up"
}
layer {
name: "conv2_2"
type: "Axpy"
bottom: "conv2_2_1x1_up"
bottom: "conv2_2_1x1_increase"
bottom: "conv2_1"
top: "conv2_2"
}
layer {
name: "conv2_2/relu"
type: "ReLU"
bottom: "conv2_2"
top: "conv2_2"
}
layer {
name: "conv2_3_1x1_reduce"
type: "Convolution"
bottom: "conv2_2"
top: "conv2_3_1x1_reduce"
convolution_param {
num_output: 128
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_3_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv2_3_1x1_reduce"
top: "conv2_3_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv2_3_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv2_3_1x1_reduce"
top: "conv2_3_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv2_3_1x1_reduce/relu"
type: "ReLU"
bottom: "conv2_3_1x1_reduce"
top: "conv2_3_1x1_reduce"
}
layer {
name: "conv2_3_3x3"
type: "Convolution"
bottom: "conv2_3_1x1_reduce"
top: "conv2_3_3x3"
convolution_param {
num_output: 128
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv2_3_3x3/bn"
type: "BatchNorm"
bottom: "conv2_3_3x3"
top: "conv2_3_3x3"
batch_norm_param {
}
}
layer {
name: "conv2_3_3x3/bn/scale"
type: "Scale"
bottom: "conv2_3_3x3"
top: "conv2_3_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv2_3_3x3/relu"
type: "ReLU"
bottom: "conv2_3_3x3"
top: "conv2_3_3x3"
}
layer {
name: "conv2_3_1x1_increase"
type: "Convolution"
bottom: "conv2_3_3x3"
top: "conv2_3_1x1_increase"
convolution_param {
num_output: 256
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_3_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv2_3_1x1_increase"
top: "conv2_3_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv2_3_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv2_3_1x1_increase"
top: "conv2_3_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv2_3_global_pool"
type: "Pooling"
bottom: "conv2_3_1x1_increase"
top: "conv2_3_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv2_3_1x1_down"
type: "Convolution"
bottom: "conv2_3_global_pool"
top: "conv2_3_1x1_down"
convolution_param {
num_output: 16
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_3_1x1_down/relu"
type: "ReLU"
bottom: "conv2_3_1x1_down"
top: "conv2_3_1x1_down"
}
layer {
name: "conv2_3_1x1_up"
type: "Convolution"
bottom: "conv2_3_1x1_down"
top: "conv2_3_1x1_up"
convolution_param {
num_output: 256
kernel_size: 1
stride: 1
}
}
layer {
name: "conv2_3_prob"
type: "Sigmoid"
bottom: "conv2_3_1x1_up"
top: "conv2_3_1x1_up"
}
layer {
name: "conv2_3"
type: "Axpy"
bottom: "conv2_3_1x1_up"
bottom: "conv2_3_1x1_increase"
bottom: "conv2_2"
top: "conv2_3"
}
layer {
name: "conv2_3/relu"
type: "ReLU"
bottom: "conv2_3"
top: "conv2_3"
}
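# Stage conv3 (4 blocks): group width doubles to 256 and outputs to 512 (SE: 512 -> 32).
# The first block downsamples with a stride-2 grouped 3x3 conv and a stride-2 1x1
# projection shortcut; the remaining blocks use identity shortcuts.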
layer {
name: "conv3_1_1x1_reduce"
type: "Convolution"
bottom: "conv2_3"
top: "conv3_1_1x1_reduce"
convolution_param {
num_output: 256
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_1_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv3_1_1x1_reduce"
top: "conv3_1_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv3_1_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv3_1_1x1_reduce"
top: "conv3_1_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_1_1x1_reduce/relu"
type: "ReLU"
bottom: "conv3_1_1x1_reduce"
top: "conv3_1_1x1_reduce"
}
layer {
name: "conv3_1_3x3"
type: "Convolution"
bottom: "conv3_1_1x1_reduce"
top: "conv3_1_3x3"
convolution_param {
num_output: 256
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 2
}
}
layer {
name: "conv3_1_3x3/bn"
type: "BatchNorm"
bottom: "conv3_1_3x3"
top: "conv3_1_3x3"
batch_norm_param {
}
}
layer {
name: "conv3_1_3x3/bn/scale"
type: "Scale"
bottom: "conv3_1_3x3"
top: "conv3_1_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_1_3x3/relu"
type: "ReLU"
bottom: "conv3_1_3x3"
top: "conv3_1_3x3"
}
layer {
name: "conv3_1_1x1_increase"
type: "Convolution"
bottom: "conv3_1_3x3"
top: "conv3_1_1x1_increase"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_1_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv3_1_1x1_increase"
top: "conv3_1_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv3_1_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv3_1_1x1_increase"
top: "conv3_1_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_1_global_pool"
type: "Pooling"
bottom: "conv3_1_1x1_increase"
top: "conv3_1_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv3_1_1x1_down"
type: "Convolution"
bottom: "conv3_1_global_pool"
top: "conv3_1_1x1_down"
convolution_param {
num_output: 32
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_1_1x1_down/relu"
type: "ReLU"
bottom: "conv3_1_1x1_down"
top: "conv3_1_1x1_down"
}
layer {
name: "conv3_1_1x1_up"
type: "Convolution"
bottom: "conv3_1_1x1_down"
top: "conv3_1_1x1_up"
convolution_param {
num_output: 512
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_1_prob"
type: "Sigmoid"
bottom: "conv3_1_1x1_up"
top: "conv3_1_1x1_up"
}
layer {
name: "conv3_1_1x1_proj"
type: "Convolution"
bottom: "conv2_3"
top: "conv3_1_1x1_proj"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 2
}
}
layer {
name: "conv3_1_1x1_proj/bn"
type: "BatchNorm"
bottom: "conv3_1_1x1_proj"
top: "conv3_1_1x1_proj"
batch_norm_param {
}
}
layer {
name: "conv3_1_1x1_proj/bn/scale"
type: "Scale"
bottom: "conv3_1_1x1_proj"
top: "conv3_1_1x1_proj"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_1"
type: "Axpy"
bottom: "conv3_1_1x1_up"
bottom: "conv3_1_1x1_increase"
bottom: "conv3_1_1x1_proj"
top: "conv3_1"
}
layer {
name: "conv3_1/relu"
type: "ReLU"
bottom: "conv3_1"
top: "conv3_1"
}
layer {
name: "conv3_2_1x1_reduce"
type: "Convolution"
bottom: "conv3_1"
top: "conv3_2_1x1_reduce"
convolution_param {
num_output: 256
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_2_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv3_2_1x1_reduce"
top: "conv3_2_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv3_2_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv3_2_1x1_reduce"
top: "conv3_2_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_2_1x1_reduce/relu"
type: "ReLU"
bottom: "conv3_2_1x1_reduce"
top: "conv3_2_1x1_reduce"
}
layer {
name: "conv3_2_3x3"
type: "Convolution"
bottom: "conv3_2_1x1_reduce"
top: "conv3_2_3x3"
convolution_param {
num_output: 256
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv3_2_3x3/bn"
type: "BatchNorm"
bottom: "conv3_2_3x3"
top: "conv3_2_3x3"
batch_norm_param {
}
}
layer {
name: "conv3_2_3x3/bn/scale"
type: "Scale"
bottom: "conv3_2_3x3"
top: "conv3_2_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_2_3x3/relu"
type: "ReLU"
bottom: "conv3_2_3x3"
top: "conv3_2_3x3"
}
layer {
name: "conv3_2_1x1_increase"
type: "Convolution"
bottom: "conv3_2_3x3"
top: "conv3_2_1x1_increase"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_2_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv3_2_1x1_increase"
top: "conv3_2_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv3_2_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv3_2_1x1_increase"
top: "conv3_2_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_2_global_pool"
type: "Pooling"
bottom: "conv3_2_1x1_increase"
top: "conv3_2_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv3_2_1x1_down"
type: "Convolution"
bottom: "conv3_2_global_pool"
top: "conv3_2_1x1_down"
convolution_param {
num_output: 32
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_2_1x1_down/relu"
type: "ReLU"
bottom: "conv3_2_1x1_down"
top: "conv3_2_1x1_down"
}
layer {
name: "conv3_2_1x1_up"
type: "Convolution"
bottom: "conv3_2_1x1_down"
top: "conv3_2_1x1_up"
convolution_param {
num_output: 512
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_2_prob"
type: "Sigmoid"
bottom: "conv3_2_1x1_up"
top: "conv3_2_1x1_up"
}
layer {
name: "conv3_2"
type: "Axpy"
bottom: "conv3_2_1x1_up"
bottom: "conv3_2_1x1_increase"
bottom: "conv3_1"
top: "conv3_2"
}
layer {
name: "conv3_2/relu"
type: "ReLU"
bottom: "conv3_2"
top: "conv3_2"
}
layer {
name: "conv3_3_1x1_reduce"
type: "Convolution"
bottom: "conv3_2"
top: "conv3_3_1x1_reduce"
convolution_param {
num_output: 256
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_3_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv3_3_1x1_reduce"
top: "conv3_3_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv3_3_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv3_3_1x1_reduce"
top: "conv3_3_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_3_1x1_reduce/relu"
type: "ReLU"
bottom: "conv3_3_1x1_reduce"
top: "conv3_3_1x1_reduce"
}
layer {
name: "conv3_3_3x3"
type: "Convolution"
bottom: "conv3_3_1x1_reduce"
top: "conv3_3_3x3"
convolution_param {
num_output: 256
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv3_3_3x3/bn"
type: "BatchNorm"
bottom: "conv3_3_3x3"
top: "conv3_3_3x3"
batch_norm_param {
}
}
layer {
name: "conv3_3_3x3/bn/scale"
type: "Scale"
bottom: "conv3_3_3x3"
top: "conv3_3_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_3_3x3/relu"
type: "ReLU"
bottom: "conv3_3_3x3"
top: "conv3_3_3x3"
}
layer {
name: "conv3_3_1x1_increase"
type: "Convolution"
bottom: "conv3_3_3x3"
top: "conv3_3_1x1_increase"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_3_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv3_3_1x1_increase"
top: "conv3_3_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv3_3_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv3_3_1x1_increase"
top: "conv3_3_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_3_global_pool"
type: "Pooling"
bottom: "conv3_3_1x1_increase"
top: "conv3_3_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv3_3_1x1_down"
type: "Convolution"
bottom: "conv3_3_global_pool"
top: "conv3_3_1x1_down"
convolution_param {
num_output: 32
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_3_1x1_down/relu"
type: "ReLU"
bottom: "conv3_3_1x1_down"
top: "conv3_3_1x1_down"
}
layer {
name: "conv3_3_1x1_up"
type: "Convolution"
bottom: "conv3_3_1x1_down"
top: "conv3_3_1x1_up"
convolution_param {
num_output: 512
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_3_prob"
type: "Sigmoid"
bottom: "conv3_3_1x1_up"
top: "conv3_3_1x1_up"
}
layer {
name: "conv3_3"
type: "Axpy"
bottom: "conv3_3_1x1_up"
bottom: "conv3_3_1x1_increase"
bottom: "conv3_2"
top: "conv3_3"
}
layer {
name: "conv3_3/relu"
type: "ReLU"
bottom: "conv3_3"
top: "conv3_3"
}
layer {
name: "conv3_4_1x1_reduce"
type: "Convolution"
bottom: "conv3_3"
top: "conv3_4_1x1_reduce"
convolution_param {
num_output: 256
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_4_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv3_4_1x1_reduce"
top: "conv3_4_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv3_4_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv3_4_1x1_reduce"
top: "conv3_4_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_4_1x1_reduce/relu"
type: "ReLU"
bottom: "conv3_4_1x1_reduce"
top: "conv3_4_1x1_reduce"
}
layer {
name: "conv3_4_3x3"
type: "Convolution"
bottom: "conv3_4_1x1_reduce"
top: "conv3_4_3x3"
convolution_param {
num_output: 256
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv3_4_3x3/bn"
type: "BatchNorm"
bottom: "conv3_4_3x3"
top: "conv3_4_3x3"
batch_norm_param {
}
}
layer {
name: "conv3_4_3x3/bn/scale"
type: "Scale"
bottom: "conv3_4_3x3"
top: "conv3_4_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_4_3x3/relu"
type: "ReLU"
bottom: "conv3_4_3x3"
top: "conv3_4_3x3"
}
layer {
name: "conv3_4_1x1_increase"
type: "Convolution"
bottom: "conv3_4_3x3"
top: "conv3_4_1x1_increase"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_4_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv3_4_1x1_increase"
top: "conv3_4_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv3_4_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv3_4_1x1_increase"
top: "conv3_4_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv3_4_global_pool"
type: "Pooling"
bottom: "conv3_4_1x1_increase"
top: "conv3_4_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv3_4_1x1_down"
type: "Convolution"
bottom: "conv3_4_global_pool"
top: "conv3_4_1x1_down"
convolution_param {
num_output: 32
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_4_1x1_down/relu"
type: "ReLU"
bottom: "conv3_4_1x1_down"
top: "conv3_4_1x1_down"
}
layer {
name: "conv3_4_1x1_up"
type: "Convolution"
bottom: "conv3_4_1x1_down"
top: "conv3_4_1x1_up"
convolution_param {
num_output: 512
kernel_size: 1
stride: 1
}
}
layer {
name: "conv3_4_prob"
type: "Sigmoid"
bottom: "conv3_4_1x1_up"
top: "conv3_4_1x1_up"
}
layer {
name: "conv3_4"
type: "Axpy"
bottom: "conv3_4_1x1_up"
bottom: "conv3_4_1x1_increase"
bottom: "conv3_3"
top: "conv3_4"
}
layer {
name: "conv3_4/relu"
type: "ReLU"
bottom: "conv3_4"
top: "conv3_4"
}
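# Stage conv4: same pattern at double width again (512-d grouped 3x3, 1024-d outputs,
# SE: 1024 -> 64); the first block downsamples with stride 2 in both the 3x3 conv and
# the 1x1 projection shortcut.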
layer {
name: "conv4_1_1x1_reduce"
type: "Convolution"
bottom: "conv3_4"
top: "conv4_1_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_1_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_1_1x1_reduce"
top: "conv4_1_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_1_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_1_1x1_reduce"
top: "conv4_1_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_1_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_1_1x1_reduce"
top: "conv4_1_1x1_reduce"
}
layer {
name: "conv4_1_3x3"
type: "Convolution"
bottom: "conv4_1_1x1_reduce"
top: "conv4_1_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 2
}
}
layer {
name: "conv4_1_3x3/bn"
type: "BatchNorm"
bottom: "conv4_1_3x3"
top: "conv4_1_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_1_3x3/bn/scale"
type: "Scale"
bottom: "conv4_1_3x3"
top: "conv4_1_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_1_3x3/relu"
type: "ReLU"
bottom: "conv4_1_3x3"
top: "conv4_1_3x3"
}
layer {
name: "conv4_1_1x1_increase"
type: "Convolution"
bottom: "conv4_1_3x3"
top: "conv4_1_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_1_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_1_1x1_increase"
top: "conv4_1_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_1_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_1_1x1_increase"
top: "conv4_1_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_1_global_pool"
type: "Pooling"
bottom: "conv4_1_1x1_increase"
top: "conv4_1_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_1_1x1_down"
type: "Convolution"
bottom: "conv4_1_global_pool"
top: "conv4_1_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_1_1x1_down/relu"
type: "ReLU"
bottom: "conv4_1_1x1_down"
top: "conv4_1_1x1_down"
}
layer {
name: "conv4_1_1x1_up"
type: "Convolution"
bottom: "conv4_1_1x1_down"
top: "conv4_1_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_1_prob"
type: "Sigmoid"
bottom: "conv4_1_1x1_up"
top: "conv4_1_1x1_up"
}
layer {
name: "conv4_1_1x1_proj"
type: "Convolution"
bottom: "conv3_4"
top: "conv4_1_1x1_proj"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 2
}
}
layer {
name: "conv4_1_1x1_proj/bn"
type: "BatchNorm"
bottom: "conv4_1_1x1_proj"
top: "conv4_1_1x1_proj"
batch_norm_param {
}
}
layer {
name: "conv4_1_1x1_proj/bn/scale"
type: "Scale"
bottom: "conv4_1_1x1_proj"
top: "conv4_1_1x1_proj"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_1"
type: "Axpy"
bottom: "conv4_1_1x1_up"
bottom: "conv4_1_1x1_increase"
bottom: "conv4_1_1x1_proj"
top: "conv4_1"
}
layer {
name: "conv4_1/relu"
type: "ReLU"
bottom: "conv4_1"
top: "conv4_1"
}
layer {
name: "conv4_2_1x1_reduce"
type: "Convolution"
bottom: "conv4_1"
top: "conv4_2_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_2_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_2_1x1_reduce"
top: "conv4_2_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_2_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_2_1x1_reduce"
top: "conv4_2_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_2_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_2_1x1_reduce"
top: "conv4_2_1x1_reduce"
}
layer {
name: "conv4_2_3x3"
type: "Convolution"
bottom: "conv4_2_1x1_reduce"
top: "conv4_2_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_2_3x3/bn"
type: "BatchNorm"
bottom: "conv4_2_3x3"
top: "conv4_2_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_2_3x3/bn/scale"
type: "Scale"
bottom: "conv4_2_3x3"
top: "conv4_2_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_2_3x3/relu"
type: "ReLU"
bottom: "conv4_2_3x3"
top: "conv4_2_3x3"
}
layer {
name: "conv4_2_1x1_increase"
type: "Convolution"
bottom: "conv4_2_3x3"
top: "conv4_2_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_2_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_2_1x1_increase"
top: "conv4_2_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_2_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_2_1x1_increase"
top: "conv4_2_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_2_global_pool"
type: "Pooling"
bottom: "conv4_2_1x1_increase"
top: "conv4_2_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_2_1x1_down"
type: "Convolution"
bottom: "conv4_2_global_pool"
top: "conv4_2_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_2_1x1_down/relu"
type: "ReLU"
bottom: "conv4_2_1x1_down"
top: "conv4_2_1x1_down"
}
layer {
name: "conv4_2_1x1_up"
type: "Convolution"
bottom: "conv4_2_1x1_down"
top: "conv4_2_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_2_prob"
type: "Sigmoid"
bottom: "conv4_2_1x1_up"
top: "conv4_2_1x1_up"
}
layer {
name: "conv4_2"
type: "Axpy"
bottom: "conv4_2_1x1_up"
bottom: "conv4_2_1x1_increase"
bottom: "conv4_1"
top: "conv4_2"
}
layer {
name: "conv4_2/relu"
type: "ReLU"
bottom: "conv4_2"
top: "conv4_2"
}
layer {
name: "conv4_3_1x1_reduce"
type: "Convolution"
bottom: "conv4_2"
top: "conv4_3_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_3_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_3_1x1_reduce"
top: "conv4_3_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_3_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_3_1x1_reduce"
top: "conv4_3_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_3_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_3_1x1_reduce"
top: "conv4_3_1x1_reduce"
}
layer {
name: "conv4_3_3x3"
type: "Convolution"
bottom: "conv4_3_1x1_reduce"
top: "conv4_3_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_3_3x3/bn"
type: "BatchNorm"
bottom: "conv4_3_3x3"
top: "conv4_3_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_3_3x3/bn/scale"
type: "Scale"
bottom: "conv4_3_3x3"
top: "conv4_3_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_3_3x3/relu"
type: "ReLU"
bottom: "conv4_3_3x3"
top: "conv4_3_3x3"
}
layer {
name: "conv4_3_1x1_increase"
type: "Convolution"
bottom: "conv4_3_3x3"
top: "conv4_3_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_3_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_3_1x1_increase"
top: "conv4_3_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_3_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_3_1x1_increase"
top: "conv4_3_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_3_global_pool"
type: "Pooling"
bottom: "conv4_3_1x1_increase"
top: "conv4_3_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_3_1x1_down"
type: "Convolution"
bottom: "conv4_3_global_pool"
top: "conv4_3_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_3_1x1_down/relu"
type: "ReLU"
bottom: "conv4_3_1x1_down"
top: "conv4_3_1x1_down"
}
layer {
name: "conv4_3_1x1_up"
type: "Convolution"
bottom: "conv4_3_1x1_down"
top: "conv4_3_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_3_prob"
type: "Sigmoid"
bottom: "conv4_3_1x1_up"
top: "conv4_3_1x1_up"
}
layer {
name: "conv4_3"
type: "Axpy"
bottom: "conv4_3_1x1_up"
bottom: "conv4_3_1x1_increase"
bottom: "conv4_2"
top: "conv4_3"
}
layer {
name: "conv4_3/relu"
type: "ReLU"
bottom: "conv4_3"
top: "conv4_3"
}
layer {
name: "conv4_4_1x1_reduce"
type: "Convolution"
bottom: "conv4_3"
top: "conv4_4_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_4_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_4_1x1_reduce"
top: "conv4_4_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_4_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_4_1x1_reduce"
top: "conv4_4_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_4_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_4_1x1_reduce"
top: "conv4_4_1x1_reduce"
}
layer {
name: "conv4_4_3x3"
type: "Convolution"
bottom: "conv4_4_1x1_reduce"
top: "conv4_4_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_4_3x3/bn"
type: "BatchNorm"
bottom: "conv4_4_3x3"
top: "conv4_4_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_4_3x3/bn/scale"
type: "Scale"
bottom: "conv4_4_3x3"
top: "conv4_4_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_4_3x3/relu"
type: "ReLU"
bottom: "conv4_4_3x3"
top: "conv4_4_3x3"
}
layer {
name: "conv4_4_1x1_increase"
type: "Convolution"
bottom: "conv4_4_3x3"
top: "conv4_4_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_4_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_4_1x1_increase"
top: "conv4_4_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_4_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_4_1x1_increase"
top: "conv4_4_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_4_global_pool"
type: "Pooling"
bottom: "conv4_4_1x1_increase"
top: "conv4_4_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_4_1x1_down"
type: "Convolution"
bottom: "conv4_4_global_pool"
top: "conv4_4_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_4_1x1_down/relu"
type: "ReLU"
bottom: "conv4_4_1x1_down"
top: "conv4_4_1x1_down"
}
layer {
name: "conv4_4_1x1_up"
type: "Convolution"
bottom: "conv4_4_1x1_down"
top: "conv4_4_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_4_prob"
type: "Sigmoid"
bottom: "conv4_4_1x1_up"
top: "conv4_4_1x1_up"
}
layer {
name: "conv4_4"
type: "Axpy"
bottom: "conv4_4_1x1_up"
bottom: "conv4_4_1x1_increase"
bottom: "conv4_3"
top: "conv4_4"
}
layer {
name: "conv4_4/relu"
type: "ReLU"
bottom: "conv4_4"
top: "conv4_4"
}
layer {
name: "conv4_5_1x1_reduce"
type: "Convolution"
bottom: "conv4_4"
top: "conv4_5_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_5_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_5_1x1_reduce"
top: "conv4_5_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_5_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_5_1x1_reduce"
top: "conv4_5_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_5_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_5_1x1_reduce"
top: "conv4_5_1x1_reduce"
}
layer {
name: "conv4_5_3x3"
type: "Convolution"
bottom: "conv4_5_1x1_reduce"
top: "conv4_5_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_5_3x3/bn"
type: "BatchNorm"
bottom: "conv4_5_3x3"
top: "conv4_5_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_5_3x3/bn/scale"
type: "Scale"
bottom: "conv4_5_3x3"
top: "conv4_5_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_5_3x3/relu"
type: "ReLU"
bottom: "conv4_5_3x3"
top: "conv4_5_3x3"
}
layer {
name: "conv4_5_1x1_increase"
type: "Convolution"
bottom: "conv4_5_3x3"
top: "conv4_5_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_5_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_5_1x1_increase"
top: "conv4_5_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_5_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_5_1x1_increase"
top: "conv4_5_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_5_global_pool"
type: "Pooling"
bottom: "conv4_5_1x1_increase"
top: "conv4_5_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_5_1x1_down"
type: "Convolution"
bottom: "conv4_5_global_pool"
top: "conv4_5_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_5_1x1_down/relu"
type: "ReLU"
bottom: "conv4_5_1x1_down"
top: "conv4_5_1x1_down"
}
layer {
name: "conv4_5_1x1_up"
type: "Convolution"
bottom: "conv4_5_1x1_down"
top: "conv4_5_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_5_prob"
type: "Sigmoid"
bottom: "conv4_5_1x1_up"
top: "conv4_5_1x1_up"
}
layer {
name: "conv4_5"
type: "Axpy"
bottom: "conv4_5_1x1_up"
bottom: "conv4_5_1x1_increase"
bottom: "conv4_4"
top: "conv4_5"
}
layer {
name: "conv4_5/relu"
type: "ReLU"
bottom: "conv4_5"
top: "conv4_5"
}
layer {
name: "conv4_6_1x1_reduce"
type: "Convolution"
bottom: "conv4_5"
top: "conv4_6_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_6_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_6_1x1_reduce"
top: "conv4_6_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_6_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_6_1x1_reduce"
top: "conv4_6_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_6_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_6_1x1_reduce"
top: "conv4_6_1x1_reduce"
}
layer {
name: "conv4_6_3x3"
type: "Convolution"
bottom: "conv4_6_1x1_reduce"
top: "conv4_6_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_6_3x3/bn"
type: "BatchNorm"
bottom: "conv4_6_3x3"
top: "conv4_6_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_6_3x3/bn/scale"
type: "Scale"
bottom: "conv4_6_3x3"
top: "conv4_6_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_6_3x3/relu"
type: "ReLU"
bottom: "conv4_6_3x3"
top: "conv4_6_3x3"
}
layer {
name: "conv4_6_1x1_increase"
type: "Convolution"
bottom: "conv4_6_3x3"
top: "conv4_6_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_6_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_6_1x1_increase"
top: "conv4_6_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_6_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_6_1x1_increase"
top: "conv4_6_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_6_global_pool"
type: "Pooling"
bottom: "conv4_6_1x1_increase"
top: "conv4_6_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_6_1x1_down"
type: "Convolution"
bottom: "conv4_6_global_pool"
top: "conv4_6_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_6_1x1_down/relu"
type: "ReLU"
bottom: "conv4_6_1x1_down"
top: "conv4_6_1x1_down"
}
layer {
name: "conv4_6_1x1_up"
type: "Convolution"
bottom: "conv4_6_1x1_down"
top: "conv4_6_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_6_prob"
type: "Sigmoid"
bottom: "conv4_6_1x1_up"
top: "conv4_6_1x1_up"
}
layer {
name: "conv4_6"
type: "Axpy"
bottom: "conv4_6_1x1_up"
bottom: "conv4_6_1x1_increase"
bottom: "conv4_5"
top: "conv4_6"
}
layer {
name: "conv4_6/relu"
type: "ReLU"
bottom: "conv4_6"
top: "conv4_6"
}
layer {
name: "conv4_7_1x1_reduce"
type: "Convolution"
bottom: "conv4_6"
top: "conv4_7_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_7_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_7_1x1_reduce"
top: "conv4_7_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_7_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_7_1x1_reduce"
top: "conv4_7_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_7_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_7_1x1_reduce"
top: "conv4_7_1x1_reduce"
}
layer {
name: "conv4_7_3x3"
type: "Convolution"
bottom: "conv4_7_1x1_reduce"
top: "conv4_7_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_7_3x3/bn"
type: "BatchNorm"
bottom: "conv4_7_3x3"
top: "conv4_7_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_7_3x3/bn/scale"
type: "Scale"
bottom: "conv4_7_3x3"
top: "conv4_7_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_7_3x3/relu"
type: "ReLU"
bottom: "conv4_7_3x3"
top: "conv4_7_3x3"
}
layer {
name: "conv4_7_1x1_increase"
type: "Convolution"
bottom: "conv4_7_3x3"
top: "conv4_7_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_7_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_7_1x1_increase"
top: "conv4_7_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_7_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_7_1x1_increase"
top: "conv4_7_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_7_global_pool"
type: "Pooling"
bottom: "conv4_7_1x1_increase"
top: "conv4_7_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_7_1x1_down"
type: "Convolution"
bottom: "conv4_7_global_pool"
top: "conv4_7_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_7_1x1_down/relu"
type: "ReLU"
bottom: "conv4_7_1x1_down"
top: "conv4_7_1x1_down"
}
layer {
name: "conv4_7_1x1_up"
type: "Convolution"
bottom: "conv4_7_1x1_down"
top: "conv4_7_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_7_prob"
type: "Sigmoid"
bottom: "conv4_7_1x1_up"
top: "conv4_7_1x1_up"
}
layer {
name: "conv4_7"
type: "Axpy"
bottom: "conv4_7_1x1_up"
bottom: "conv4_7_1x1_increase"
bottom: "conv4_6"
top: "conv4_7"
}
layer {
name: "conv4_7/relu"
type: "ReLU"
bottom: "conv4_7"
top: "conv4_7"
}
layer {
name: "conv4_8_1x1_reduce"
type: "Convolution"
bottom: "conv4_7"
top: "conv4_8_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_8_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_8_1x1_reduce"
top: "conv4_8_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_8_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_8_1x1_reduce"
top: "conv4_8_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_8_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_8_1x1_reduce"
top: "conv4_8_1x1_reduce"
}
layer {
name: "conv4_8_3x3"
type: "Convolution"
bottom: "conv4_8_1x1_reduce"
top: "conv4_8_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_8_3x3/bn"
type: "BatchNorm"
bottom: "conv4_8_3x3"
top: "conv4_8_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_8_3x3/bn/scale"
type: "Scale"
bottom: "conv4_8_3x3"
top: "conv4_8_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_8_3x3/relu"
type: "ReLU"
bottom: "conv4_8_3x3"
top: "conv4_8_3x3"
}
layer {
name: "conv4_8_1x1_increase"
type: "Convolution"
bottom: "conv4_8_3x3"
top: "conv4_8_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_8_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_8_1x1_increase"
top: "conv4_8_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_8_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_8_1x1_increase"
top: "conv4_8_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_8_global_pool"
type: "Pooling"
bottom: "conv4_8_1x1_increase"
top: "conv4_8_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_8_1x1_down"
type: "Convolution"
bottom: "conv4_8_global_pool"
top: "conv4_8_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_8_1x1_down/relu"
type: "ReLU"
bottom: "conv4_8_1x1_down"
top: "conv4_8_1x1_down"
}
layer {
name: "conv4_8_1x1_up"
type: "Convolution"
bottom: "conv4_8_1x1_down"
top: "conv4_8_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_8_prob"
type: "Sigmoid"
bottom: "conv4_8_1x1_up"
top: "conv4_8_1x1_up"
}
layer {
name: "conv4_8"
type: "Axpy"
bottom: "conv4_8_1x1_up"
bottom: "conv4_8_1x1_increase"
bottom: "conv4_7"
top: "conv4_8"
}
layer {
name: "conv4_8/relu"
type: "ReLU"
bottom: "conv4_8"
top: "conv4_8"
}
layer {
name: "conv4_9_1x1_reduce"
type: "Convolution"
bottom: "conv4_8"
top: "conv4_9_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_9_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_9_1x1_reduce"
top: "conv4_9_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_9_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_9_1x1_reduce"
top: "conv4_9_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_9_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_9_1x1_reduce"
top: "conv4_9_1x1_reduce"
}
layer {
name: "conv4_9_3x3"
type: "Convolution"
bottom: "conv4_9_1x1_reduce"
top: "conv4_9_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_9_3x3/bn"
type: "BatchNorm"
bottom: "conv4_9_3x3"
top: "conv4_9_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_9_3x3/bn/scale"
type: "Scale"
bottom: "conv4_9_3x3"
top: "conv4_9_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_9_3x3/relu"
type: "ReLU"
bottom: "conv4_9_3x3"
top: "conv4_9_3x3"
}
layer {
name: "conv4_9_1x1_increase"
type: "Convolution"
bottom: "conv4_9_3x3"
top: "conv4_9_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_9_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_9_1x1_increase"
top: "conv4_9_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_9_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_9_1x1_increase"
top: "conv4_9_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_9_global_pool"
type: "Pooling"
bottom: "conv4_9_1x1_increase"
top: "conv4_9_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_9_1x1_down"
type: "Convolution"
bottom: "conv4_9_global_pool"
top: "conv4_9_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_9_1x1_down/relu"
type: "ReLU"
bottom: "conv4_9_1x1_down"
top: "conv4_9_1x1_down"
}
layer {
name: "conv4_9_1x1_up"
type: "Convolution"
bottom: "conv4_9_1x1_down"
top: "conv4_9_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_9_prob"
type: "Sigmoid"
bottom: "conv4_9_1x1_up"
top: "conv4_9_1x1_up"
}
layer {
name: "conv4_9"
type: "Axpy"
bottom: "conv4_9_1x1_up"
bottom: "conv4_9_1x1_increase"
bottom: "conv4_8"
top: "conv4_9"
}
layer {
name: "conv4_9/relu"
type: "ReLU"
bottom: "conv4_9"
top: "conv4_9"
}
layer {
name: "conv4_10_1x1_reduce"
type: "Convolution"
bottom: "conv4_9"
top: "conv4_10_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_10_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_10_1x1_reduce"
top: "conv4_10_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_10_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_10_1x1_reduce"
top: "conv4_10_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_10_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_10_1x1_reduce"
top: "conv4_10_1x1_reduce"
}
layer {
name: "conv4_10_3x3"
type: "Convolution"
bottom: "conv4_10_1x1_reduce"
top: "conv4_10_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_10_3x3/bn"
type: "BatchNorm"
bottom: "conv4_10_3x3"
top: "conv4_10_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_10_3x3/bn/scale"
type: "Scale"
bottom: "conv4_10_3x3"
top: "conv4_10_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_10_3x3/relu"
type: "ReLU"
bottom: "conv4_10_3x3"
top: "conv4_10_3x3"
}
layer {
name: "conv4_10_1x1_increase"
type: "Convolution"
bottom: "conv4_10_3x3"
top: "conv4_10_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_10_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_10_1x1_increase"
top: "conv4_10_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_10_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_10_1x1_increase"
top: "conv4_10_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_10_global_pool"
type: "Pooling"
bottom: "conv4_10_1x1_increase"
top: "conv4_10_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_10_1x1_down"
type: "Convolution"
bottom: "conv4_10_global_pool"
top: "conv4_10_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_10_1x1_down/relu"
type: "ReLU"
bottom: "conv4_10_1x1_down"
top: "conv4_10_1x1_down"
}
layer {
name: "conv4_10_1x1_up"
type: "Convolution"
bottom: "conv4_10_1x1_down"
top: "conv4_10_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_10_prob"
type: "Sigmoid"
bottom: "conv4_10_1x1_up"
top: "conv4_10_1x1_up"
}
layer {
name: "conv4_10"
type: "Axpy"
bottom: "conv4_10_1x1_up"
bottom: "conv4_10_1x1_increase"
bottom: "conv4_9"
top: "conv4_10"
}
layer {
name: "conv4_10/relu"
type: "ReLU"
bottom: "conv4_10"
top: "conv4_10"
}
layer {
name: "conv4_11_1x1_reduce"
type: "Convolution"
bottom: "conv4_10"
top: "conv4_11_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_11_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_11_1x1_reduce"
top: "conv4_11_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_11_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_11_1x1_reduce"
top: "conv4_11_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_11_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_11_1x1_reduce"
top: "conv4_11_1x1_reduce"
}
layer {
name: "conv4_11_3x3"
type: "Convolution"
bottom: "conv4_11_1x1_reduce"
top: "conv4_11_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_11_3x3/bn"
type: "BatchNorm"
bottom: "conv4_11_3x3"
top: "conv4_11_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_11_3x3/bn/scale"
type: "Scale"
bottom: "conv4_11_3x3"
top: "conv4_11_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_11_3x3/relu"
type: "ReLU"
bottom: "conv4_11_3x3"
top: "conv4_11_3x3"
}
layer {
name: "conv4_11_1x1_increase"
type: "Convolution"
bottom: "conv4_11_3x3"
top: "conv4_11_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_11_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_11_1x1_increase"
top: "conv4_11_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_11_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_11_1x1_increase"
top: "conv4_11_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_11_global_pool"
type: "Pooling"
bottom: "conv4_11_1x1_increase"
top: "conv4_11_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_11_1x1_down"
type: "Convolution"
bottom: "conv4_11_global_pool"
top: "conv4_11_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_11_1x1_down/relu"
type: "ReLU"
bottom: "conv4_11_1x1_down"
top: "conv4_11_1x1_down"
}
layer {
name: "conv4_11_1x1_up"
type: "Convolution"
bottom: "conv4_11_1x1_down"
top: "conv4_11_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_11_prob"
type: "Sigmoid"
bottom: "conv4_11_1x1_up"
top: "conv4_11_1x1_up"
}
layer {
name: "conv4_11"
type: "Axpy"
bottom: "conv4_11_1x1_up"
bottom: "conv4_11_1x1_increase"
bottom: "conv4_10"
top: "conv4_11"
}
layer {
name: "conv4_11/relu"
type: "ReLU"
bottom: "conv4_11"
top: "conv4_11"
}
layer {
name: "conv4_12_1x1_reduce"
type: "Convolution"
bottom: "conv4_11"
top: "conv4_12_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_12_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_12_1x1_reduce"
top: "conv4_12_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_12_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_12_1x1_reduce"
top: "conv4_12_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_12_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_12_1x1_reduce"
top: "conv4_12_1x1_reduce"
}
layer {
name: "conv4_12_3x3"
type: "Convolution"
bottom: "conv4_12_1x1_reduce"
top: "conv4_12_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_12_3x3/bn"
type: "BatchNorm"
bottom: "conv4_12_3x3"
top: "conv4_12_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_12_3x3/bn/scale"
type: "Scale"
bottom: "conv4_12_3x3"
top: "conv4_12_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_12_3x3/relu"
type: "ReLU"
bottom: "conv4_12_3x3"
top: "conv4_12_3x3"
}
layer {
name: "conv4_12_1x1_increase"
type: "Convolution"
bottom: "conv4_12_3x3"
top: "conv4_12_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_12_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_12_1x1_increase"
top: "conv4_12_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_12_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_12_1x1_increase"
top: "conv4_12_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_12_global_pool"
type: "Pooling"
bottom: "conv4_12_1x1_increase"
top: "conv4_12_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_12_1x1_down"
type: "Convolution"
bottom: "conv4_12_global_pool"
top: "conv4_12_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_12_1x1_down/relu"
type: "ReLU"
bottom: "conv4_12_1x1_down"
top: "conv4_12_1x1_down"
}
layer {
name: "conv4_12_1x1_up"
type: "Convolution"
bottom: "conv4_12_1x1_down"
top: "conv4_12_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_12_prob"
type: "Sigmoid"
bottom: "conv4_12_1x1_up"
top: "conv4_12_1x1_up"
}
layer {
name: "conv4_12"
type: "Axpy"
bottom: "conv4_12_1x1_up"
bottom: "conv4_12_1x1_increase"
bottom: "conv4_11"
top: "conv4_12"
}
layer {
name: "conv4_12/relu"
type: "ReLU"
bottom: "conv4_12"
top: "conv4_12"
}
layer {
name: "conv4_13_1x1_reduce"
type: "Convolution"
bottom: "conv4_12"
top: "conv4_13_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_13_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_13_1x1_reduce"
top: "conv4_13_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_13_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_13_1x1_reduce"
top: "conv4_13_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_13_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_13_1x1_reduce"
top: "conv4_13_1x1_reduce"
}
layer {
name: "conv4_13_3x3"
type: "Convolution"
bottom: "conv4_13_1x1_reduce"
top: "conv4_13_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_13_3x3/bn"
type: "BatchNorm"
bottom: "conv4_13_3x3"
top: "conv4_13_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_13_3x3/bn/scale"
type: "Scale"
bottom: "conv4_13_3x3"
top: "conv4_13_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_13_3x3/relu"
type: "ReLU"
bottom: "conv4_13_3x3"
top: "conv4_13_3x3"
}
layer {
name: "conv4_13_1x1_increase"
type: "Convolution"
bottom: "conv4_13_3x3"
top: "conv4_13_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_13_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_13_1x1_increase"
top: "conv4_13_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_13_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_13_1x1_increase"
top: "conv4_13_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_13_global_pool"
type: "Pooling"
bottom: "conv4_13_1x1_increase"
top: "conv4_13_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_13_1x1_down"
type: "Convolution"
bottom: "conv4_13_global_pool"
top: "conv4_13_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_13_1x1_down/relu"
type: "ReLU"
bottom: "conv4_13_1x1_down"
top: "conv4_13_1x1_down"
}
layer {
name: "conv4_13_1x1_up"
type: "Convolution"
bottom: "conv4_13_1x1_down"
top: "conv4_13_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_13_prob"
type: "Sigmoid"
bottom: "conv4_13_1x1_up"
top: "conv4_13_1x1_up"
}
layer {
name: "conv4_13"
type: "Axpy"
bottom: "conv4_13_1x1_up"
bottom: "conv4_13_1x1_increase"
bottom: "conv4_12"
top: "conv4_13"
}
layer {
name: "conv4_13/relu"
type: "ReLU"
bottom: "conv4_13"
top: "conv4_13"
}
layer {
name: "conv4_14_1x1_reduce"
type: "Convolution"
bottom: "conv4_13"
top: "conv4_14_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_14_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_14_1x1_reduce"
top: "conv4_14_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_14_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_14_1x1_reduce"
top: "conv4_14_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_14_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_14_1x1_reduce"
top: "conv4_14_1x1_reduce"
}
layer {
name: "conv4_14_3x3"
type: "Convolution"
bottom: "conv4_14_1x1_reduce"
top: "conv4_14_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_14_3x3/bn"
type: "BatchNorm"
bottom: "conv4_14_3x3"
top: "conv4_14_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_14_3x3/bn/scale"
type: "Scale"
bottom: "conv4_14_3x3"
top: "conv4_14_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_14_3x3/relu"
type: "ReLU"
bottom: "conv4_14_3x3"
top: "conv4_14_3x3"
}
layer {
name: "conv4_14_1x1_increase"
type: "Convolution"
bottom: "conv4_14_3x3"
top: "conv4_14_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_14_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_14_1x1_increase"
top: "conv4_14_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_14_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_14_1x1_increase"
top: "conv4_14_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_14_global_pool"
type: "Pooling"
bottom: "conv4_14_1x1_increase"
top: "conv4_14_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_14_1x1_down"
type: "Convolution"
bottom: "conv4_14_global_pool"
top: "conv4_14_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_14_1x1_down/relu"
type: "ReLU"
bottom: "conv4_14_1x1_down"
top: "conv4_14_1x1_down"
}
layer {
name: "conv4_14_1x1_up"
type: "Convolution"
bottom: "conv4_14_1x1_down"
top: "conv4_14_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_14_prob"
type: "Sigmoid"
bottom: "conv4_14_1x1_up"
top: "conv4_14_1x1_up"
}
layer {
name: "conv4_14"
type: "Axpy"
bottom: "conv4_14_1x1_up"
bottom: "conv4_14_1x1_increase"
bottom: "conv4_13"
top: "conv4_14"
}
layer {
name: "conv4_14/relu"
type: "ReLU"
bottom: "conv4_14"
top: "conv4_14"
}
layer {
name: "conv4_15_1x1_reduce"
type: "Convolution"
bottom: "conv4_14"
top: "conv4_15_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_15_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_15_1x1_reduce"
top: "conv4_15_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_15_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_15_1x1_reduce"
top: "conv4_15_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_15_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_15_1x1_reduce"
top: "conv4_15_1x1_reduce"
}
layer {
name: "conv4_15_3x3"
type: "Convolution"
bottom: "conv4_15_1x1_reduce"
top: "conv4_15_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_15_3x3/bn"
type: "BatchNorm"
bottom: "conv4_15_3x3"
top: "conv4_15_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_15_3x3/bn/scale"
type: "Scale"
bottom: "conv4_15_3x3"
top: "conv4_15_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_15_3x3/relu"
type: "ReLU"
bottom: "conv4_15_3x3"
top: "conv4_15_3x3"
}
layer {
name: "conv4_15_1x1_increase"
type: "Convolution"
bottom: "conv4_15_3x3"
top: "conv4_15_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_15_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_15_1x1_increase"
top: "conv4_15_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_15_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_15_1x1_increase"
top: "conv4_15_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_15_global_pool"
type: "Pooling"
bottom: "conv4_15_1x1_increase"
top: "conv4_15_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_15_1x1_down"
type: "Convolution"
bottom: "conv4_15_global_pool"
top: "conv4_15_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_15_1x1_down/relu"
type: "ReLU"
bottom: "conv4_15_1x1_down"
top: "conv4_15_1x1_down"
}
layer {
name: "conv4_15_1x1_up"
type: "Convolution"
bottom: "conv4_15_1x1_down"
top: "conv4_15_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_15_prob"
type: "Sigmoid"
bottom: "conv4_15_1x1_up"
top: "conv4_15_1x1_up"
}
layer {
name: "conv4_15"
type: "Axpy"
bottom: "conv4_15_1x1_up"
bottom: "conv4_15_1x1_increase"
bottom: "conv4_14"
top: "conv4_15"
}
layer {
name: "conv4_15/relu"
type: "ReLU"
bottom: "conv4_15"
top: "conv4_15"
}
layer {
name: "conv4_16_1x1_reduce"
type: "Convolution"
bottom: "conv4_15"
top: "conv4_16_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_16_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_16_1x1_reduce"
top: "conv4_16_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_16_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_16_1x1_reduce"
top: "conv4_16_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_16_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_16_1x1_reduce"
top: "conv4_16_1x1_reduce"
}
layer {
name: "conv4_16_3x3"
type: "Convolution"
bottom: "conv4_16_1x1_reduce"
top: "conv4_16_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_16_3x3/bn"
type: "BatchNorm"
bottom: "conv4_16_3x3"
top: "conv4_16_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_16_3x3/bn/scale"
type: "Scale"
bottom: "conv4_16_3x3"
top: "conv4_16_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_16_3x3/relu"
type: "ReLU"
bottom: "conv4_16_3x3"
top: "conv4_16_3x3"
}
layer {
name: "conv4_16_1x1_increase"
type: "Convolution"
bottom: "conv4_16_3x3"
top: "conv4_16_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_16_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_16_1x1_increase"
top: "conv4_16_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_16_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_16_1x1_increase"
top: "conv4_16_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_16_global_pool"
type: "Pooling"
bottom: "conv4_16_1x1_increase"
top: "conv4_16_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_16_1x1_down"
type: "Convolution"
bottom: "conv4_16_global_pool"
top: "conv4_16_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_16_1x1_down/relu"
type: "ReLU"
bottom: "conv4_16_1x1_down"
top: "conv4_16_1x1_down"
}
layer {
name: "conv4_16_1x1_up"
type: "Convolution"
bottom: "conv4_16_1x1_down"
top: "conv4_16_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_16_prob"
type: "Sigmoid"
bottom: "conv4_16_1x1_up"
top: "conv4_16_1x1_up"
}
layer {
name: "conv4_16"
type: "Axpy"
bottom: "conv4_16_1x1_up"
bottom: "conv4_16_1x1_increase"
bottom: "conv4_15"
top: "conv4_16"
}
layer {
name: "conv4_16/relu"
type: "ReLU"
bottom: "conv4_16"
top: "conv4_16"
}
layer {
name: "conv4_17_1x1_reduce"
type: "Convolution"
bottom: "conv4_16"
top: "conv4_17_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_17_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_17_1x1_reduce"
top: "conv4_17_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_17_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_17_1x1_reduce"
top: "conv4_17_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_17_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_17_1x1_reduce"
top: "conv4_17_1x1_reduce"
}
layer {
name: "conv4_17_3x3"
type: "Convolution"
bottom: "conv4_17_1x1_reduce"
top: "conv4_17_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_17_3x3/bn"
type: "BatchNorm"
bottom: "conv4_17_3x3"
top: "conv4_17_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_17_3x3/bn/scale"
type: "Scale"
bottom: "conv4_17_3x3"
top: "conv4_17_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_17_3x3/relu"
type: "ReLU"
bottom: "conv4_17_3x3"
top: "conv4_17_3x3"
}
layer {
name: "conv4_17_1x1_increase"
type: "Convolution"
bottom: "conv4_17_3x3"
top: "conv4_17_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_17_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_17_1x1_increase"
top: "conv4_17_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_17_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_17_1x1_increase"
top: "conv4_17_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_17_global_pool"
type: "Pooling"
bottom: "conv4_17_1x1_increase"
top: "conv4_17_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_17_1x1_down"
type: "Convolution"
bottom: "conv4_17_global_pool"
top: "conv4_17_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_17_1x1_down/relu"
type: "ReLU"
bottom: "conv4_17_1x1_down"
top: "conv4_17_1x1_down"
}
layer {
name: "conv4_17_1x1_up"
type: "Convolution"
bottom: "conv4_17_1x1_down"
top: "conv4_17_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_17_prob"
type: "Sigmoid"
bottom: "conv4_17_1x1_up"
top: "conv4_17_1x1_up"
}
layer {
name: "conv4_17"
type: "Axpy"
bottom: "conv4_17_1x1_up"
bottom: "conv4_17_1x1_increase"
bottom: "conv4_16"
top: "conv4_17"
}
layer {
name: "conv4_17/relu"
type: "ReLU"
bottom: "conv4_17"
top: "conv4_17"
}
layer {
name: "conv4_18_1x1_reduce"
type: "Convolution"
bottom: "conv4_17"
top: "conv4_18_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_18_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_18_1x1_reduce"
top: "conv4_18_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_18_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_18_1x1_reduce"
top: "conv4_18_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_18_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_18_1x1_reduce"
top: "conv4_18_1x1_reduce"
}
layer {
name: "conv4_18_3x3"
type: "Convolution"
bottom: "conv4_18_1x1_reduce"
top: "conv4_18_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_18_3x3/bn"
type: "BatchNorm"
bottom: "conv4_18_3x3"
top: "conv4_18_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_18_3x3/bn/scale"
type: "Scale"
bottom: "conv4_18_3x3"
top: "conv4_18_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_18_3x3/relu"
type: "ReLU"
bottom: "conv4_18_3x3"
top: "conv4_18_3x3"
}
layer {
name: "conv4_18_1x1_increase"
type: "Convolution"
bottom: "conv4_18_3x3"
top: "conv4_18_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_18_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_18_1x1_increase"
top: "conv4_18_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_18_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_18_1x1_increase"
top: "conv4_18_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_18_global_pool"
type: "Pooling"
bottom: "conv4_18_1x1_increase"
top: "conv4_18_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_18_1x1_down"
type: "Convolution"
bottom: "conv4_18_global_pool"
top: "conv4_18_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_18_1x1_down/relu"
type: "ReLU"
bottom: "conv4_18_1x1_down"
top: "conv4_18_1x1_down"
}
layer {
name: "conv4_18_1x1_up"
type: "Convolution"
bottom: "conv4_18_1x1_down"
top: "conv4_18_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_18_prob"
type: "Sigmoid"
bottom: "conv4_18_1x1_up"
top: "conv4_18_1x1_up"
}
layer {
name: "conv4_18"
type: "Axpy"
bottom: "conv4_18_1x1_up"
bottom: "conv4_18_1x1_increase"
bottom: "conv4_17"
top: "conv4_18"
}
layer {
name: "conv4_18/relu"
type: "ReLU"
bottom: "conv4_18"
top: "conv4_18"
}
layer {
name: "conv4_19_1x1_reduce"
type: "Convolution"
bottom: "conv4_18"
top: "conv4_19_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_19_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_19_1x1_reduce"
top: "conv4_19_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_19_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_19_1x1_reduce"
top: "conv4_19_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_19_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_19_1x1_reduce"
top: "conv4_19_1x1_reduce"
}
layer {
name: "conv4_19_3x3"
type: "Convolution"
bottom: "conv4_19_1x1_reduce"
top: "conv4_19_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_19_3x3/bn"
type: "BatchNorm"
bottom: "conv4_19_3x3"
top: "conv4_19_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_19_3x3/bn/scale"
type: "Scale"
bottom: "conv4_19_3x3"
top: "conv4_19_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_19_3x3/relu"
type: "ReLU"
bottom: "conv4_19_3x3"
top: "conv4_19_3x3"
}
layer {
name: "conv4_19_1x1_increase"
type: "Convolution"
bottom: "conv4_19_3x3"
top: "conv4_19_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_19_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_19_1x1_increase"
top: "conv4_19_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_19_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_19_1x1_increase"
top: "conv4_19_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_19_global_pool"
type: "Pooling"
bottom: "conv4_19_1x1_increase"
top: "conv4_19_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_19_1x1_down"
type: "Convolution"
bottom: "conv4_19_global_pool"
top: "conv4_19_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_19_1x1_down/relu"
type: "ReLU"
bottom: "conv4_19_1x1_down"
top: "conv4_19_1x1_down"
}
layer {
name: "conv4_19_1x1_up"
type: "Convolution"
bottom: "conv4_19_1x1_down"
top: "conv4_19_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_19_prob"
type: "Sigmoid"
bottom: "conv4_19_1x1_up"
top: "conv4_19_1x1_up"
}
layer {
name: "conv4_19"
type: "Axpy"
bottom: "conv4_19_1x1_up"
bottom: "conv4_19_1x1_increase"
bottom: "conv4_18"
top: "conv4_19"
}
layer {
name: "conv4_19/relu"
type: "ReLU"
bottom: "conv4_19"
top: "conv4_19"
}
layer {
name: "conv4_20_1x1_reduce"
type: "Convolution"
bottom: "conv4_19"
top: "conv4_20_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_20_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_20_1x1_reduce"
top: "conv4_20_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_20_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_20_1x1_reduce"
top: "conv4_20_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_20_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_20_1x1_reduce"
top: "conv4_20_1x1_reduce"
}
layer {
name: "conv4_20_3x3"
type: "Convolution"
bottom: "conv4_20_1x1_reduce"
top: "conv4_20_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_20_3x3/bn"
type: "BatchNorm"
bottom: "conv4_20_3x3"
top: "conv4_20_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_20_3x3/bn/scale"
type: "Scale"
bottom: "conv4_20_3x3"
top: "conv4_20_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_20_3x3/relu"
type: "ReLU"
bottom: "conv4_20_3x3"
top: "conv4_20_3x3"
}
layer {
name: "conv4_20_1x1_increase"
type: "Convolution"
bottom: "conv4_20_3x3"
top: "conv4_20_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_20_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_20_1x1_increase"
top: "conv4_20_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_20_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_20_1x1_increase"
top: "conv4_20_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_20_global_pool"
type: "Pooling"
bottom: "conv4_20_1x1_increase"
top: "conv4_20_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_20_1x1_down"
type: "Convolution"
bottom: "conv4_20_global_pool"
top: "conv4_20_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_20_1x1_down/relu"
type: "ReLU"
bottom: "conv4_20_1x1_down"
top: "conv4_20_1x1_down"
}
layer {
name: "conv4_20_1x1_up"
type: "Convolution"
bottom: "conv4_20_1x1_down"
top: "conv4_20_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_20_prob"
type: "Sigmoid"
bottom: "conv4_20_1x1_up"
top: "conv4_20_1x1_up"
}
layer {
name: "conv4_20"
type: "Axpy"
bottom: "conv4_20_1x1_up"
bottom: "conv4_20_1x1_increase"
bottom: "conv4_19"
top: "conv4_20"
}
layer {
name: "conv4_20/relu"
type: "ReLU"
bottom: "conv4_20"
top: "conv4_20"
}
layer {
name: "conv4_21_1x1_reduce"
type: "Convolution"
bottom: "conv4_20"
top: "conv4_21_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_21_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_21_1x1_reduce"
top: "conv4_21_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_21_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_21_1x1_reduce"
top: "conv4_21_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_21_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_21_1x1_reduce"
top: "conv4_21_1x1_reduce"
}
layer {
name: "conv4_21_3x3"
type: "Convolution"
bottom: "conv4_21_1x1_reduce"
top: "conv4_21_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_21_3x3/bn"
type: "BatchNorm"
bottom: "conv4_21_3x3"
top: "conv4_21_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_21_3x3/bn/scale"
type: "Scale"
bottom: "conv4_21_3x3"
top: "conv4_21_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_21_3x3/relu"
type: "ReLU"
bottom: "conv4_21_3x3"
top: "conv4_21_3x3"
}
layer {
name: "conv4_21_1x1_increase"
type: "Convolution"
bottom: "conv4_21_3x3"
top: "conv4_21_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_21_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_21_1x1_increase"
top: "conv4_21_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_21_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_21_1x1_increase"
top: "conv4_21_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_21_global_pool"
type: "Pooling"
bottom: "conv4_21_1x1_increase"
top: "conv4_21_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_21_1x1_down"
type: "Convolution"
bottom: "conv4_21_global_pool"
top: "conv4_21_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_21_1x1_down/relu"
type: "ReLU"
bottom: "conv4_21_1x1_down"
top: "conv4_21_1x1_down"
}
layer {
name: "conv4_21_1x1_up"
type: "Convolution"
bottom: "conv4_21_1x1_down"
top: "conv4_21_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_21_prob"
type: "Sigmoid"
bottom: "conv4_21_1x1_up"
top: "conv4_21_1x1_up"
}
layer {
name: "conv4_21"
type: "Axpy"
bottom: "conv4_21_1x1_up"
bottom: "conv4_21_1x1_increase"
bottom: "conv4_20"
top: "conv4_21"
}
layer {
name: "conv4_21/relu"
type: "ReLU"
bottom: "conv4_21"
top: "conv4_21"
}
layer {
name: "conv4_22_1x1_reduce"
type: "Convolution"
bottom: "conv4_21"
top: "conv4_22_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_22_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_22_1x1_reduce"
top: "conv4_22_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_22_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_22_1x1_reduce"
top: "conv4_22_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_22_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_22_1x1_reduce"
top: "conv4_22_1x1_reduce"
}
layer {
name: "conv4_22_3x3"
type: "Convolution"
bottom: "conv4_22_1x1_reduce"
top: "conv4_22_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_22_3x3/bn"
type: "BatchNorm"
bottom: "conv4_22_3x3"
top: "conv4_22_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_22_3x3/bn/scale"
type: "Scale"
bottom: "conv4_22_3x3"
top: "conv4_22_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_22_3x3/relu"
type: "ReLU"
bottom: "conv4_22_3x3"
top: "conv4_22_3x3"
}
layer {
name: "conv4_22_1x1_increase"
type: "Convolution"
bottom: "conv4_22_3x3"
top: "conv4_22_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_22_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_22_1x1_increase"
top: "conv4_22_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_22_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_22_1x1_increase"
top: "conv4_22_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_22_global_pool"
type: "Pooling"
bottom: "conv4_22_1x1_increase"
top: "conv4_22_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_22_1x1_down"
type: "Convolution"
bottom: "conv4_22_global_pool"
top: "conv4_22_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_22_1x1_down/relu"
type: "ReLU"
bottom: "conv4_22_1x1_down"
top: "conv4_22_1x1_down"
}
layer {
name: "conv4_22_1x1_up"
type: "Convolution"
bottom: "conv4_22_1x1_down"
top: "conv4_22_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_22_prob"
type: "Sigmoid"
bottom: "conv4_22_1x1_up"
top: "conv4_22_1x1_up"
}
layer {
name: "conv4_22"
type: "Axpy"
bottom: "conv4_22_1x1_up"
bottom: "conv4_22_1x1_increase"
bottom: "conv4_21"
top: "conv4_22"
}
layer {
name: "conv4_22/relu"
type: "ReLU"
bottom: "conv4_22"
top: "conv4_22"
}
layer {
name: "conv4_23_1x1_reduce"
type: "Convolution"
bottom: "conv4_22"
top: "conv4_23_1x1_reduce"
convolution_param {
num_output: 512
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_23_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv4_23_1x1_reduce"
top: "conv4_23_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv4_23_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv4_23_1x1_reduce"
top: "conv4_23_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_23_1x1_reduce/relu"
type: "ReLU"
bottom: "conv4_23_1x1_reduce"
top: "conv4_23_1x1_reduce"
}
layer {
name: "conv4_23_3x3"
type: "Convolution"
bottom: "conv4_23_1x1_reduce"
top: "conv4_23_3x3"
convolution_param {
num_output: 512
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv4_23_3x3/bn"
type: "BatchNorm"
bottom: "conv4_23_3x3"
top: "conv4_23_3x3"
batch_norm_param {
}
}
layer {
name: "conv4_23_3x3/bn/scale"
type: "Scale"
bottom: "conv4_23_3x3"
top: "conv4_23_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_23_3x3/relu"
type: "ReLU"
bottom: "conv4_23_3x3"
top: "conv4_23_3x3"
}
layer {
name: "conv4_23_1x1_increase"
type: "Convolution"
bottom: "conv4_23_3x3"
top: "conv4_23_1x1_increase"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_23_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv4_23_1x1_increase"
top: "conv4_23_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv4_23_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv4_23_1x1_increase"
top: "conv4_23_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv4_23_global_pool"
type: "Pooling"
bottom: "conv4_23_1x1_increase"
top: "conv4_23_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv4_23_1x1_down"
type: "Convolution"
bottom: "conv4_23_global_pool"
top: "conv4_23_1x1_down"
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_23_1x1_down/relu"
type: "ReLU"
bottom: "conv4_23_1x1_down"
top: "conv4_23_1x1_down"
}
layer {
name: "conv4_23_1x1_up"
type: "Convolution"
bottom: "conv4_23_1x1_down"
top: "conv4_23_1x1_up"
convolution_param {
num_output: 1024
kernel_size: 1
stride: 1
}
}
layer {
name: "conv4_23_prob"
type: "Sigmoid"
bottom: "conv4_23_1x1_up"
top: "conv4_23_1x1_up"
}
layer {
name: "conv4_23"
type: "Axpy"
bottom: "conv4_23_1x1_up"
bottom: "conv4_23_1x1_increase"
bottom: "conv4_22"
top: "conv4_23"
}
layer {
name: "conv4_23/relu"
type: "ReLU"
bottom: "conv4_23"
top: "conv4_23"
}
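# Stage conv5: three blocks with 1024-channel bottlenecks and 2048 output
# channels. The first block (conv5_1) halves the spatial resolution (stride 2
# in the grouped 3x3 conv) and adds a stride-2 1x1 projection shortcut; the
# SE bottleneck widens from 64 to 128 channels accordingly.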
layer {
name: "conv5_1_1x1_reduce"
type: "Convolution"
bottom: "conv4_23"
top: "conv5_1_1x1_reduce"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_1_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv5_1_1x1_reduce"
top: "conv5_1_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv5_1_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv5_1_1x1_reduce"
top: "conv5_1_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv5_1_1x1_reduce/relu"
type: "ReLU"
bottom: "conv5_1_1x1_reduce"
top: "conv5_1_1x1_reduce"
}
layer {
name: "conv5_1_3x3"
type: "Convolution"
bottom: "conv5_1_1x1_reduce"
top: "conv5_1_3x3"
convolution_param {
num_output: 1024
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 2
}
}
layer {
name: "conv5_1_3x3/bn"
type: "BatchNorm"
bottom: "conv5_1_3x3"
top: "conv5_1_3x3"
batch_norm_param {
}
}
layer {
name: "conv5_1_3x3/bn/scale"
type: "Scale"
bottom: "conv5_1_3x3"
top: "conv5_1_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv5_1_3x3/relu"
type: "ReLU"
bottom: "conv5_1_3x3"
top: "conv5_1_3x3"
}
layer {
name: "conv5_1_1x1_increase"
type: "Convolution"
bottom: "conv5_1_3x3"
top: "conv5_1_1x1_increase"
convolution_param {
num_output: 2048
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_1_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv5_1_1x1_increase"
top: "conv5_1_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv5_1_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv5_1_1x1_increase"
top: "conv5_1_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv5_1_global_pool"
type: "Pooling"
bottom: "conv5_1_1x1_increase"
top: "conv5_1_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv5_1_1x1_down"
type: "Convolution"
bottom: "conv5_1_global_pool"
top: "conv5_1_1x1_down"
convolution_param {
num_output: 128
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_1_1x1_down/relu"
type: "ReLU"
bottom: "conv5_1_1x1_down"
top: "conv5_1_1x1_down"
}
layer {
name: "conv5_1_1x1_up"
type: "Convolution"
bottom: "conv5_1_1x1_down"
top: "conv5_1_1x1_up"
convolution_param {
num_output: 2048
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_1_prob"
type: "Sigmoid"
bottom: "conv5_1_1x1_up"
top: "conv5_1_1x1_up"
}
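# Projection shortcut: the identity path cannot be reused here because this
# block changes both spatial size (stride 2) and channel count (1024 -> 2048),
# so a strided 1x1 convolution projects conv4_23 to the new shape.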
layer {
name: "conv5_1_1x1_proj"
type: "Convolution"
bottom: "conv4_23"
top: "conv5_1_1x1_proj"
convolution_param {
num_output: 2048
bias_term: false
kernel_size: 1
stride: 2
}
}
layer {
name: "conv5_1_1x1_proj/bn"
type: "BatchNorm"
bottom: "conv5_1_1x1_proj"
top: "conv5_1_1x1_proj"
batch_norm_param {
}
}
layer {
name: "conv5_1_1x1_proj/bn/scale"
type: "Scale"
bottom: "conv5_1_1x1_proj"
top: "conv5_1_1x1_proj"
scale_param {
bias_term: true
}
}
layer {
name: "conv5_1"
type: "Axpy"
bottom: "conv5_1_1x1_up"
bottom: "conv5_1_1x1_increase"
bottom: "conv5_1_1x1_proj"
top: "conv5_1"
}
layer {
name: "conv5_1/relu"
type: "ReLU"
bottom: "conv5_1"
top: "conv5_1"
}
layer {
name: "conv5_2_1x1_reduce"
type: "Convolution"
bottom: "conv5_1"
top: "conv5_2_1x1_reduce"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_2_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv5_2_1x1_reduce"
top: "conv5_2_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv5_2_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv5_2_1x1_reduce"
top: "conv5_2_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv5_2_1x1_reduce/relu"
type: "ReLU"
bottom: "conv5_2_1x1_reduce"
top: "conv5_2_1x1_reduce"
}
layer {
name: "conv5_2_3x3"
type: "Convolution"
bottom: "conv5_2_1x1_reduce"
top: "conv5_2_3x3"
convolution_param {
num_output: 1024
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv5_2_3x3/bn"
type: "BatchNorm"
bottom: "conv5_2_3x3"
top: "conv5_2_3x3"
batch_norm_param {
}
}
layer {
name: "conv5_2_3x3/bn/scale"
type: "Scale"
bottom: "conv5_2_3x3"
top: "conv5_2_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv5_2_3x3/relu"
type: "ReLU"
bottom: "conv5_2_3x3"
top: "conv5_2_3x3"
}
layer {
name: "conv5_2_1x1_increase"
type: "Convolution"
bottom: "conv5_2_3x3"
top: "conv5_2_1x1_increase"
convolution_param {
num_output: 2048
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_2_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv5_2_1x1_increase"
top: "conv5_2_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv5_2_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv5_2_1x1_increase"
top: "conv5_2_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv5_2_global_pool"
type: "Pooling"
bottom: "conv5_2_1x1_increase"
top: "conv5_2_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv5_2_1x1_down"
type: "Convolution"
bottom: "conv5_2_global_pool"
top: "conv5_2_1x1_down"
convolution_param {
num_output: 128
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_2_1x1_down/relu"
type: "ReLU"
bottom: "conv5_2_1x1_down"
top: "conv5_2_1x1_down"
}
layer {
name: "conv5_2_1x1_up"
type: "Convolution"
bottom: "conv5_2_1x1_down"
top: "conv5_2_1x1_up"
convolution_param {
num_output: 2048
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_2_prob"
type: "Sigmoid"
bottom: "conv5_2_1x1_up"
top: "conv5_2_1x1_up"
}
layer {
name: "conv5_2"
type: "Axpy"
bottom: "conv5_2_1x1_up"
bottom: "conv5_2_1x1_increase"
bottom: "conv5_1"
top: "conv5_2"
}
layer {
name: "conv5_2/relu"
type: "ReLU"
bottom: "conv5_2"
top: "conv5_2"
}
layer {
name: "conv5_3_1x1_reduce"
type: "Convolution"
bottom: "conv5_2"
top: "conv5_3_1x1_reduce"
convolution_param {
num_output: 1024
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_3_1x1_reduce/bn"
type: "BatchNorm"
bottom: "conv5_3_1x1_reduce"
top: "conv5_3_1x1_reduce"
batch_norm_param {
}
}
layer {
name: "conv5_3_1x1_reduce/bn/scale"
type: "Scale"
bottom: "conv5_3_1x1_reduce"
top: "conv5_3_1x1_reduce"
scale_param {
bias_term: true
}
}
layer {
name: "conv5_3_1x1_reduce/relu"
type: "ReLU"
bottom: "conv5_3_1x1_reduce"
top: "conv5_3_1x1_reduce"
}
layer {
name: "conv5_3_3x3"
type: "Convolution"
bottom: "conv5_3_1x1_reduce"
top: "conv5_3_3x3"
convolution_param {
num_output: 1024
bias_term: false
pad: 1
kernel_size: 3
group: 32
stride: 1
}
}
layer {
name: "conv5_3_3x3/bn"
type: "BatchNorm"
bottom: "conv5_3_3x3"
top: "conv5_3_3x3"
batch_norm_param {
}
}
layer {
name: "conv5_3_3x3/bn/scale"
type: "Scale"
bottom: "conv5_3_3x3"
top: "conv5_3_3x3"
scale_param {
bias_term: true
}
}
layer {
name: "conv5_3_3x3/relu"
type: "ReLU"
bottom: "conv5_3_3x3"
top: "conv5_3_3x3"
}
layer {
name: "conv5_3_1x1_increase"
type: "Convolution"
bottom: "conv5_3_3x3"
top: "conv5_3_1x1_increase"
convolution_param {
num_output: 2048
bias_term: false
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_3_1x1_increase/bn"
type: "BatchNorm"
bottom: "conv5_3_1x1_increase"
top: "conv5_3_1x1_increase"
batch_norm_param {
}
}
layer {
name: "conv5_3_1x1_increase/bn/scale"
type: "Scale"
bottom: "conv5_3_1x1_increase"
top: "conv5_3_1x1_increase"
scale_param {
bias_term: true
}
}
layer {
name: "conv5_3_global_pool"
type: "Pooling"
bottom: "conv5_3_1x1_increase"
top: "conv5_3_global_pool"
pooling_param {
pool: AVE
engine: CAFFE
global_pooling: true
}
}
layer {
name: "conv5_3_1x1_down"
type: "Convolution"
bottom: "conv5_3_global_pool"
top: "conv5_3_1x1_down"
convolution_param {
num_output: 128
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_3_1x1_down/relu"
type: "ReLU"
bottom: "conv5_3_1x1_down"
top: "conv5_3_1x1_down"
}
layer {
name: "conv5_3_1x1_up"
type: "Convolution"
bottom: "conv5_3_1x1_down"
top: "conv5_3_1x1_up"
convolution_param {
num_output: 2048
kernel_size: 1
stride: 1
}
}
layer {
name: "conv5_3_prob"
type: "Sigmoid"
bottom: "conv5_3_1x1_up"
top: "conv5_3_1x1_up"
}
layer {
name: "conv5_3"
type: "Axpy"
bottom: "conv5_3_1x1_up"
bottom: "conv5_3_1x1_increase"
bottom: "conv5_2"
top: "conv5_3"
}
layer {
name: "conv5_3/relu"
type: "ReLU"
bottom: "conv5_3"
top: "conv5_3"
}
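# The original classification head of the SE-ResNeXt-101 backbone (7x7 average
# pooling, 1000-way classifier, softmax) is kept below for reference but
# commented out; the face-embedding head further down is used instead.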
#layer {
# name: "pool5/7x7_s1"
# type: "Pooling"
# bottom: "conv5_3"
# top: "pool5/7x7_s1"
# pooling_param {
# pool: AVE
# kernel_size: 7
# stride: 1
# }
#}
#layer {
# name: "classifier"
# type: "InnerProduct"
# bottom: "pool5/7x7_s1"
# top: "classifier"
# inner_product_param {
# num_output: 1000
# }
#}
#layer {
# name: "prob"
# type: "Softmax"
# bottom: "classifier"
# top: "prob"
#}
############### feature layer ##############
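# Global average pooling over conv5_3 gives a 2048-d vector per image, which
# fc5 maps to a 512-d face embedding; fc5 (or its L2-normalized version below)
# is the descriptor typically extracted at test time.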
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5_3"
top: "pool5"
pooling_param {
pool: AVE
#kernel_size: 7
#kernel_h: 4
#kernel_w: 3
global_pooling: true
stride: 1
}
}
layer {
name: "fc5"
type: "InnerProduct"
bottom: "pool5"
top: "fc5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 512
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
############### L2-Softmax Loss ##############
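# L2-Softmax-style training: the 512-d embedding is L2-normalized to unit
# length, multiplied by a fixed (non-learned) scale of 50, and classified by a
# bias-free 10572-way fully connected layer trained with softmax cross-entropy.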
layer {
name: "fc5/norm"
type: "Normalize"
bottom: "fc5"
top: "fc5_norm"
normalize_param {
normalize_type: "L2"
}
}
# Pre-fixed scale layer: the scale factor is a constant (lr_mult: 0) and is not learned.
layer {
name: "fc5/norm/scale"
type: "Scale"
bottom: "fc5_norm"
top: "fc5_norm"
param {
lr_mult: 0
decay_mult: 0
}
scale_param {
filler {
type: "constant"
value: 50
}
bias_term: false
}
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "fc5_norm"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
inner_product_param {
num_output: 10572
bias_term: false
weight_filler {
type: "xavier"
}
}
}
layer {
name: "softmax_loss"
type: "SoftmaxWithLoss"
bottom: "fc6"
bottom: "label"
top: "softmax_loss"
}
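# Note: fc6 and softmax_loss are only needed during training; for feature
# extraction the network can be cut at fc5 (or at fc5_norm when comparing
# embeddings by cosine similarity).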