Created
January 27, 2017 09:43
-
-
Save lolongcovas/4a5db993ec2521107d3941499fce9278 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#############################################
# generated by
# python app/imagenet/build_resnet.py -m bottleneck -n 64 -o /tmp/resnet18bottleneck/ -b 2 2 2 2 --no-fc_layers
# params: 1.4 Millions
# flops: 255 Millions
#############################################
# NOTE(review): despite the name below, this is a ResNet-18-style bottleneck
# network (2-2-2-2 blocks, no FC layers), not CaffeNet. The name is kept to
# avoid breaking any tooling that matches on it.
name: "CaffeNet"
# TRAIN input: ImageNet-2012 LMDB, 224x224 random crops with augmentation.
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include { phase: TRAIN }
  # Channel-wise BGR mean subtraction (104/117/123) instead of a mean image.
  # NOTE(review): max_rescale_ratio, max_rotate_degree, blur_jitter,
  # max_gamma_light, base_gamma_light and distortion_factor are not fields of
  # upstream Caffe's TransformationParameter — this prototxt requires the
  # patched Caffe fork that defines them. Verify against the fork's caffe.proto.
  transform_param {
    crop_size: 224
    mean_value: 104
    mean_value: 117
    mean_value: 123
    mirror: true
    max_rescale_ratio: 0.1
    max_rotate_degree: 10
    blur_jitter: true
    max_gamma_light: 5
    base_gamma_light: 5
    distortion_factor: 0.0001
    debug: false
  }
  data_param {
    source: "imagenet2012/TRAIN2"
    batch_size: 100
    backend: LMDB
  }
}
# TEST input: validation LMDB, deterministic center crop, no augmentation.
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include { phase: TEST }
  # Same channel-wise BGR means as the TRAIN layer; mirroring disabled so
  # validation is deterministic.
  transform_param {
    crop_size: 224
    mean_value: 104
    mean_value: 117
    mean_value: 123
    mirror: false
  }
  data_param {
    source: "imagenet2012/VAL"
    # batch_size 1 gives exact per-image evaluation at the cost of speed.
    batch_size: 1
    backend: LMDB
  }
}
######################################################################################################## | |
##################################### TRAIN FROM TEXT FILE | |
######################################################################################################## | |
# layer { | |
# name: "data" | |
# type: "ImageData" | |
# top: "data" | |
# top: "label" | |
# include { | |
# phase: TRAIN | |
# } | |
# transform_param { | |
# crop_size: 224 | |
# mirror: true | |
# mean_value: 104 | |
# mean_value: 117 | |
# mean_value: 123 | |
# # scale: 128 | |
# } | |
# image_data_param { | |
# source: "/home/wideeyes/proyectos/caffe/data/ilsvrc12/train.txt" | |
# batch_size: 100 | |
# shuffle: true | |
# new_height: 256 | |
# new_width: 256 | |
# root_folder: "/media/wideeyes-data/database/imagenet2012/train/" | |
# } | |
# } | |
# layer { | |
# name: "data" | |
# type: "ImageData" | |
# top: "data" | |
# top: "label" | |
# include { | |
# phase: TEST | |
# } | |
# transform_param { | |
# crop_size: 224 | |
# mirror: false | |
# mean_value: 104 | |
# mean_value: 117 | |
# mean_value: 123 | |
# # scale: 128 | |
# } | |
# image_data_param { | |
# source: "/home/wideeyes/proyectos/caffe/data/ilsvrc12/val.txt" | |
# batch_size: 10 | |
# #shuffle: true | |
# new_height: 256 | |
# new_width: 256 | |
# root_folder: "/media/wideeyes-data/database/imagenet2012/val/" | |
# } | |
# } | |
# Stem: 7x7/2 convolution + BN/Scale/ReLU + 3x3/2 max pool.
layer {
  name: "conv_1"
  type: "Convolution"
  bottom: "data"
  top: "conv_1"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64 pad: 3 kernel_size: 7 stride: 2
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_1"
  type: "BatchNorm"
  bottom: "conv_1"
  top: "conv_1"
  # lr_mult 0: BatchNorm's mean/variance/moving-average-factor blobs are
  # maintained by the layer itself, never updated by the solver.
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  # use_global_stats deliberately left unset: Caffe then uses minibatch
  # statistics in TRAIN and the accumulated global statistics in TEST.
  # (The generated file forced it to false, making TEST-phase BN use
  # per-batch stats — degenerate at batch_size 1.)
}
layer {
  name: "scale_1"
  type: "Scale"
  bottom: "conv_1"
  top: "conv_1"
  scale_param { bias_term: true }
}
layer {
  name: "relu_1"
  type: "ReLU"
  bottom: "conv_1"
  top: "conv_1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv_1"
  top: "pool1"
  pooling_param { pool: MAX kernel_size: 3 stride: 2 }
}
# stage0 / block0: bottleneck unit (1x1/16 -> 3x3/16 -> 1x1/64) with a 1x1
# projection shortcut from pool1. BatchNorm layers leave use_global_stats
# unset so Caffe uses minibatch stats in TRAIN and global stats in TEST
# (the generator had forced it to false, breaking TEST-phase normalization).
layer {
  name: "conv_stage0_block0_proj_shortcut"
  type: "Convolution"
  bottom: "pool1"
  top: "conv_stage0_block0_proj_shortcut"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64 pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage0_block0_proj_shortcut"
  type: "BatchNorm"
  bottom: "conv_stage0_block0_proj_shortcut"
  top: "conv_stage0_block0_proj_shortcut"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage0_block0_proj_shortcut"
  type: "Scale"
  bottom: "conv_stage0_block0_proj_shortcut"
  top: "conv_stage0_block0_proj_shortcut"
  scale_param { bias_term: true }
}
layer {
  name: "conv_stage0_block0_branch2a"
  type: "Convolution"
  bottom: "pool1"
  top: "conv_stage0_block0_branch2a"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 16 pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage0_block0_branch2a"
  type: "BatchNorm"
  bottom: "conv_stage0_block0_branch2a"
  top: "conv_stage0_block0_branch2a"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage0_block0_branch2a"
  type: "Scale"
  bottom: "conv_stage0_block0_branch2a"
  top: "conv_stage0_block0_branch2a"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage0_block0_branch2a"
  type: "ReLU"
  bottom: "conv_stage0_block0_branch2a"
  top: "conv_stage0_block0_branch2a"
}
layer {
  name: "conv_stage0_block0_branch2b"
  type: "Convolution"
  bottom: "conv_stage0_block0_branch2a"
  top: "conv_stage0_block0_branch2b"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 16 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage0_block0_branch2b"
  type: "BatchNorm"
  bottom: "conv_stage0_block0_branch2b"
  top: "conv_stage0_block0_branch2b"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage0_block0_branch2b"
  type: "Scale"
  bottom: "conv_stage0_block0_branch2b"
  top: "conv_stage0_block0_branch2b"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage0_block0_branch2b"
  type: "ReLU"
  bottom: "conv_stage0_block0_branch2b"
  top: "conv_stage0_block0_branch2b"
}
layer {
  name: "conv_stage0_block0_branch2c"
  type: "Convolution"
  bottom: "conv_stage0_block0_branch2b"
  top: "conv_stage0_block0_branch2c"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64 pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage0_block0_branch2c"
  type: "BatchNorm"
  bottom: "conv_stage0_block0_branch2c"
  top: "conv_stage0_block0_branch2c"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage0_block0_branch2c"
  type: "Scale"
  bottom: "conv_stage0_block0_branch2c"
  top: "conv_stage0_block0_branch2c"
  scale_param { bias_term: true }
}
# Residual addition (defaults to SUM) followed by in-place ReLU.
layer {
  name: "eltwise_stage0_block0"
  type: "Eltwise"
  bottom: "conv_stage0_block0_proj_shortcut"
  bottom: "conv_stage0_block0_branch2c"
  top: "eltwise_stage0_block0"
}
layer {
  name: "relu_stage0_block0"
  type: "ReLU"
  bottom: "eltwise_stage0_block0"
  top: "eltwise_stage0_block0"
}
# stage0 / block1: bottleneck unit (1x1/16 -> 3x3/16 -> 1x1/64) with an
# identity shortcut. BatchNorm layers leave use_global_stats unset so Caffe
# uses minibatch stats in TRAIN and global stats in TEST (the generator had
# forced it to false, breaking TEST-phase normalization).
layer {
  name: "conv_stage0_block1_branch2a"
  type: "Convolution"
  bottom: "eltwise_stage0_block0"
  top: "conv_stage0_block1_branch2a"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 16 pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage0_block1_branch2a"
  type: "BatchNorm"
  bottom: "conv_stage0_block1_branch2a"
  top: "conv_stage0_block1_branch2a"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage0_block1_branch2a"
  type: "Scale"
  bottom: "conv_stage0_block1_branch2a"
  top: "conv_stage0_block1_branch2a"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage0_block1_branch2a"
  type: "ReLU"
  bottom: "conv_stage0_block1_branch2a"
  top: "conv_stage0_block1_branch2a"
}
layer {
  name: "conv_stage0_block1_branch2b"
  type: "Convolution"
  bottom: "conv_stage0_block1_branch2a"
  top: "conv_stage0_block1_branch2b"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 16 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage0_block1_branch2b"
  type: "BatchNorm"
  bottom: "conv_stage0_block1_branch2b"
  top: "conv_stage0_block1_branch2b"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage0_block1_branch2b"
  type: "Scale"
  bottom: "conv_stage0_block1_branch2b"
  top: "conv_stage0_block1_branch2b"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage0_block1_branch2b"
  type: "ReLU"
  bottom: "conv_stage0_block1_branch2b"
  top: "conv_stage0_block1_branch2b"
}
layer {
  name: "conv_stage0_block1_branch2c"
  type: "Convolution"
  bottom: "conv_stage0_block1_branch2b"
  top: "conv_stage0_block1_branch2c"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64 pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage0_block1_branch2c"
  type: "BatchNorm"
  bottom: "conv_stage0_block1_branch2c"
  top: "conv_stage0_block1_branch2c"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage0_block1_branch2c"
  type: "Scale"
  bottom: "conv_stage0_block1_branch2c"
  top: "conv_stage0_block1_branch2c"
  scale_param { bias_term: true }
}
layer {
  name: "eltwise_stage0_block1"
  type: "Eltwise"
  bottom: "eltwise_stage0_block0"
  bottom: "conv_stage0_block1_branch2c"
  top: "eltwise_stage0_block1"
}
layer {
  name: "relu_stage0_block1"
  type: "ReLU"
  bottom: "eltwise_stage0_block1"
  top: "eltwise_stage0_block1"
}
# stage1 / block0: downsampling bottleneck (1x1/32 s2 -> 3x3/32 -> 1x1/128)
# with a strided 1x1 projection shortcut. BatchNorm layers leave
# use_global_stats unset so Caffe uses minibatch stats in TRAIN and global
# stats in TEST (the generator had forced it to false).
layer {
  name: "conv_stage1_block0_proj_shortcut"
  type: "Convolution"
  bottom: "eltwise_stage0_block1"
  top: "conv_stage1_block0_proj_shortcut"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 128 pad: 0 kernel_size: 1 stride: 2
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage1_block0_proj_shortcut"
  type: "BatchNorm"
  bottom: "conv_stage1_block0_proj_shortcut"
  top: "conv_stage1_block0_proj_shortcut"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage1_block0_proj_shortcut"
  type: "Scale"
  bottom: "conv_stage1_block0_proj_shortcut"
  top: "conv_stage1_block0_proj_shortcut"
  scale_param { bias_term: true }
}
layer {
  name: "conv_stage1_block0_branch2a"
  type: "Convolution"
  bottom: "eltwise_stage0_block1"
  top: "conv_stage1_block0_branch2a"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 32 pad: 0 kernel_size: 1 stride: 2
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage1_block0_branch2a"
  type: "BatchNorm"
  bottom: "conv_stage1_block0_branch2a"
  top: "conv_stage1_block0_branch2a"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage1_block0_branch2a"
  type: "Scale"
  bottom: "conv_stage1_block0_branch2a"
  top: "conv_stage1_block0_branch2a"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage1_block0_branch2a"
  type: "ReLU"
  bottom: "conv_stage1_block0_branch2a"
  top: "conv_stage1_block0_branch2a"
}
layer {
  name: "conv_stage1_block0_branch2b"
  type: "Convolution"
  bottom: "conv_stage1_block0_branch2a"
  top: "conv_stage1_block0_branch2b"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 32 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage1_block0_branch2b"
  type: "BatchNorm"
  bottom: "conv_stage1_block0_branch2b"
  top: "conv_stage1_block0_branch2b"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage1_block0_branch2b"
  type: "Scale"
  bottom: "conv_stage1_block0_branch2b"
  top: "conv_stage1_block0_branch2b"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage1_block0_branch2b"
  type: "ReLU"
  bottom: "conv_stage1_block0_branch2b"
  top: "conv_stage1_block0_branch2b"
}
layer {
  name: "conv_stage1_block0_branch2c"
  type: "Convolution"
  bottom: "conv_stage1_block0_branch2b"
  top: "conv_stage1_block0_branch2c"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 128 pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage1_block0_branch2c"
  type: "BatchNorm"
  bottom: "conv_stage1_block0_branch2c"
  top: "conv_stage1_block0_branch2c"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage1_block0_branch2c"
  type: "Scale"
  bottom: "conv_stage1_block0_branch2c"
  top: "conv_stage1_block0_branch2c"
  scale_param { bias_term: true }
}
layer {
  name: "eltwise_stage1_block0"
  type: "Eltwise"
  bottom: "conv_stage1_block0_proj_shortcut"
  bottom: "conv_stage1_block0_branch2c"
  top: "eltwise_stage1_block0"
}
layer {
  name: "relu_stage1_block0"
  type: "ReLU"
  bottom: "eltwise_stage1_block0"
  top: "eltwise_stage1_block0"
}
# stage1 / block1: bottleneck unit (1x1/32 -> 3x3/32 -> 1x1/128) with an
# identity shortcut. BatchNorm layers leave use_global_stats unset so Caffe
# uses minibatch stats in TRAIN and global stats in TEST (the generator had
# forced it to false).
layer {
  name: "conv_stage1_block1_branch2a"
  type: "Convolution"
  bottom: "eltwise_stage1_block0"
  top: "conv_stage1_block1_branch2a"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 32 pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage1_block1_branch2a"
  type: "BatchNorm"
  bottom: "conv_stage1_block1_branch2a"
  top: "conv_stage1_block1_branch2a"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage1_block1_branch2a"
  type: "Scale"
  bottom: "conv_stage1_block1_branch2a"
  top: "conv_stage1_block1_branch2a"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage1_block1_branch2a"
  type: "ReLU"
  bottom: "conv_stage1_block1_branch2a"
  top: "conv_stage1_block1_branch2a"
}
layer {
  name: "conv_stage1_block1_branch2b"
  type: "Convolution"
  bottom: "conv_stage1_block1_branch2a"
  top: "conv_stage1_block1_branch2b"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 32 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage1_block1_branch2b"
  type: "BatchNorm"
  bottom: "conv_stage1_block1_branch2b"
  top: "conv_stage1_block1_branch2b"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage1_block1_branch2b"
  type: "Scale"
  bottom: "conv_stage1_block1_branch2b"
  top: "conv_stage1_block1_branch2b"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage1_block1_branch2b"
  type: "ReLU"
  bottom: "conv_stage1_block1_branch2b"
  top: "conv_stage1_block1_branch2b"
}
layer {
  name: "conv_stage1_block1_branch2c"
  type: "Convolution"
  bottom: "conv_stage1_block1_branch2b"
  top: "conv_stage1_block1_branch2c"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 128 pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage1_block1_branch2c"
  type: "BatchNorm"
  bottom: "conv_stage1_block1_branch2c"
  top: "conv_stage1_block1_branch2c"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage1_block1_branch2c"
  type: "Scale"
  bottom: "conv_stage1_block1_branch2c"
  top: "conv_stage1_block1_branch2c"
  scale_param { bias_term: true }
}
layer {
  name: "eltwise_stage1_block1"
  type: "Eltwise"
  bottom: "eltwise_stage1_block0"
  bottom: "conv_stage1_block1_branch2c"
  top: "eltwise_stage1_block1"
}
layer {
  name: "relu_stage1_block1"
  type: "ReLU"
  bottom: "eltwise_stage1_block1"
  top: "eltwise_stage1_block1"
}
# stage2 / block0: downsampling bottleneck (1x1/64 s2 -> 3x3/64 -> 1x1/256)
# with a strided 1x1 projection shortcut. BatchNorm layers leave
# use_global_stats unset so Caffe uses minibatch stats in TRAIN and global
# stats in TEST (the generator had forced it to false).
layer {
  name: "conv_stage2_block0_proj_shortcut"
  type: "Convolution"
  bottom: "eltwise_stage1_block1"
  top: "conv_stage2_block0_proj_shortcut"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 256 pad: 0 kernel_size: 1 stride: 2
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage2_block0_proj_shortcut"
  type: "BatchNorm"
  bottom: "conv_stage2_block0_proj_shortcut"
  top: "conv_stage2_block0_proj_shortcut"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage2_block0_proj_shortcut"
  type: "Scale"
  bottom: "conv_stage2_block0_proj_shortcut"
  top: "conv_stage2_block0_proj_shortcut"
  scale_param { bias_term: true }
}
layer {
  name: "conv_stage2_block0_branch2a"
  type: "Convolution"
  bottom: "eltwise_stage1_block1"
  top: "conv_stage2_block0_branch2a"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64 pad: 0 kernel_size: 1 stride: 2
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage2_block0_branch2a"
  type: "BatchNorm"
  bottom: "conv_stage2_block0_branch2a"
  top: "conv_stage2_block0_branch2a"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage2_block0_branch2a"
  type: "Scale"
  bottom: "conv_stage2_block0_branch2a"
  top: "conv_stage2_block0_branch2a"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage2_block0_branch2a"
  type: "ReLU"
  bottom: "conv_stage2_block0_branch2a"
  top: "conv_stage2_block0_branch2a"
}
layer {
  name: "conv_stage2_block0_branch2b"
  type: "Convolution"
  bottom: "conv_stage2_block0_branch2a"
  top: "conv_stage2_block0_branch2b"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage2_block0_branch2b"
  type: "BatchNorm"
  bottom: "conv_stage2_block0_branch2b"
  top: "conv_stage2_block0_branch2b"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage2_block0_branch2b"
  type: "Scale"
  bottom: "conv_stage2_block0_branch2b"
  top: "conv_stage2_block0_branch2b"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage2_block0_branch2b"
  type: "ReLU"
  bottom: "conv_stage2_block0_branch2b"
  top: "conv_stage2_block0_branch2b"
}
layer {
  name: "conv_stage2_block0_branch2c"
  type: "Convolution"
  bottom: "conv_stage2_block0_branch2b"
  top: "conv_stage2_block0_branch2c"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 256 pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage2_block0_branch2c"
  type: "BatchNorm"
  bottom: "conv_stage2_block0_branch2c"
  top: "conv_stage2_block0_branch2c"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage2_block0_branch2c"
  type: "Scale"
  bottom: "conv_stage2_block0_branch2c"
  top: "conv_stage2_block0_branch2c"
  scale_param { bias_term: true }
}
layer {
  name: "eltwise_stage2_block0"
  type: "Eltwise"
  bottom: "conv_stage2_block0_proj_shortcut"
  bottom: "conv_stage2_block0_branch2c"
  top: "eltwise_stage2_block0"
}
layer {
  name: "relu_stage2_block0"
  type: "ReLU"
  bottom: "eltwise_stage2_block0"
  top: "eltwise_stage2_block0"
}
# stage2 / block1: bottleneck unit (1x1/64 -> 3x3/64 -> 1x1/256) with an
# identity shortcut. BatchNorm layers leave use_global_stats unset so Caffe
# uses minibatch stats in TRAIN and global stats in TEST (the generator had
# forced it to false).
layer {
  name: "conv_stage2_block1_branch2a"
  type: "Convolution"
  bottom: "eltwise_stage2_block0"
  top: "conv_stage2_block1_branch2a"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64 pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage2_block1_branch2a"
  type: "BatchNorm"
  bottom: "conv_stage2_block1_branch2a"
  top: "conv_stage2_block1_branch2a"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage2_block1_branch2a"
  type: "Scale"
  bottom: "conv_stage2_block1_branch2a"
  top: "conv_stage2_block1_branch2a"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage2_block1_branch2a"
  type: "ReLU"
  bottom: "conv_stage2_block1_branch2a"
  top: "conv_stage2_block1_branch2a"
}
layer {
  name: "conv_stage2_block1_branch2b"
  type: "Convolution"
  bottom: "conv_stage2_block1_branch2a"
  top: "conv_stage2_block1_branch2b"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage2_block1_branch2b"
  type: "BatchNorm"
  bottom: "conv_stage2_block1_branch2b"
  top: "conv_stage2_block1_branch2b"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage2_block1_branch2b"
  type: "Scale"
  bottom: "conv_stage2_block1_branch2b"
  top: "conv_stage2_block1_branch2b"
  scale_param { bias_term: true }
}
layer {
  name: "relu_stage2_block1_branch2b"
  type: "ReLU"
  bottom: "conv_stage2_block1_branch2b"
  top: "conv_stage2_block1_branch2b"
}
layer {
  name: "conv_stage2_block1_branch2c"
  type: "Convolution"
  bottom: "conv_stage2_block1_branch2b"
  top: "conv_stage2_block1_branch2c"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 256 pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "msra" }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn_stage2_block1_branch2c"
  type: "BatchNorm"
  bottom: "conv_stage2_block1_branch2c"
  top: "conv_stage2_block1_branch2c"
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
layer {
  name: "scale_stage2_block1_branch2c"
  type: "Scale"
  bottom: "conv_stage2_block1_branch2c"
  top: "conv_stage2_block1_branch2c"
  scale_param { bias_term: true }
}
layer {
  name: "eltwise_stage2_block1"
  type: "Eltwise"
  bottom: "eltwise_stage2_block0"
  bottom: "conv_stage2_block1_branch2c"
  top: "eltwise_stage2_block1"
}
layer {
  name: "relu_stage2_block1"
  type: "ReLU"
  bottom: "eltwise_stage2_block1"
  top: "eltwise_stage2_block1"
}
layer { | |
name: "conv_stage3_block0_proj_shortcut" | |
type: "Convolution" | |
bottom: "eltwise_stage2_block1" | |
top: "conv_stage3_block0_proj_shortcut" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
decay_mult: 0 | |
} | |
convolution_param { | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
weight_filler { | |
type: "msra" | |
} | |
bias_filler { | |
type: "constant" | |
value: 0 | |
} | |
} | |
} | |
layer { | |
name: "bn_stage3_block0_proj_shortcut" | |
type: "BatchNorm" | |
bottom: "conv_stage3_block0_proj_shortcut" | |
top: "conv_stage3_block0_proj_shortcut" | |
param { | |
lr_mult: 0 | |
decay_mult: 0 | |
} | |
param { | |
lr_mult: 0 | |
decay_mult: 0 | |
} | |
param { | |
lr_mult: 0 | |
decay_mult: 0 | |
} | |
batch_norm_param { | |
use_global_stats: false | |
} | |
} | |
layer { | |
name: "scale_stage3_block0_proj_shortcut" | |
type: "Scale" | |
bottom: "conv_stage3_block0_proj_shortcut" | |
top: "conv_stage3_block0_proj_shortcut" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv_stage3_block0_branch2a" | |
type: "Convolution" | |
bottom: "eltwise_stage2_block1" | |
top: "conv_stage3_block0_branch2a" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
decay_mult: 0 | |
} | |
convolution_param { | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
weight_filler { | |
type: "msra" | |
} | |
bias_filler { | |
type: "constant" | |
value: 0 | |
} | |
} | |
} | |
layer { | |
name: "bn_stage3_block0_branch2a" | |
type: "BatchNorm" | |
bottom: "conv_stage3_block0_branch2a" | |
top: "conv_stage3_block0_branch2a" | |
param { | |
lr_mult: 0 | |
decay_mult: 0 | |
} | |
param { | |
lr_mult: 0 | |
decay_mult: 0 | |
} | |
param { | |
lr_mult: 0 | |
decay_mult: 0 | |
} | |
batch_norm_param { | |
use_global_stats: false | |
} | |
} | |
layer { | |
name: "scale_stage3_block0_branch2a" | |
type: "Scale" | |
bottom: "conv_stage3_block0_branch2a" | |
top: "conv_stage3_block0_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu_stage3_block0_branch2a" | |
type: "ReLU" | |
bottom: "conv_stage3_block0_branch2a" | |
top: "conv_stage3_block0_branch2a" | |
} | |
# Stage-3 block-0 bottleneck, 3x3 spatial convolution: 128 outputs,
# pad 1 / stride 1 preserves the feature-map size.
layer {
  name: "conv_stage3_block0_branch2b"
  type: "Convolution"
  bottom: "conv_stage3_block0_branch2a"
  top: "conv_stage3_block0_branch2b"
  # Weights: standard lr, weight decay applied.
  param {
    lr_mult: 1
    decay_mult: 1
  }
  # Biases: doubled lr, no weight decay (common Caffe convention).
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# NOTE(fix): dropped the hard-coded `use_global_stats: false`, which forced
# per-minibatch statistics even in the TEST phase (batch_size: 1 there).
# Without batch_norm_param, Caffe uses minibatch statistics in TRAIN and the
# accumulated global statistics in TEST.
layer {
  name: "bn_stage3_block0_branch2b"
  type: "BatchNorm"
  bottom: "conv_stage3_block0_branch2b"
  top: "conv_stage3_block0_branch2b"
  # BatchNorm's internal blobs are updated by the layer itself: freeze them.
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
# Learned per-channel affine (gamma/beta) paired with the BatchNorm above.
layer {
  name: "scale_stage3_block0_branch2b"
  type: "Scale"
  bottom: "conv_stage3_block0_branch2b"
  top: "conv_stage3_block0_branch2b"
  scale_param {
    bias_term: true
  }
}
# In-place ReLU.
layer {
  name: "relu_stage3_block0_branch2b"
  type: "ReLU"
  bottom: "conv_stage3_block0_branch2b"
  top: "conv_stage3_block0_branch2b"
}
# Stage-3 block-0 bottleneck, 1x1 expand: back up to 512 channels.
# No ReLU here — activation is applied after the residual addition.
layer {
  name: "conv_stage3_block0_branch2c"
  type: "Convolution"
  bottom: "conv_stage3_block0_branch2b"
  top: "conv_stage3_block0_branch2c"
  # Weights: standard lr, weight decay applied.
  param {
    lr_mult: 1
    decay_mult: 1
  }
  # Biases: doubled lr, no weight decay (common Caffe convention).
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    pad: 0
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# NOTE(fix): dropped the hard-coded `use_global_stats: false`, which forced
# per-minibatch statistics even in the TEST phase (batch_size: 1 there).
# Without batch_norm_param, Caffe uses minibatch statistics in TRAIN and the
# accumulated global statistics in TEST.
layer {
  name: "bn_stage3_block0_branch2c"
  type: "BatchNorm"
  bottom: "conv_stage3_block0_branch2c"
  top: "conv_stage3_block0_branch2c"
  # BatchNorm's internal blobs are updated by the layer itself: freeze them.
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
# Learned per-channel affine (gamma/beta) paired with the BatchNorm above.
layer {
  name: "scale_stage3_block0_branch2c"
  type: "Scale"
  bottom: "conv_stage3_block0_branch2c"
  top: "conv_stage3_block0_branch2c"
  scale_param {
    bias_term: true
  }
}
# Residual merge for stage-3 block 0: projection shortcut + bottleneck branch.
# Eltwise with no eltwise_param defaults to operation: SUM.
layer {
  name: "eltwise_stage3_block0"
  type: "Eltwise"
  bottom: "conv_stage3_block0_proj_shortcut"
  bottom: "conv_stage3_block0_branch2c"
  top: "eltwise_stage3_block0"
}
# Post-addition ReLU, applied in place.
layer {
  name: "relu_stage3_block0"
  type: "ReLU"
  bottom: "eltwise_stage3_block0"
  top: "eltwise_stage3_block0"
}
# Stage-3 block-1 bottleneck, 1x1 reduce: 128 outputs, stride 1
# (identity shortcut — no downsampling in this block).
layer {
  name: "conv_stage3_block1_branch2a"
  type: "Convolution"
  bottom: "eltwise_stage3_block0"
  top: "conv_stage3_block1_branch2a"
  # Weights: standard lr, weight decay applied.
  param {
    lr_mult: 1
    decay_mult: 1
  }
  # Biases: doubled lr, no weight decay (common Caffe convention).
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    pad: 0
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# NOTE(fix): dropped the hard-coded `use_global_stats: false`, which forced
# per-minibatch statistics even in the TEST phase (batch_size: 1 there).
# Without batch_norm_param, Caffe uses minibatch statistics in TRAIN and the
# accumulated global statistics in TEST.
layer {
  name: "bn_stage3_block1_branch2a"
  type: "BatchNorm"
  bottom: "conv_stage3_block1_branch2a"
  top: "conv_stage3_block1_branch2a"
  # BatchNorm's internal blobs are updated by the layer itself: freeze them.
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
# Learned per-channel affine (gamma/beta) paired with the BatchNorm above.
layer {
  name: "scale_stage3_block1_branch2a"
  type: "Scale"
  bottom: "conv_stage3_block1_branch2a"
  top: "conv_stage3_block1_branch2a"
  scale_param {
    bias_term: true
  }
}
# In-place ReLU.
layer {
  name: "relu_stage3_block1_branch2a"
  type: "ReLU"
  bottom: "conv_stage3_block1_branch2a"
  top: "conv_stage3_block1_branch2a"
}
# Stage-3 block-1 bottleneck, 3x3 spatial convolution: 128 outputs,
# pad 1 / stride 1 preserves the feature-map size.
layer {
  name: "conv_stage3_block1_branch2b"
  type: "Convolution"
  bottom: "conv_stage3_block1_branch2a"
  top: "conv_stage3_block1_branch2b"
  # Weights: standard lr, weight decay applied.
  param {
    lr_mult: 1
    decay_mult: 1
  }
  # Biases: doubled lr, no weight decay (common Caffe convention).
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# NOTE(fix): dropped the hard-coded `use_global_stats: false`, which forced
# per-minibatch statistics even in the TEST phase (batch_size: 1 there).
# Without batch_norm_param, Caffe uses minibatch statistics in TRAIN and the
# accumulated global statistics in TEST.
layer {
  name: "bn_stage3_block1_branch2b"
  type: "BatchNorm"
  bottom: "conv_stage3_block1_branch2b"
  top: "conv_stage3_block1_branch2b"
  # BatchNorm's internal blobs are updated by the layer itself: freeze them.
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
# Learned per-channel affine (gamma/beta) paired with the BatchNorm above.
layer {
  name: "scale_stage3_block1_branch2b"
  type: "Scale"
  bottom: "conv_stage3_block1_branch2b"
  top: "conv_stage3_block1_branch2b"
  scale_param {
    bias_term: true
  }
}
# In-place ReLU.
layer {
  name: "relu_stage3_block1_branch2b"
  type: "ReLU"
  bottom: "conv_stage3_block1_branch2b"
  top: "conv_stage3_block1_branch2b"
}
# Stage-3 block-1 bottleneck, 1x1 expand: back up to 512 channels.
# No ReLU here — activation is applied after the residual addition.
layer {
  name: "conv_stage3_block1_branch2c"
  type: "Convolution"
  bottom: "conv_stage3_block1_branch2b"
  top: "conv_stage3_block1_branch2c"
  # Weights: standard lr, weight decay applied.
  param {
    lr_mult: 1
    decay_mult: 1
  }
  # Biases: doubled lr, no weight decay (common Caffe convention).
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    pad: 0
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# NOTE(fix): dropped the hard-coded `use_global_stats: false`, which forced
# per-minibatch statistics even in the TEST phase (batch_size: 1 there).
# Without batch_norm_param, Caffe uses minibatch statistics in TRAIN and the
# accumulated global statistics in TEST.
layer {
  name: "bn_stage3_block1_branch2c"
  type: "BatchNorm"
  bottom: "conv_stage3_block1_branch2c"
  top: "conv_stage3_block1_branch2c"
  # BatchNorm's internal blobs are updated by the layer itself: freeze them.
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
# Learned per-channel affine (gamma/beta) paired with the BatchNorm above.
layer {
  name: "scale_stage3_block1_branch2c"
  type: "Scale"
  bottom: "conv_stage3_block1_branch2c"
  top: "conv_stage3_block1_branch2c"
  scale_param {
    bias_term: true
  }
}
# Residual merge for stage-3 block 1: identity shortcut + bottleneck branch.
# Eltwise with no eltwise_param defaults to operation: SUM.
layer {
  name: "eltwise_stage3_block1"
  type: "Eltwise"
  bottom: "eltwise_stage3_block0"
  bottom: "conv_stage3_block1_branch2c"
  top: "eltwise_stage3_block1"
}
# Post-addition ReLU, applied in place.
layer {
  name: "relu_stage3_block1"
  type: "ReLU"
  bottom: "eltwise_stage3_block1"
  top: "eltwise_stage3_block1"
}
# Global average pooling over the final feature map.
# NOTE(fix): replaced the hard-coded `kernel_size: 7` / `stride: 1` with
# `global_pooling: true` — equivalent when the feature map is 7x7 (as with
# the 224x224 crop used here), and it keeps the network valid for other
# input resolutions instead of silently pooling a 7x7 window.
layer {
  name: "pool"
  type: "Pooling"
  bottom: "eltwise_stage3_block1"
  top: "pool"
  pooling_param {
    pool: AVE
    global_pooling: true
  }
}
# Final 1000-way classifier (ImageNet classes) on the pooled features.
layer {
  name: "fc1000"
  type: "InnerProduct"
  bottom: "pool"
  top: "fc1000"
  # Weights: standard lr, weight decay applied.
  param {
    lr_mult: 1
    decay_mult: 1
  }
  # Biases: doubled lr, no weight decay (common Caffe convention).
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 1000
    weight_filler {
      type: "msra"
    }
    # "constant" filler with no explicit value defaults to 0.
    bias_filler {
      type: "constant"
    }
  }
}
# Training objective: softmax cross-entropy over the 1000-way logits.
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc1000"
  bottom: "label"
  top: "loss"
}
# Top-1 accuracy, reported only during the TEST phase.
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "fc1000"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
# Top-5 accuracy, reported only during the TEST phase.
layer {
  name: "accuracy-top5"
  type: "Accuracy"
  bottom: "fc1000"
  bottom: "label"
  top: "accuracy-top5"
  include {
    phase: TEST
  }
  accuracy_param {
    top_k: 5
  }
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment