Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
Large DAU-ConvNet: ImageNet pre-trained model of DAU-ConvNet with AlexNet architecture

Information

A DAU-ConvNet with AlexNet architecture in which conv3, conv4, and conv5 are replaced with DAU-Conv layers. Corresponds to the "Large DAU-ConvNet" in the CVPR18 paper, using a larger number of DAUs per channel.

Model is trained on ilsvrc12 and achieves around 57.3% top-1 accuracy.

caffemodel: AlexNet-DAU-ConvNet-large

caffemodel_url: http://box.vicos.si/skokec/caffe-dau-models/alexnet_dau_large.caffemodel

mean_file: use standard ilsvrc 2012 mean file provided by caffe

Caffe compatibility

Compatible only with https://github.com/skokec/DAU-ConvNet-caffe

# Deploy definition of the "Large DAU-ConvNet" AlexNet variant.
name: "AlexNet-DAU-large"
# Force backward passes through every layer, even those that would
# otherwise be skipped in a deploy-only network.
force_backward: true
# Input layer: a single 3-channel 227x227 image (standard AlexNet crop).
layer {
name: "data"
type: "Input"
top: "data"
input_param { shape: { dim: 1 dim: 3 dim: 227 dim: 227 } }
}
# conv1: ordinary 11x11 convolution (not a DAU layer), 96 outputs,
# stride 4, pad 5, no bias term.
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 96
bias_term: false
pad: 5
kernel_size: 11
group: 1
stride: 4
weight_filler {
type: "xavier"
}
dilation: 1
}
}
# In-place ReLU after conv1.
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
# 3x3 max pooling, stride 2.
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
# Local response normalization (AlexNet-style LRN over 5 channels).
layer {
name: "norm1"
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 9.99999974738e-05
beta: 0.75
}
}
# conv2 stage: two parallel DAUConvolution branches (conv2_1, conv2_2),
# each producing 128 channels from norm1, concatenated into a 256-channel
# "conv2" blob -- mirroring AlexNet's grouped conv2.
#
# NOTE(review): each DAU layer declares five param blocks. The two
# lr_mult: 5000 entries presumably correspond to the DAU displacement
# (mu) parameters and the lr_mult: 0 entry to a frozen sigma -- confirm
# ordering against the DAU-ConvNet-caffe layer implementation.
layer {
name: "conv2_1"
type: "DAUConvolution"
bottom: "norm1"
top: "conv2_1"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 5000.0
decay_mult: 0.0
}
param {
lr_mult: 5000.0
decay_mult: 0.0
}
param {
lr_mult: 0.0
decay_mult: 0.0
}
param {
lr_mult: 2.0
}
dau_conv_param {
num_output: 128
bias_term: true
# Two number_units entries; presumably a 2x3 grid of DAUs per
# channel -- verify against dau_conv_param docs.
number_units: 2
number_units: 3
pad: 3
kernel_size: 7
stride: 1
weight_filler {
type: "gaussian"
std: 0.00999999977648
}
bias_filler {
type: "constant"
value: 1.0
}
# DAU displacements initialized uniformly inside the 7x7 kernel window.
mu_filler {
type: "uniform"
min: 2.0
max: 6.0
}
sigma_filler {
type: "constant"
value: 0.5
}
component_border_bound: 0.0
sigma_lower_bound: 0.300000011921
engine: CUDNN
}
}
layer {
name: "relu2_1"
type: "ReLU"
bottom: "conv2_1"
top: "conv2_1"
}
# Second conv2 branch: identical configuration to conv2_1, also fed
# from norm1.
layer {
name: "conv2_2"
type: "DAUConvolution"
bottom: "norm1"
top: "conv2_2"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 5000.0
decay_mult: 0.0
}
param {
lr_mult: 5000.0
decay_mult: 0.0
}
param {
lr_mult: 0.0
decay_mult: 0.0
}
param {
lr_mult: 2.0
}
dau_conv_param {
num_output: 128
bias_term: true
number_units: 2
number_units: 3
pad: 3
kernel_size: 7
stride: 1
weight_filler {
type: "gaussian"
std: 0.00999999977648
}
bias_filler {
type: "constant"
value: 1.0
}
mu_filler {
type: "uniform"
min: 2.0
max: 6.0
}
sigma_filler {
type: "constant"
value: 0.5
}
component_border_bound: 0.0
sigma_lower_bound: 0.300000011921
engine: CUDNN
}
}
layer {
name: "relu2_2"
type: "ReLU"
bottom: "conv2_2"
top: "conv2_2"
}
# Merge the two 128-channel branches into a single 256-channel blob.
layer {
name: "conv2"
type: "Concat"
bottom: "conv2_1"
bottom: "conv2_2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "norm2"
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 9.99999974738e-05
beta: 0.75
}
}
# conv3: single DAUConvolution layer, 384 outputs, larger 9x9 kernel
# window (pad 4) with a wider mu init range (2..8) than the conv2 layers.
layer {
name: "conv3"
type: "DAUConvolution"
bottom: "norm2"
top: "conv3"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 5000.0
decay_mult: 0.0
}
param {
lr_mult: 5000.0
decay_mult: 0.0
}
param {
lr_mult: 0.0
decay_mult: 0.0
}
param {
lr_mult: 2.0
}
dau_conv_param {
num_output: 384
bias_term: true
number_units: 2
number_units: 2
pad: 4
kernel_size: 9
stride: 1
weight_filler {
type: "gaussian"
std: 0.00999999977648
}
bias_filler {
type: "constant"
value: 1.0
}
mu_filler {
type: "uniform"
min: 2.0
max: 8.0
}
sigma_filler {
type: "constant"
value: 0.5
}
component_border_bound: 0.0
sigma_lower_bound: 0.300000011921
engine: CUDNN
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
# conv4 stage: two parallel DAUConvolution branches (conv4_1, conv4_2),
# 192 channels each from conv3, concatenated into a 384-channel "conv4".
# Note the mu lr_mult drops to 500 here (vs 5000 in conv2/conv3) and
# only a single number_units entry is given.
layer {
name: "conv4_1"
type: "DAUConvolution"
bottom: "conv3"
top: "conv4_1"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 500.0
decay_mult: 0.0
}
param {
lr_mult: 500.0
decay_mult: 0.0
}
param {
lr_mult: 0.0
decay_mult: 0.0
}
param {
lr_mult: 2.0
}
dau_conv_param {
num_output: 192
bias_term: true
number_units: 2
pad: 3
kernel_size: 7
stride: 1
weight_filler {
type: "gaussian"
std: 0.00999999977648
}
bias_filler {
type: "constant"
value: 1.0
}
mu_filler {
type: "uniform"
min: 2.0
max: 6.0
}
sigma_filler {
type: "constant"
value: 0.5
}
component_border_bound: 0.0
sigma_lower_bound: 0.300000011921
engine: CUDNN
}
}
layer {
name: "relu4_1"
type: "ReLU"
bottom: "conv4_1"
top: "conv4_1"
}
# Second conv4 branch: identical configuration to conv4_1.
layer {
name: "conv4_2"
type: "DAUConvolution"
bottom: "conv3"
top: "conv4_2"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 500.0
decay_mult: 0.0
}
param {
lr_mult: 500.0
decay_mult: 0.0
}
param {
lr_mult: 0.0
decay_mult: 0.0
}
param {
lr_mult: 2.0
}
dau_conv_param {
num_output: 192
bias_term: true
number_units: 2
pad: 3
kernel_size: 7
stride: 1
weight_filler {
type: "gaussian"
std: 0.00999999977648
}
bias_filler {
type: "constant"
value: 1.0
}
mu_filler {
type: "uniform"
min: 2.0
max: 6.0
}
sigma_filler {
type: "constant"
value: 0.5
}
component_border_bound: 0.0
sigma_lower_bound: 0.300000011921
engine: CUDNN
}
}
layer {
name: "relu4_2"
type: "ReLU"
bottom: "conv4_2"
top: "conv4_2"
}
# Merge the two 192-channel branches into a 384-channel blob.
layer {
name: "conv4"
type: "Concat"
bottom: "conv4_1"
bottom: "conv4_2"
top: "conv4"
}
# conv5 stage: two parallel DAUConvolution branches (conv5_1, conv5_2),
# 128 channels each from conv4, concatenated into a 256-channel "conv5",
# followed by the final max-pooling.
layer {
name: "conv5_1"
type: "DAUConvolution"
bottom: "conv4"
top: "conv5_1"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 500.0
decay_mult: 0.0
}
param {
lr_mult: 500.0
decay_mult: 0.0
}
param {
lr_mult: 0.0
decay_mult: 0.0
}
param {
lr_mult: 2.0
}
dau_conv_param {
num_output: 128
bias_term: true
number_units: 2
pad: 3
kernel_size: 7
stride: 1
weight_filler {
type: "gaussian"
std: 0.00999999977648
}
bias_filler {
type: "constant"
value: 1.0
}
mu_filler {
type: "uniform"
min: 2.0
max: 6.0
}
sigma_filler {
type: "constant"
value: 0.5
}
component_border_bound: 0.0
sigma_lower_bound: 0.300000011921
engine: CUDNN
}
}
layer {
name: "relu5_1"
type: "ReLU"
bottom: "conv5_1"
top: "conv5_1"
}
# Second conv5 branch: identical configuration to conv5_1.
layer {
name: "conv5_2"
type: "DAUConvolution"
bottom: "conv4"
top: "conv5_2"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 500.0
decay_mult: 0.0
}
param {
lr_mult: 500.0
decay_mult: 0.0
}
param {
lr_mult: 0.0
decay_mult: 0.0
}
param {
lr_mult: 2.0
}
dau_conv_param {
num_output: 128
bias_term: true
number_units: 2
pad: 3
kernel_size: 7
stride: 1
weight_filler {
type: "gaussian"
std: 0.00999999977648
}
bias_filler {
type: "constant"
value: 1.0
}
mu_filler {
type: "uniform"
min: 2.0
max: 6.0
}
sigma_filler {
type: "constant"
value: 0.5
}
component_border_bound: 0.0
sigma_lower_bound: 0.300000011921
engine: CUDNN
}
}
layer {
name: "relu5_2"
type: "ReLU"
bottom: "conv5_2"
top: "conv5_2"
}
# Merge the two 128-channel branches into a 256-channel blob.
layer {
name: "conv5"
type: "Concat"
bottom: "conv5_1"
bottom: "conv5_2"
top: "conv5"
}
# Final max pooling before the fully-connected classifier.
# (Named "pool3" -- third pooling layer -- rather than AlexNet's "pool5".)
layer {
name: "pool3"
type: "Pooling"
bottom: "conv5"
top: "pool3"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
# Classifier head: fc6 (4096) -> dropout -> fc7 (4096) -> dropout ->
# fc8 (1000 ILSVRC classes) -> softmax.
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool3"
top: "fc6"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 0.0
}
inner_product_param {
num_output: 4096
bias_term: true
weight_filler {
type: "xavier"
}
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
# Dropout is a no-op at inference time; ratio applies during training.
layer {
name: "fc6_d"
type: "Dropout"
bottom: "fc6"
top: "fc6_d"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6_d"
top: "fc7"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 0.0
}
inner_product_param {
num_output: 4096
bias_term: true
weight_filler {
type: "xavier"
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "fc7_d"
type: "Dropout"
bottom: "fc7"
top: "fc7_d"
dropout_param {
dropout_ratio: 0.5
}
}
# fc8: 1000-way output (no explicit param blocks, so default lr/decay).
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7_d"
top: "fc8"
inner_product_param {
num_output: 1000
weight_filler {
type: "xavier"
}
}
}
# NOTE(review): despite being named "loss", this is a plain Softmax --
# it outputs class probabilities, not a SoftmaxWithLoss value.
layer {
name: "loss"
type: "Softmax"
bottom: "fc8"
top: "loss"
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.