Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save jyegerlehner/1aaf9ed5791583dadb36 to your computer and use it in GitHub Desktop.
Save jyegerlehner/1aaf9ed5791583dadb36 to your computer and use it in GitHub Desktop.
# Caffe (legacy "layers"/enum-type syntax) definition of the GoogLeNet
# Inception (3a) module from Szegedy et al., "Going Deeper with Convolutions"
# (arXiv:1409.4842), Table 1. Four parallel branches read "maxpool_2":
#   1) 1x1 conv, 64 outputs
#   2) 1x1 reduce (96) -> 3x3 conv (128, pad 1)
#   3) 1x1 reduce (16) -> 5x5 conv (32, pad 2)
#   4) 3x3 max pool (pad 1, stride 1) -> 1x1 projection (32)
# All branches preserve the 28x28 spatial size; their outputs are
# depth-concatenated to 64+128+32+32 = 256 channels.
name: "Inception_3a"
# Input blob shape: N=1, C=192, H=28, W=28 (the 28x28x192 input of
# inception (3a) in Table 1 of the paper).
input: "maxpool_2"
input_dim: 1
input_dim: 192
input_dim: 28
input_dim: 28
# ---------- Branch 1: 1x1 convolution (64 outputs) ----------
layers {
name: "1x1_conv"
type: CONVOLUTION
bottom: "maxpool_2"
top: "1x1_conv"
# Per-blob learning-rate multipliers: weights x1, bias x2.
# NOTE(review): blobs_lr is the deprecated pre-"layer" Caffe syntax;
# newer Caffe uses param { lr_mult: ... } instead.
blobs_lr: 1
blobs_lr: 2
convolution_param {
num_output: 64
kernel_size: 1
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
# ReLU after the 1x1 branch. NOTE(review): tops differ from bottoms for
# all ReLUs here, so activations are not computed in place; in-place
# ReLU (top == bottom) would save memory with identical results.
layers {
name: "1x1_relu"
type: RELU
bottom: "1x1_conv"
top: "1x1_relu"
}
# ---------- Branch 2: 1x1 reduce (96) then 3x3 conv (128) ----------
layers {
name: "3x3reduce_conv"
type: CONVOLUTION
bottom: "maxpool_2"
top: "3x3reduce_conv"
blobs_lr: 1
blobs_lr: 2
convolution_param {
# Dimensionality reduction before the expensive 3x3 convolution.
num_output: 96
kernel_size: 1
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layers {
name: "3x3reduce_relu"
type: RELU
bottom: "3x3reduce_conv"
top: "3x3reduce_relu"
}
layers {
name: "3x3_conv"
type: CONVOLUTION
bottom: "3x3reduce_relu"
top: "3x3_conv"
blobs_lr: 1
blobs_lr: 2
convolution_param {
num_output: 128
# pad 1 with kernel 3, stride 1 keeps the 28x28 spatial size.
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layers {
name: "3x3_relu"
type: RELU
bottom: "3x3_conv"
top: "3x3_relu"
}
# ---------- Branch 3: 1x1 reduce (16) then 5x5 conv (32) ----------
layers {
name: "5x5reduce_conv"
type: CONVOLUTION
bottom: "maxpool_2"
top: "5x5reduce_conv"
blobs_lr: 1
blobs_lr: 2
convolution_param {
# Dimensionality reduction before the 5x5 convolution.
num_output: 16
kernel_size: 1
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layers {
name: "5x5reduce_relu"
type: RELU
bottom: "5x5reduce_conv"
top: "5x5reduce_relu"
}
layers {
name: "5x5_conv"
type: CONVOLUTION
bottom: "5x5reduce_relu"
top: "5x5_conv"
blobs_lr: 1
blobs_lr: 2
convolution_param {
num_output: 32
# pad 2 with kernel 5, stride 1 keeps the 28x28 spatial size.
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layers {
name: "5x5_relu"
type: RELU
bottom: "5x5_conv"
top: "5x5_relu"
}
# ---------- Branch 4: 3x3 max pool then 1x1 projection (32) ----------
layers {
name: "maxpool_3x3"
type: POOLING
bottom: "maxpool_2"
top: "maxpool_3x3"
pooling_param {
pool: MAX
# pad 1, kernel 3, stride 1: output stays 28x28 so it can be
# concatenated with the convolutional branches.
pad: 1
kernel_size: 3
stride: 1
}
}
layers {
name: "poolproj_1x1conv"
type: CONVOLUTION
bottom: "maxpool_3x3"
top: "poolproj_1x1conv"
blobs_lr: 1
blobs_lr: 2
convolution_param {
num_output: 32
kernel_size: 1
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layers {
name: "poolproj_relu"
type: RELU
bottom: "poolproj_1x1conv"
top: "poolproj_relu"
}
# Concatenate the four branches along the channel axis (concat_dim 1):
# 64 + 128 + 32 + 32 = 256 channels, output shape 1x256x28x28.
layers {
name: "DepthConcatenation"
type: CONCAT
concat_param {
concat_dim: 1
}
bottom: "1x1_relu"
bottom: "3x3_relu"
bottom: "5x5_relu"
bottom: "poolproj_relu"
top: "inception3a_Output"
}
@Shaunakde
Copy link

Just putting the paper down: http://arxiv.org/pdf/1409.4842v1.pdf

@aravind-vasudevan
Copy link

@jyegerlehner thanks for this implementation. I am implementing the inception module in C and I have some issues understanding this fully. If you can shed some light on any of the following it would be great:

  1. Where do we get the filters and the number of filters to apply from? In Table 1 of the paper, the inception (3a) module seems to get as input a 28x28x192 matrix. To it, 64 1x1 filters (I am assuming each of them has 192 channels?) are applied; 128 3x3 filters (each filter with 96 channels, as there are 96 1x1 filters before that); and 32 5x5 filters (each filter with 16 channels). Are these numbers (the number of filters for each type) just made up by the authors through trial and error, or is there a more general way of generating these numbers?
  2. What are "AveragePool", "SoftmaxActivation" and "FC", and how does one go about implementing them?

I would be very obliged if you could guide me in the right direction!

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment