# SSDLite with MobileNet v2 configuration for the SANATA dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
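# A typical way to launch training against this config with the TF Object
# Detection API (paths here are illustrative, not part of this gist):
#   python model_main.py \
#     --pipeline_config_path=path/to/this.config \
#     --model_dir=path/to/train_dir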
model {
  ssd {
    # PATH_TO_BE_CONFIGURED
    num_classes: 6
    box_coder {
      faster_rcnn_box_coder {
        y_scale: 10.0
        x_scale: 10.0
        height_scale: 5.0
        width_scale: 5.0
      }
    }
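    # For reference, this box coder uses the Faster R-CNN parameterization:
    # a box is encoded relative to its anchor (ya, xa, ha, wa) as
    # [ty, tx, th, tw] = [10*(y-ya)/ha, 10*(x-xa)/wa, 5*log(h/ha), 5*log(w/wa)].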
    matcher {
      argmax_matcher {
        matched_threshold: 0.5
        unmatched_threshold: 0.5
        ignore_thresholds: false
        negatives_lower_than_unmatched: true
        force_match_for_each_row: true
      }
    }
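    # With matched_threshold == unmatched_threshold, no anchors are ignored:
    # anchors with IOU >= 0.5 against a groundtruth box become positives and
    # everything below 0.5 becomes a negative. force_match_for_each_row
    # guarantees every groundtruth box is assigned at least one anchor.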
    similarity_calculator {
      iou_similarity {
      }
    }
    anchor_generator {
      ssd_anchor_generator {
        num_layers: 6
        min_scale: 0.05
        max_scale: 0.95
        aspect_ratios: 1.0
        aspect_ratios: 2.0
        aspect_ratios: 0.5
        aspect_ratios: 3.0
        aspect_ratios: 0.3333
      }
    }
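    # The generator interpolates anchor scales linearly across the 6 layers:
    # s_k = min_scale + (max_scale - min_scale) * k / (num_layers - 1),
    # giving scales 0.05, 0.23, 0.41, 0.59, 0.77, 0.95 of the input size.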
    image_resizer {
      fixed_shape_resizer {
        height: 512
        width: 512
      }
    }
    box_predictor {
      convolutional_box_predictor {
        min_depth: 0
        max_depth: 0
        num_layers_before_predictor: 0
        use_dropout: false
        dropout_keep_probability: 0.8
        kernel_size: 3
        use_depthwise: true
        box_code_size: 4
        apply_sigmoid_to_scores: false
        conv_hyperparams {
          activation: RELU_6,
          regularizer {
            l2_regularizer {
              weight: 0.00004
            }
          }
          initializer {
            truncated_normal_initializer {
              stddev: 0.03
              mean: 0.0
            }
          }
          batch_norm {
            train: true,
            scale: true,
            center: true,
            decay: 0.9997,
            epsilon: 0.001,
          }
        }
      }
    }
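    # Since use_dropout is false above, dropout_keep_probability is ignored.
    # use_depthwise swaps the predictor's convolutions for depthwise-separable
    # ones, which is what makes this "SSDLite" rather than plain SSD.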
    feature_extractor {
      type: 'ssd_mobilenet_v2'
      min_depth: 16
      depth_multiplier: .7 # CHANGED: was 1.0 originally
      use_depthwise: true
      conv_hyperparams {
        activation: RELU_6,
        regularizer {
          l2_regularizer {
            weight: 0.00004
          }
        }
        initializer {
          truncated_normal_initializer {
            stddev: 0.03
            mean: 0.0
          }
        }
        batch_norm {
          train: true,
          scale: true,
          center: true,
          decay: 0.9997,
          epsilon: 0.001,
        }
      }
    }
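    # depth_multiplier scales the channel count of every backbone layer
    # (floored at min_depth), so 0.7 makes each layer roughly 70% as wide,
    # trading some accuracy for a smaller and faster model.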
    loss {
      classification_loss {
        weighted_sigmoid {
        }
      }
      localization_loss {
        weighted_smooth_l1 {
        }
      }
      hard_example_miner {
        num_hard_examples: 3000
        iou_threshold: 0.99
        loss_type: CLASSIFICATION
        max_negatives_per_positive: 3
        min_negatives_per_image: 3
      }
      classification_weight: 1.0
      localization_weight: 1.0
    }
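    # The miner above keeps at most 3000 highest-loss anchors per image,
    # ranked by classification loss, and enforces a 3:1 negative-to-positive
    # ratio, i.e. the standard SSD hard negative mining scheme.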
    normalize_loss_by_num_matches: true
    post_processing {
      batch_non_max_suppression {
        score_threshold: 1e-8
        iou_threshold: 0.6
        max_detections_per_class: 100
        max_total_detections: 100
      }
      score_converter: SIGMOID
    }
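    # score_threshold is effectively 0 (1e-8), so nearly all boxes reach NMS;
    # NMS then drops any box overlapping a higher-scoring box of the same
    # class with IOU > 0.6 and caps the output at 100 detections.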
    encode_background_as_zeros: true
    normalize_loc_loss_by_codesize: true
    inplace_batchnorm_update: true
    freeze_batchnorm: false
  }
}
train_config: {
  batch_size: 8
  optimizer {
    rms_prop_optimizer: {
      learning_rate: {
        exponential_decay_learning_rate {
          # In Huang et al., 2017 the params (for MobileNetV1) were:
          # initial_learning_rate=0.004, decay_steps=800k, decay_factor=0.95
          # JKJeung used:
          # initial_learning_rate=0.004, decay_steps=1000, decay_factor=0.8
          initial_learning_rate: 0.004
          decay_steps: 10000
          decay_factor: 0.9
        }
      }
      momentum_optimizer_value: 0.9
      decay: 0.9
      epsilon: 1.0
    }
  }
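  # With these values the learning rate at step s is roughly
  # 0.004 * 0.9^(s / 10000), applied stepwise assuming the schedule's default
  # staircase behavior, e.g. about 0.0014 after 100k steps.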
  # PATH_TO_BE_CONFIGURED
  # Original is "published_model_checkpoints/ssdlite_mobilenet_v2_coco_2018_05_09/model.ckpt"
  fine_tune_checkpoint: ""
  fine_tune_checkpoint_type: "detection"
  # Set below to true to restore ALL variables in the model checkpoint
  load_all_detection_checkpoint_vars: true
  summarize_gradients: false
  # Note: a positive num_steps caps training (the pets tutorial uses 200K,
  # which it found empirically sufficient). It is set to 0 here, so training
  # runs indefinitely.
  num_steps: 0
  data_augmentation_options {
    random_horizontal_flip {
    }
    random_image_scale {
      max_scale_ratio: 1.5
    }
  }
  data_augmentation_options {
    ssd_random_crop {
    }
  }
}
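# The augmentations above are applied in sequence each step: a random
# horizontal flip, a random rescale of up to 1.5x, and then the random-crop
# scheme from the original SSD paper.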
train_input_reader: {
  tf_record_input_reader {
    # PATH_TO_BE_CONFIGURED
    input_path: "data/TETMMI_train.tfrecord"
  }
  # PATH_TO_BE_CONFIGURED
  label_map_path: "labelmaps/TETMMI_label_map.pbtxt"
}
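# For reference, each of the 6 classes needs an entry in the label map file
# of the form below (the name shown is a placeholder, not from this gist):
#   item {
#     id: 1
#     name: 'some_class'
#   }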
eval_config: {
  num_examples: 100
  num_visualizations: 10
  # Note: evaluation actually runs every max(eval_interval_secs, checkpoint
  # interval), where the checkpoint interval is set by `save_checkpoints_steps`
  # in `model_main.py` (line 65).
  eval_interval_secs: 3
  # PATH_TO_BE_CONFIGURED
  visualization_export_dir: "results/eval_results"
  export_path: "results/eval_results"
  # Note: The line below limits the evaluation process to 10 evaluations.
  # Remove it to evaluate indefinitely.
  max_evals: 10
  # Whether to include per-category metrics. THIS IS NOT YET IMPLEMENTED:
  # https://stackoverflow.com/questions/51848274/show-per-category-metrics-in-tensorboard
  metrics_set: "coco_detection_metrics"
  include_metrics_per_category: true
}
eval_input_reader: {
  tf_record_input_reader {
    # PATH_TO_BE_CONFIGURED
    input_path: "data/TETMMI_val.tfrecord"
  }
  # PATH_TO_BE_CONFIGURED
  label_map_path: "labelmaps/TETMMI_label_map.pbtxt"
  shuffle: false
  num_readers: 1
}
# Required for quantization-aware training (needed for deployment to Coral)
graph_rewriter {
  quantization {
    delay: 18000
    weight_bits: 8
    activation_bits: 8
  }
}
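# delay: 18000 means quantization-aware training only kicks in after 18k
# float-precision steps; 8-bit weights and activations match what the Edge
# TPU compiler expects for a fully quantized TFLite model.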