@rhee
Created December 13, 2019 08:39
manipulate_tf_pipeline_config.py
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: percent
#       format_version: '1.3'
#     jupytext_version: 1.3.0+dev
#   kernelspec:
#     display_name: Python [conda env:TFVDL2-dev]
#     language: python
#     name: conda-env-TFVDL2-dev-py
# %%
# %run ../ml-workspace-common/ml-workspace-common.py
## helper defined in ml-workspace-common.py; presumably selects the GPU with the most free memory
cuda_select_best_memfree()
# %%
import sys
from os.path import expanduser
sys.path.insert(0, expanduser('~/Programs/VDLServer/VDL_InferenceProxy3/engines/tf/lib'))
# %%
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2
## How to read pipeline.config
config_file = 'cliprojects/107/Default_Net/data/pipeline.config'
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_file, encoding='utf-8') as f:
    text_format.Merge(f.read(), config)
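# %%
## Alternative, as a sketch (not in the original flow): the Object Detection API's
## config_util helpers load the same file into a dict of sub-configs.
from object_detection.utils import config_util
configs = config_util.get_configs_from_pipeline_file(config_file)
configs['train_config']  # same TrainConfig message, accessed by key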
# %%
## Check the initial contents
config.train_config
# %%
## For simple scalar values, plain assignment is enough
config.train_config.num_steps = 100
# %%
config.train_config
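# %%
## A couple more plain assignments as a sketch; batch_size and fine_tune_checkpoint are
## standard TrainConfig fields, but the checkpoint path here is hypothetical.
config.train_config.batch_size = 8
config.train_config.fine_tune_checkpoint = '/path/to/model.ckpt'  # hypothetical path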
# %%
## Pick the optimizer message first, then change learning_rate / manual_step_learning_rate
opt_cfg = config.train_config.optimizer.momentum_optimizer
## The config may already carry initial_learning_rate and schedule values, so Clear() first
opt_cfg.learning_rate.manual_step_learning_rate.Clear()
## Touching opt_cfg now discards whichever optimizer was previously selected (oneof);
## that is fine if it was momentum_optimizer to begin with.
opt_cfg.learning_rate.manual_step_learning_rate.initial_learning_rate = 0.001
## Add a single schedule entry
opt_cfg.learning_rate.manual_step_learning_rate.schedule.add(step=500, learning_rate=1e-6)
# %%
opt_cfg
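# %%
## A sketch for checking which branch of a oneof is currently set; the oneof names
## 'optimizer' and 'learning_rate' are assumed from the object_detection proto definitions.
(config.train_config.optimizer.WhichOneof('optimizer'),
 opt_cfg.learning_rate.WhichOneof('learning_rate'))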
# %%
## Another way to add entries to the repeated message
new_schedule = opt_cfg.learning_rate.manual_step_learning_rate.schedule.add()
new_schedule.step = 1000
new_schedule.learning_rate = 1e-5
new_schedule = opt_cfg.learning_rate.manual_step_learning_rate.schedule.add()
new_schedule.step = 5000
new_schedule.learning_rate = 1e-6
# %%
opt_cfg
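# %%
## Repeated fields act like Python sequences, so entries can be inspected, indexed and
## deleted in place. A small sketch (the index is only for illustration):
schedule = opt_cfg.learning_rate.manual_step_learning_rate.schedule
len(schedule), schedule[-1].step, schedule[-1].learning_rate
# del schedule[0]  # would remove the first entry added above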
# %%
## How to manipulate the repeated data_augmentation_options message
config.train_config.ClearField("data_augmentation_options")
## Add a new data_augmentation_options entry
config.train_config.data_augmentation_options.add().random_horizontal_flip.Clear()
# %%
config.train_config
# %%
config.train_config.data_augmentation_options.add().random_jitter_boxes.Clear()
# %%
config.train_config
# %%
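## To persist the edited config, serialize it back to text format. A minimal sketch;
## the output filename below is hypothetical.
out_file = 'cliprojects/107/Default_Net/data/pipeline.edited.config'
with open(out_file, 'w', encoding='utf-8') as f:
    f.write(text_format.MessageToString(config))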