Last active
September 14, 2018 19:12
-
-
Save so77id/92e56471913b4457e2397b8fb146a9d7 to your computer and use it in GitHub Desktop.
Makefile Experiments
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# FUNCTION
# cecho: print $(1) in bright green using ANSI escape codes.
# printf '%b' interprets the \033 escapes portably; plain `echo` only does so
# on some shells (e.g. dash), and would print the codes literally under bash.
define cecho
@printf '%b\n' "\033[92m$(1)\033[0m"
endef
# rcnn-fer Docker Makefile
PROGRAM="RCNN-FER"
# Docker image coordinates (registry user / image name / tag) for the CPU and
# GPU variants of the experiment image.
CPU_REGISTRY_URL=so77id
GPU_REGISTRY_URL=so77id
CPU_VERSION=latest-cpu
GPU_VERSION=latest-gpu
CPU_DOCKER_IMAGE=mrodriguez-recod
GPU_DOCKER_IMAGE=mrodriguez-recod
DOCKER_USER=mrodriguez
DOCKER_CONTAINER_NAME=mrodriguez
##############################################################################
############################# Exposed vars ####################################
##############################################################################
# enable/disable GPU usage (GPU=true selects nvidia-docker + the GPU image)
GPU=false
# Config file used to experiment
CONFIG_FILE=""
# List of cuda devices (comma separated, e.g. CUDA_VISIBLE_DEVICES=0,1)
CUDA_VISIBLE_DEVICES=0
# Name of dataset to process (ck | mmi | oulu)
PROCESS_DATASET=""
# Path to src folder
# NOTE: these "" placeholders only document the knobs; they are overridden
# unconditionally in the DOCKER VARS section below.
HOST_CPU_SOURCE_PATH = ""
HOST_GPU_SOURCE_PATH = ""
# Path to dataset
HOST_CPU_DATASETS_PATH = ""
HOST_GPU_DATASETS_PATH = ""
# Path to metadata
HOST_CPU_METADATA_PATH = ""
HOST_GPU_METADATA_PATH = ""
##############################################################################
############################# DOCKER VARS ####################################
##############################################################################
# COMMANDS
DOCKER_COMMAND=docker
NVIDIA_DOCKER_COMMAND=nvidia-docker
# HOST VARS: ports on the host that map to the container's services.
LOCALHOST_IP=127.0.0.1
HOST_TENSORBOARD_PORT=26006
HOST_NOTEBOOK_PORT=28888
# HOST CPU VARS (local workstation paths).
# `:=` with $(CURDIR) instead of `= $(shell pwd)`: same value, but evaluated
# once at parse time instead of forking a shell on every expansion.
HOST_CPU_SOURCE_PATH:=$(CURDIR)
HOST_CPU_DATASETS_PATH=/Users/so77id/Desktop/workspace/master/rcnn_implementations/datasets
HOST_CPU_METADATA_PATH=/Users/so77id/Desktop/workspace/master/rcnn_implementations/metadata
# HOST GPU PATHS (cluster paths, per user).
HOST_GPU_SOURCE_PATH:=$(CURDIR)
HOST_GPU_DATASETS_PATH=/datasets/$(USER)
HOST_GPU_METADATA_PATH=/work/$(USER)/metadata/fer
# IMAGE VARS: ports and mount points inside the container.
IMAGE_TENSORBOARD_PORT=6006
IMAGE_NOTEBOOK_PORT=8888
IMAGE_SOURCE_PATH=/home/src
IMAGE_DATASETS_PATH=/home/datasets
IMAGE_METADATA_PATH=/home/metadata
# DOCKER vars
EXP_NAME=""
# VOLUMES: mount source, datasets and metadata into the container and start
# in the source directory.
CPU_DOCKER_VOLUMES = --volume=$(HOST_CPU_SOURCE_PATH):$(IMAGE_SOURCE_PATH) \
                     --volume=$(HOST_CPU_DATASETS_PATH):$(IMAGE_DATASETS_PATH) \
                     --volume=$(HOST_CPU_METADATA_PATH):$(IMAGE_METADATA_PATH) \
                     --workdir=$(IMAGE_SOURCE_PATH)
GPU_DOCKER_VOLUMES = --volume=$(HOST_GPU_SOURCE_PATH):$(IMAGE_SOURCE_PATH) \
                     --volume=$(HOST_GPU_DATASETS_PATH):$(IMAGE_DATASETS_PATH) \
                     --volume=$(HOST_GPU_METADATA_PATH):$(IMAGE_METADATA_PATH) \
                     --workdir=$(IMAGE_SOURCE_PATH)
# Port bindings restricted to localhost.
DOCKER_TENSORBOARD_PORTS = -p $(LOCALHOST_IP):$(HOST_TENSORBOARD_PORT):$(IMAGE_TENSORBOARD_PORT)
DOCKER_JUPYTER_PORTS = -p $(LOCALHOST_IP):$(HOST_NOTEBOOK_PORT):$(IMAGE_NOTEBOOK_PORT)
# IF GPU == false --> GPU is disabled
# IF GPU == true  --> GPU is enabled
ifeq ($(GPU), true)
DOCKER_RUN_COMMAND=$(NVIDIA_DOCKER_COMMAND) run -it --rm --userns=host --name=$(DOCKER_CONTAINER_NAME)-$(EXP_NAME) $(GPU_DOCKER_VOLUMES) $(GPU_REGISTRY_URL)/$(GPU_DOCKER_IMAGE):$(GPU_VERSION)
# Tensorboard needs no GPU runtime, so it runs with plain docker on the CPU
# image. (The original mixed the GPU image name with the CPU tag; using the
# CPU registry/image/tag triple consistently yields the same image here and
# stays correct if the CPU and GPU image names ever diverge.)
DOCKER_RUN_TENSORBOARD_COMMAND=$(DOCKER_COMMAND) run -it --rm --userns=host --name=$(DOCKER_CONTAINER_NAME)-$(EXP_NAME) $(DOCKER_TENSORBOARD_PORTS) $(GPU_DOCKER_VOLUMES) $(CPU_REGISTRY_URL)/$(CPU_DOCKER_IMAGE):$(CPU_VERSION)
DOCKER_RUN_JUPYTER_COMMAND=$(NVIDIA_DOCKER_COMMAND) run -it --rm --userns=host --name=$(DOCKER_CONTAINER_NAME)-$(EXP_NAME) $(DOCKER_JUPYTER_PORTS) $(GPU_DOCKER_VOLUMES) $(GPU_REGISTRY_URL)/$(GPU_DOCKER_IMAGE):$(GPU_VERSION)
else
DOCKER_RUN_COMMAND=$(DOCKER_COMMAND) run -it --rm --userns=host --name=$(DOCKER_CONTAINER_NAME)-$(EXP_NAME) $(CPU_DOCKER_VOLUMES) $(CPU_REGISTRY_URL)/$(CPU_DOCKER_IMAGE):$(CPU_VERSION)
DOCKER_RUN_TENSORBOARD_COMMAND=$(DOCKER_COMMAND) run -it --rm --userns=host --name=$(DOCKER_CONTAINER_NAME)-$(EXP_NAME) $(DOCKER_TENSORBOARD_PORTS) $(CPU_DOCKER_VOLUMES) $(CPU_REGISTRY_URL)/$(CPU_DOCKER_IMAGE):$(CPU_VERSION)
DOCKER_RUN_JUPYTER_COMMAND=$(DOCKER_COMMAND) run -it --rm --userns=host --name=$(DOCKER_CONTAINER_NAME)-$(EXP_NAME) $(DOCKER_JUPYTER_PORTS) $(CPU_DOCKER_VOLUMES) $(CPU_REGISTRY_URL)/$(CPU_DOCKER_IMAGE):$(CPU_VERSION)
endif
##############################################################################
############################## CODE VARS #####################################
##############################################################################
# COMMANDS
PYTHON_COMMAND=python3 -m
EXPORT_COMMAND=export
BASH_COMMAND=bash
TENSORBOARD_COMMAND=tensorboard
JUPYTER_COMMAND=jupyter
# PATHS: per-dataset preprocessing script bundles.
MMI_LIB_PATH=./libs/MMI_scripts
CK_LIB_PATH=./libs/CK_scripts
OULU_LIB_PATH=./libs/OULU_scripts
# UCF_LIB_PATH=./libs/UCF101-scripts
# MMNIST_LIB_PATH=./libs/MMnist-scripts
# FILES: mains.* are python module paths run via `python3 -m`;
# process_database.sh is the shell entry point inside each *_LIB_PATH.
PROCESS_DATABASE_FILE=process_database.sh
TRAIN_FILE=mains.train
BENCHMARK_FILE=mains.benchmark
KFOLD_TRAIN_FILE=mains.kfold_train
PREDICT_FILE=predict.py
# CONFIG FILES: empty defaults, expected on the command line or derived from
# the DATASET/OPTIMIZER short names below.
NETWORK_CF=""
OPTIMIZER=""
OPTIMIZER_CF=""
DATASET=""
DATASET_CF=""
BENCHMARK_CF=""
METADATA_CF=./configs/metadata/metadata.json
GLOBAL_CONFIG_CF=./configs/global_config/global_config.json
# select dataset: map the short DATASET name to its JSON config file.
# (Dataset file names are irregular, so each one keeps its own ifeq.)
ifeq ($(DATASET), ck)
DATASET_CF=./configs/datasets/ck_plus.json
endif
ifeq ($(DATASET), mmi)
DATASET_CF=./configs/datasets/mmi.json
endif
ifeq ($(DATASET), oulu)
DATASET_CF=./configs/datasets/oulu_casia.json
endif
# Select optimizer: every supported optimizer's config follows the same
# ./configs/optimizers/<name>.json pattern, so a single $(filter ...) check
# replaces six copy-pasted ifeq blocks. An unset/unknown OPTIMIZER (including
# the default "") leaves OPTIMIZER_CF untouched, exactly as before.
ifneq ($(filter $(OPTIMIZER),gd adadelta adagrad momentum rmsprop adam),)
OPTIMIZER_CF=./configs/optimizers/$(OPTIMIZER).json
endif
##############################################################################
############################ CODE COMMANDS ###################################
##############################################################################
# Default goal: train a single model.
all: train
# Pre-process the dataset selected via PROCESS_DATASET (ck | mmi | oulu) by
# running that dataset's process_database.sh. With the default PROCESS_DATASET
# ("") it only prints usage examples. `pd` is the short alias.
process-dataset pd:
ifeq ($(PROCESS_DATASET), ck)
	$(call cecho, "[Dataset Processing] Processing CK+..")
	@$(BASH_COMMAND) $(CK_LIB_PATH)/$(PROCESS_DATABASE_FILE)
endif
ifeq ($(PROCESS_DATASET), mmi)
	$(call cecho, "[Dataset Processing] Processing MMI..")
	@$(BASH_COMMAND) $(MMI_LIB_PATH)/$(PROCESS_DATABASE_FILE)
endif
ifeq ($(PROCESS_DATASET), oulu)
	$(call cecho, "[Dataset Processing] Processing Oulu Casia..")
	@$(BASH_COMMAND) $(OULU_LIB_PATH)/$(PROCESS_DATABASE_FILE)
endif
ifeq ($(PROCESS_DATASET), "")
	$(call cecho, "[Dataset Processing] dataset not selected...")
	$(call cecho, "\t example: make process-dataset PROCESS_DATASET=ck")
	$(call cecho, "\t example: make process-dataset PROCESS_DATASET=mmi")
endif
# Train a single model with the selected config files. `t` is the short alias.
# BUG FIX: the old `@export CUDA_VISIBLE_DEVICES=...` line ran in its own
# shell, so the setting never reached the python line below — each recipe line
# gets a fresh shell. The env var is now set on the python command itself.
train t:
	$(call cecho, "[Train] Trainning model")
	$(call cecho, " Using CUDA_VISIBLE_DEVICES: "$(CUDA_VISIBLE_DEVICES))
	$(call cecho, " Trainning with: "$(NETWORK_CF) $(OPTIMIZER_CF) $(DATASET_CF) $(METADATA_CF) $(GLOBAL_CONFIG_CF))
	@CUDA_VISIBLE_DEVICES=$(CUDA_VISIBLE_DEVICES) $(PYTHON_COMMAND) $(TRAIN_FILE) -cd $(DATASET_CF) -cn $(NETWORK_CF) -ct $(OPTIMIZER_CF) -cg $(GLOBAL_CONFIG_CF) -cm $(METADATA_CF)
# Benchmark the datasets with the selected config files. `b` is the alias.
# BUG FIX: `export` on its own recipe line affects only its own shell, so the
# CUDA setting never reached python; it is now inlined on the command line.
benchmark b:
	$(call cecho, "[Benchmark] Get benchmark of datasets")
	$(call cecho, " Using CUDA_VISIBLE_DEVICES: "$(CUDA_VISIBLE_DEVICES))
	$(call cecho, " Benchmark with: "$(NETWORK_CF) $(OPTIMIZER_CF) $(BENCHMARK_CF) $(METADATA_CF) $(GLOBAL_CONFIG_CF))
	@CUDA_VISIBLE_DEVICES=$(CUDA_VISIBLE_DEVICES) $(PYTHON_COMMAND) $(BENCHMARK_FILE) -cb $(BENCHMARK_CF) -cn $(NETWORK_CF) -ct $(OPTIMIZER_CF) -cg $(GLOBAL_CONFIG_CF) -cm $(METADATA_CF)
# K-fold training with the selected config files. `kft` is the short alias.
# BUG FIX: same export-in-its-own-shell issue as `train` — the env var is now
# set directly on the python command line.
kfoldtrain kft:
	$(call cecho, "[Kfold train] trainning k model")
	$(call cecho, " Using CUDA_VISIBLE_DEVICES: "$(CUDA_VISIBLE_DEVICES))
	$(call cecho, " Trainning with: "$(NETWORK_CF) $(OPTIMIZER_CF) $(DATASET_CF) $(METADATA_CF) $(GLOBAL_CONFIG_CF))
	@CUDA_VISIBLE_DEVICES=$(CUDA_VISIBLE_DEVICES) $(PYTHON_COMMAND) $(KFOLD_TRAIN_FILE) -cd $(DATASET_CF) -cn $(NETWORK_CF) -ct $(OPTIMIZER_CF) -cg $(GLOBAL_CONFIG_CF) -cm $(METADATA_CF)
# predict p: | |
# @echo "[Predict] Evaluating test" | |
# ifneq ($(CONFIG_FILE), "") | |
# @echo "\t Using CUDA_VISIBLE_DEVICES: "$(CUDA_VISIBLE_DEVICES) | |
# @echo "\t Predict with: "$(CONFIG_FILE) | |
# @$(EXPORT_COMMAND) CUDA_VISIBLE_DEVICES=$(CUDA_VISIBLE_DEVICES) | |
# @$(PYTHON_COMMAND) $(PREDICT_FILE) -c $(CONFIG_FILE) | |
# else | |
# @echo "[Predict] config file not selected..." | |
# @echo "\t examples" | |
# @echo "\t\tmake predict CONFIG_FILE=./configs/ck-config.json" | |
# @echo "\t\tmake predict CONFIG_FILE=./configs/mmi-config.json" | |
# endif | |
# Serve tensorboard on all interfaces, reading logs from the metadata mount.
# Meant to run inside the container (see run-tensorboard). `tb` is the alias.
tensorboard tb:
	$(call cecho, "[Tensorboard] Running Tensorboard")
	@$(TENSORBOARD_COMMAND) --logdir=$(IMAGE_METADATA_PATH) --host 0.0.0.0
# Start Jupyter Lab (container runs as root, hence --allow-root).
# Meant to run inside the container (see run-jupyter). `jp` is the alias.
jupyter jp:
	$(call cecho, "[Jupyter] Running Jupyter lab")
	@$(JUPYTER_COMMAND) lab --allow-root
##############################################################################
########################### DOCKER COMMANDS ##################################
##############################################################################
#make train CUDA_VISIBLE_DEVICES=0,1 CONFIG_FILE=./configs/ck-config.json
# Launch the (CPU or GPU) container and run `make train` inside it, forwarding
# the config variables. `rc` is the short alias.
# BUG FIX: the old trailing `; status=$$` was truncated, never used, and —
# being the last command — made the recipe exit 0 even when docker/train
# failed. Dropping it lets make see the real exit status.
run rc: docker-print
	@$(DOCKER_RUN_COMMAND) bash -c "make train CUDA_VISIBLE_DEVICES=$(CUDA_VISIBLE_DEVICES) \
		NETWORK_CF=$(NETWORK_CF) \
		OPTIMIZER_CF=$(OPTIMIZER_CF) \
		DATASET_CF=$(DATASET_CF) \
		METADATA_CF=$(METADATA_CF) \
		GLOBAL_CONFIG_CF=$(GLOBAL_CONFIG_CF)"
# Launch the container and run `make benchmark` inside it. `rb` is the alias.
# BUG FIX: removed the truncated, unused `; status=$$` tail that masked the
# container's exit status from make.
run-benchmark rb: docker-print
	@$(DOCKER_RUN_COMMAND) bash -c "make benchmark CUDA_VISIBLE_DEVICES=$(CUDA_VISIBLE_DEVICES) \
		NETWORK_CF=$(NETWORK_CF) \
		OPTIMIZER_CF=$(OPTIMIZER_CF) \
		BENCHMARK_CF=$(BENCHMARK_CF) \
		METADATA_CF=$(METADATA_CF) \
		GLOBAL_CONFIG_CF=$(GLOBAL_CONFIG_CF)"
# Launch the container and run `make kfoldtrain` inside it. `rkfc` is the alias.
# BUG FIX: removed the truncated, unused `; status=$$` tail that masked the
# container's exit status from make.
run-kfold rkfc: docker-print
	@$(DOCKER_RUN_COMMAND) bash -c "make kfoldtrain CUDA_VISIBLE_DEVICES=$(CUDA_VISIBLE_DEVICES) \
		NETWORK_CF=$(NETWORK_CF) \
		OPTIMIZER_CF=$(OPTIMIZER_CF) \
		DATASET_CF=$(DATASET_CF) \
		METADATA_CF=$(METADATA_CF) \
		GLOBAL_CONFIG_CF=$(GLOBAL_CONFIG_CF)"
# run-predict rp: docker-print | |
# ifneq ($(CONFIG_FILE), "") | |
# @$(DOCKER_RUN_COMMAND) bash -c "make predict CUDA_VISIBLE_DEVICES=$(CUDA_VISIBLE_DEVICES) CONFIG_FILE=$(CONFIG_FILE)"; \ | |
# status=$$ | |
# else | |
# @echo "[Predic] config file not selected..." | |
# @echo "\t examples with CPU" | |
# @echo "\t\tmake test CONFIG_FILE=./configs/ck-config.json" | |
# @echo "\t\tmake test CONFIG_FILE=./configs/mmi-config.json" | |
# @echo "\t examples with GPU and specific cuda devise" | |
# @echo "\t\tmake test CONFIG_FILE=./configs/ck-config.json GPU=true CUDA_VISIBLE_DEVICES=0" | |
# @echo "\t\tmake test CONFIG_FILE=./configs/mmi-config.json GPU=true CUDA_VISIBLE_DEVICES=0,1" | |
# endif | |
# Open an interactive shell in the (CPU or GPU) image for manual testing.
run-test rtm: docker-print
	@$(DOCKER_RUN_COMMAND)
# Launch the container with the tensorboard port mapped and run tensorboard.
# The unused `; status=$$?` capture was dropped: it hid the container's exit
# status from make (the assignment always succeeds).
run-tensorboard rt: docker-print
	@$(DOCKER_RUN_TENSORBOARD_COMMAND) bash -c "make tensorboard IMAGE_METADATA_PATH=$(IMAGE_METADATA_PATH)"
# Launch the container with the notebook port mapped and run jupyter lab.
# The unused `; status=$$?` capture was dropped: it hid the container's exit
# status from make (the assignment always succeeds).
run-jupyter rj: docker-print
	@$(DOCKER_RUN_JUPYTER_COMMAND) bash -c "make jupyter"
# Launch the container and run `make process-dataset` inside it, forwarding
# PROCESS_DATASET; with the default "" it only prints usage examples.
# The unused `; status=$$?` capture was dropped: it hid the container's exit
# status from make (the assignment always succeeds).
run-dataset rpd: docker-print
ifneq ($(PROCESS_DATASET), "")
	@$(DOCKER_RUN_COMMAND) bash -c "make process-dataset PROCESS_DATASET=$(PROCESS_DATASET)"
endif
ifeq ($(PROCESS_DATASET), "")
	@echo "[Dataset Processing] dataset not selected..."
	@echo "\t example: make run-dataset PROCESS_DATASET=ck"
	@echo "\t example: make run-dataset PROCESS_DATASET=mmi"
endif
#PRIVATE
# Banner prerequisite for every run-* target: states whether the CPU or the
# GPU image is about to be used. `psd` is the short alias.
docker-print psd:
ifeq ($(GPU), true)
	$(call cecho, "[GPU Docker] Running gpu docker image...")
else
	$(call cecho, "[CPU Docker] Running cpu docker image...")
endif
# Print usage. FIX: the old text advertised targets that do not exist in this
# file (config-files, run-cpu, run-gpu, ...); it now lists the real ones.
help:
	@echo ""
	@echo "Makefile for $(PROGRAM)"
	@echo ""
	@echo "DOCKER COMMANDS"
	@echo "make [run | run-benchmark | run-kfold | run-dataset | run-tensorboard | run-jupyter | run-test]"
	@echo "-----------------------------------------------------------------------------------"
	@echo " - run             : Run docker with train command and specific configurations \
	\n\t\t\tconfigurable variables: \
	\n\t\t\t* NETWORK_CF|OPTIMIZER_CF|DATASET_CF=path \
	\n\t\t\t* GPU=true|false \
	\n\t\t\t* CUDA_VISIBLE_DEVICES=list"
	@echo " - run-benchmark   : Run docker image with benchmark command"
	@echo " - run-kfold       : Run docker image with kfoldtrain command"
	@echo " - run-tensorboard : Run docker image with tensorboard command"
	@echo " - run-jupyter     : Run docker image with jupyter lab command"
	@echo " - run-dataset     : Run docker image with process-dataset command \
	\n\t\t\tconfigurable variables: \
	\n\t\t\t* PROCESS_DATASET=ck|mmi|oulu"
	@echo " - run-test        : Run docker image in test mode"
	@echo ""
	@echo "EXPERIMENT COMMANDS"
	@echo "make [process-dataset | train | benchmark | kfoldtrain | tensorboard | jupyter]"
	@echo "-----------------------------------------------------"
	@echo " - process-dataset : Process dataset"
	@echo " - train           : Run experiment"
	@echo " - benchmark       : Run dataset benchmark"
	@echo " - kfoldtrain      : Run k-fold training"
	@echo " - tensorboard     : Run tensorboard"
	@echo " - jupyter         : Run jupyter lab"
.PHONY: config-files run-cpu run-cpu-tensorboard run-gpu run-gpu-tensorboard process-ck process-mmi train tensorboard |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment