Use Dhall (https://hackage.haskell.org/package/dhall-1.8.2/docs/Dhall-Tutorial.html) and dhall-text (https://github.com/dhall-lang/dhall-text) to generate a TensorFlow training script.
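The three expressions below fit together roughly as follows: the template is a function from the config type to Text, so applying it to a concrete config and feeding the result to dhall-to-text yields the shell script. A minimal sketch, assuming the files are saved as ./Config, ./gen_script, and ./model_config (the names used in the comment at the bottom):

-- Applying the template to a config produces the script as a Text value.
-- Rendered on the command line with, e.g.:
--   dhall-to-text <<< './gen_script ./model_config' > train.sh
./gen_script ./model_config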
-- Type decl for parameters to the training script.
{ architecture : Text
, learningRate : List Double
, steps : List Natural
, unknownPercent : Natural
, windowStrideMs : Natural
, startingCheckpoint : Optional Natural
}
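As a quick sanity check, a concrete config can be annotated with this type before rendering; a minimal sketch, assuming the type above is saved as ./Config and the record at the bottom of this gist as ./model_config:

-- Type-checks only if ./model_config has exactly the fields declared in ./Config.
./model_config : ./Config

This is the same check the template performs implicitly, since its argument is declared as config : ./Config.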
-- A template for generating the bash script using dhall-text
let P/List/map = https://ipfs.io/ipfs/Qmbh2ifwcpX9a384vJMehySbV7rdvYhzVbL5ySs84k8BgY/Prelude/List/map
in let P/Text/concat = https://ipfs.io/ipfs/Qmbh2ifwcpX9a384vJMehySbV7rdvYhzVbL5ySs84k8BgY/Prelude/Text/concat
in let P/Text/concatSep = https://ipfs.io/ipfs/QmQ8w5PLcsNz56dMvRtq54vbuPe9cNnCCUXAQp6xLc6Ccx/Prelude/Text/concatSep
in let P/Natural/show = https://ipfs.io/ipfs/QmQ8w5PLcsNz56dMvRtq54vbuPe9cNnCCUXAQp6xLc6Ccx/Prelude/Natural/show
in let P/Natural/toInteger = https://ipfs.io/ipfs/QmQ8w5PLcsNz56dMvRtq54vbuPe9cNnCCUXAQp6xLc6Ccx/Prelude/Natural/toInteger
in let P/Natural/sum = https://ipfs.io/ipfs/QmQ8w5PLcsNz56dMvRtq54vbuPe9cNnCCUXAQp6xLc6Ccx/Prelude/Natural/sum
in let P/Integer/show = https://ipfs.io/ipfs/QmQ8w5PLcsNz56dMvRtq54vbuPe9cNnCCUXAQp6xLc6Ccx/Prelude/Integer/show
in let P/Double/show = https://ipfs.io/ipfs/QmQ8w5PLcsNz56dMvRtq54vbuPe9cNnCCUXAQp6xLc6Ccx/Prelude/Double/show
in let P/Optional/fold = https://ipfs.io/ipfs/QmQ8w5PLcsNz56dMvRtq54vbuPe9cNnCCUXAQp6xLc6Ccx/Prelude/Optional/fold
in let concatSepNat =
      \(sep: Text) ->
      \(items: List Natural) ->
        P/Text/concatSep sep (P/List/map Natural Text (\(n: Natural) -> P/Integer/show (P/Natural/toInteger n)) items)
in let concatSepDouble =
      \(sep: Text) ->
      \(items: List Double) ->
        P/Text/concatSep sep (P/List/map Double Text P/Double/show items)
in let showNat =
      \(n: Natural) ->
        P/Integer/show (P/Natural/toInteger n)
in let checkpointName =
      \(checkpoint: Natural) ->
      \(arch: Text) ->
        "${arch}.ckpt-${showNat checkpoint}"
in \(config : ./Config) ->
let finalCheckpoint = P/Natural/sum config.steps
in let startCheckpoint =
      Optional/fold
        Natural
        config.startingCheckpoint
        Text
        (\(path : Natural) -> "\n  --start_checkpoint /tmp/speech_commands_train/" ++ checkpointName path config.architecture ++ "\\")
        ""
in ''
#!/bin/bash

LOGS="/tmp/retrain_logs"

cd
source ./tf/bin/activate
cd speech_commands
mkdir "$LOGS"
tensorboard --logdir=$LOGS &

python train.py --model_architecture ${config.architecture} \
  --data_dir ~/speech_dataset \
  --how_many_training_steps ${concatSepNat "," config.steps} \
  --learning_rate ${concatSepDouble "," config.learningRate} \
  --unknown_percentage ${showNat config.unknownPercent} \
  --window_stride_ms ${showNat config.windowStrideMs} \${startCheckpoint}
  2>&1 | tee "$LOGS/train_output"

python freeze.py \
  --start_checkpoint /tmp/speech_commands_train/${config.architecture}.ckpt-${showNat finalCheckpoint} \
  --model_architecture ${config.architecture} \
  --output_file /tmp/speech_commands_train/freeze.pb \
  --window_stride_ms ${showNat config.windowStrideMs}

python label_wav.py \
  --wav ../test/audio/ \
  --graph /tmp/speech_commands_train/freeze.pb \
  --labels ../conv_deeper_labels.txt \
  --label_all | tee /tmp/speech_commands_train/final.csv

sudo poweroff
''
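The template itself is just a function from the config type to the rendered Text, which can be stated as an annotation; a sketch, assuming the template above is saved as ./gen_script:

-- The whole template has this type: given a config, produce the script text.
./gen_script : forall (config : ./Config) -> Text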
-- A concrete config
{ architecture = "simple_deep1"
, learningRate = [0.01, 0.001, 0.0001]
, steps = [+25000, +25000, +6000]
, unknownPercent = +30
, windowStrideMs = +5
, startingCheckpoint = [+1] : Optional Natural
}
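To train from scratch instead of resuming, the startingCheckpoint field can presumably be left empty; the Optional/fold in the template then falls back to the empty string, so no --start_checkpoint flag is emitted. A sketch using the same list-literal Optional syntax as above:

-- Same config, but with no starting checkpoint.
{ architecture = "simple_deep1"
, learningRate = [0.01, 0.001, 0.0001]
, steps = [+25000, +25000, +6000]
, unknownPercent = +30
, windowStrideMs = +5
, startingCheckpoint = [] : Optional Natural
}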
beala commented Dec 16, 2017:

alex$ dhall-to-text --explain <<< './gen_script ./model_config'
#!/bin/bash

LOGS="/tmp/retrain_logs"

cd
source ./tf/bin/activate
cd speech_commands
mkdir "$LOGS"
tensorboard --logdir=$LOGS &

python train.py --model_architecture simple_deep1 \
  --data_dir ~/speech_dataset \
  --how_many_training_steps 25000,25000,6000 \
  --learning_rate 1.0e-2,1.0e-3,1.0e-4 \
  --unknown_percentage 30 \
  --window_stride_ms 5 \
  --start_checkpoint /tmp/speech_commands_train/simple_deep1.ckpt-1\
  2>&1 | tee "$LOGS/train_output"

python freeze.py \
  --start_checkpoint /tmp/speech_commands_train/simple_deep1.ckpt-56000 \
  --model_architecture simple_deep1 \
  --output_file /tmp/speech_commands_train/freeze.pb \
  --window_stride_ms 5

python label_wav.py \
  --wav ../test/audio/ \
  --graph /tmp/speech_commands_train/freeze.pb \
  --labels ../conv_deeper_labels.txt \
  --label_all | tee /tmp/speech_commands_train/final.csv

sudo poweroff
