---
default:
  sr: 16000
  frame_shift: 0.005
  frame_length: 0.025
  win_length: 400
  hop_length: 80
  n_fft: 512
  preemphasis: 0.97
  n_mfcc: 40
  n_iter: 60 # Number of inversion iterations
  n_mels: 80
  duration: 2
  max_db: 35
  min_db: -55
  # model
  hidden_units: 256 # alias: E
  num_banks: 16
  num_highway_blocks: 4
  norm_type: 'ins' # a normalizer function. value: bn, ln, ins, or None
  t: 1.0 # temperature
  dropout_rate: 0.2
  # train
  batch_size: 32
  logdir_path: '/data/private/vc/logdir'

train1:
  # path
  data_path: '/content/deep-voice-conversion/timit/data/TRAIN/*/*/*.wav'
  # model
  hidden_units: 128 # alias: E
  num_banks: 8
  num_highway_blocks: 4
  norm_type: 'ins' # a normalizer function. value: bn, ln, ins, or None
  t: 1.0 # temperature
  dropout_rate: 0.2
  # train
  batch_size: 32
  lr: 0.0003
  num_epochs: 1000
  steps_per_epoch: 100
  save_per_epoch: 2
  num_gpu: 2
---
train2:
  # path
  data_path: '/data/private/vc/datasets/arctic/slt/*.wav'
  # model
  hidden_units: 256 # alias: E
  num_banks: 8
  num_highway_blocks: 8
  norm_type: 'ins' # a normalizer function. value: bn, ln, ins, or None
  t: 1.0 # temperature
  dropout_rate: 0.2
  # train
  batch_size: 32
  lr: 0.0003
  lr_cyclic_margin: 0.
  lr_cyclic_steps: 5000
  clip_value_max: 3.
  clip_value_min: -3.
  clip_norm: 10
  num_epochs: 10000
  steps_per_epoch: 100
  save_per_epoch: 50
  test_per_epoch: 1
  num_gpu: 4
---
test1:
  # path
  data_path: '/data/private/vc/datasets/timit/TIMIT/TEST/*/*/*.wav'
  # test
  batch_size: 32
---
test2:
  # path
  data_path: '/data/private/vc/datasets/arctic/slt/*.wav'
  # test
  batch_size: 32
---
convert:
  # path
  data_path: '/data/private/vc/datasets/arctic/bdl/*.wav'
  # convert
  one_full_wav: False
  batch_size: 1
  emphasis_magnitude: 1.2
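The frame parameters in `default` are mutually consistent: at sr = 16000 Hz, a frame_shift of 0.005 s is 80 samples (hop_length) and a frame_length of 0.025 s is 400 samples (win_length), with n_fft = 512 the next power of two above the window. A quick sanity check:

# window/hop sample counts follow from the frame timings
sr = 16000                # default.sr
frame_shift = 0.005       # seconds
frame_length = 0.025      # seconds

hop_length = int(sr * frame_shift)    # 16000 * 0.005 = 80
win_length = int(sr * frame_length)   # 16000 * 0.025 = 400

assert hop_length == 80 and win_length == 400
assert 2 ** 9 == 512 >= win_length    # n_fft covers the analysis window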
Stage-1 training is then launched from a notebook cell, with `test124` as the experiment case name:

! python train1.py -gpu 0 test124
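train1.py receives the case name and loads these hyperparameters via hp.set_hparam_yaml. Note that the `---` markers split the file into several YAML documents, so a plain yaml.safe_load would only return the first one. Below is a minimal sketch of a loader that merges all documents into one dict, assuming PyYAML; the repo's own hparam module may differ in details:

import yaml

def load_hparam(path):
    # safe_load_all yields one dict per ---separated YAML document;
    # merge their top-level sections (default, train1, train2, ...)
    merged = {}
    with open(path) as stream:
        for doc in yaml.safe_load_all(stream):
            if doc:
                merged.update(doc)
    return merged

hp = load_hparam('default.yaml')      # hypothetical filename
print(hp['train1']['num_epochs'])     # 1000
print(hp['default']['logdir_path'])   # /data/private/vc/logdir

Per-case sections repeat model keys (train1 sets hidden_units: 128 where default has 256), so presumably a case's own values take precedence over default wherever both define a key.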
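The script below reads these sections with attribute syntax (hp.train1.batch_size) rather than dict indexing. A minimal Dotdict sketch that would allow this, assuming the repo's hparam module does something similar:

class Dotdict(dict):
    """dict whose keys are also readable as attributes (hp.train1.batch_size)."""
    def __getattr__(self, name):
        value = self[name]
        # wrap nested dicts so chained attribute access keeps working
        return Dotdict(value) if isinstance(value, dict) else value

hp = Dotdict(load_hparam('default.yaml'))   # load_hparam from the sketch above
assert hp.train1.batch_size == 32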
# -*- coding: utf-8 -*-
# /usr/bin/python2

import argparse
import os

import tensorflow as tf
from tensorpack.callbacks.saver import ModelSaver
from tensorpack.input_source.input_source import QueueInput
from tensorpack.tfutils.sessinit import SaverRestore
from tensorpack.train.interface import TrainConfig, launch_train_with_config
from tensorpack.train.trainers import SimpleTrainer
from tensorpack.utils import logger

from data_load import Net1DataFlow
from hparam import hparam as hp
from models import Net1


def train(args, logdir):
    # model
    model = Net1()

    # dataflow
    df = Net1DataFlow(hp.train1.data_path, hp.train1.batch_size)

    # set logger for event and model saver
    logger.set_logger_dir(logdir)

    session_conf = tf.ConfigProto(
        gpu_options=tf.GPUOptions(allow_growth=True),
    )

    train_conf = TrainConfig(
        model=model,
        data=QueueInput(df(n_prefetch=1000, n_thread=4)),
        callbacks=[
            ModelSaver(checkpoint_dir=logdir),
            # TODO EvalCallback()
        ],
        max_epoch=hp.train1.num_epochs,
        steps_per_epoch=hp.train1.steps_per_epoch,
        # session_config=session_conf
    )

    # resume from an explicit checkpoint if given, else from the latest one
    ckpt = '{}/{}'.format(logdir, args.ckpt) if args.ckpt else tf.train.latest_checkpoint(logdir)
    if ckpt:
        train_conf.session_init = SaverRestore(ckpt)

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
        # train_conf.nr_tower = len(args.gpu.split(','))

    trainer = SimpleTrainer()
    launch_train_with_config(train_conf, trainer=trainer)


def get_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('case', type=str, help='experiment case name')
    parser.add_argument('-ckpt', help='checkpoint to load model.')
    parser.add_argument('-gpu', help='comma separated list of GPU(s) to use.')
    arguments = parser.parse_args()
    return arguments
if __name__ == '__main__':
    args = get_arguments()
    hp.set_hparam_yaml(args.case)
    logdir_train1 = '{}/train1'.format(hp.logdir)
    print('case: {}, logdir: {}'.format(args.case, logdir_train1))
    train(args, logdir=logdir_train1)
    print("Done")
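Because ckpt falls back to tf.train.latest_checkpoint(logdir), rerunning the same case resumes from the newest checkpoint automatically, while -ckpt names a specific checkpoint inside the case's train1 logdir. A usage sketch; the checkpoint name model-3000 is hypothetical (tensorpack's ModelSaver writes model-<step> style files by default):

# resume from the latest checkpoint under <hp.logdir>/train1
! python train1.py -gpu 0 test124

# or restore a specific checkpoint by name (model-3000 is hypothetical)
! python train1.py -ckpt model-3000 -gpu 0 test124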