# Source: TensorSpeech/TensorFlowTTS
# Path: examples/melgan_stft/conf/melgan_stft.v1.yaml
---
# This is the hyperparameter configuration file for MelGAN with Multi Resolution STFT.
# Please make sure this is adjusted for the LJSpeech dataset. If you want to
# apply it to another dataset, you might need to carefully change some parameters.
# This configuration performs 4000k iters.

###########################################################
#              FEATURE EXTRACTION SETTING                 #
###########################################################
sampling_rate: 22050
hop_size: 256  # Hop size.
format: "npy"

###########################################################
#         GENERATOR NETWORK ARCHITECTURE SETTING          #
###########################################################
model_type: "melgan_generator"

melgan_generator_params:
  out_channels: 1  # Number of output channels.
  kernel_size: 7  # Kernel size of initial and final conv layers.
  filters: 512  # Initial number of channels for conv layers.
  upsample_scales: [8, 8, 2, 2]  # List of upsampling scales.
  stack_kernel_size: 3  # Kernel size of dilated conv layers in residual stack.
  stacks: 3  # Number of stacks in a single residual stack module.
  is_weight_norm: false

###########################################################
#       DISCRIMINATOR NETWORK ARCHITECTURE SETTING        #
###########################################################
melgan_discriminator_params:
  out_channels: 1  # Number of output channels.
  scales: 3  # Number of multi-scales.
  downsample_pooling: "AveragePooling1D"  # Pooling type for the input downsampling.
  downsample_pooling_params:  # Parameters of the above pooling function.
    pool_size: 4
    strides: 2
  kernel_sizes: [5, 3]  # List of kernel sizes.
  filters: 16  # Number of channels of the initial conv layer.
  max_downsample_filters: 1024  # Maximum number of channels of downsampling layers.
  downsample_scales: [4, 4, 4, 4]  # List of downsampling scales.
  nonlinear_activation: "LeakyReLU"  # Nonlinear activation function.
  nonlinear_activation_params:  # Parameters of the nonlinear activation function.
    alpha: 0.2
  is_weight_norm: false

###########################################################
#                   STFT LOSS SETTING                     #
###########################################################
stft_loss_params:
  fft_lengths: [1024, 2048, 512]  # List of FFT sizes for STFT-based loss.
  frame_steps: [120, 240, 50]  # List of hop sizes for STFT-based loss.
  frame_lengths: [600, 1200, 240]  # List of window lengths for STFT-based loss.

###########################################################
#               ADVERSARIAL LOSS SETTING                  #
###########################################################
lambda_feat_match: 10.0
lambda_adv: 4.0

###########################################################
#                  DATA LOADER SETTING                    #
###########################################################
batch_size: 16  # Batch size for each GPU, assuming gradient_accumulation_steps == 1.
batch_max_steps: 8192  # Length of each audio in batch for training. Must be divisible by hop_size.
batch_max_steps_valid: 81920  # Length of each audio for validation. Must be divisible by hop_size.
remove_short_samples: true  # Whether to remove samples shorter than batch_max_steps.
allow_cache: true  # Whether to allow cache in dataset. If true, it requires CPU memory.
is_shuffle: true  # Shuffle the dataset after each epoch.

###########################################################
#            OPTIMIZER & SCHEDULER SETTING                #
###########################################################
generator_optimizer_params:
  lr_fn: "PiecewiseConstantDecay"
  lr_params:
    boundaries: [100000]  # = discriminator_train_start_steps.
    values: [0.0005, 0.0001]  # Learning rate for each interval.

discriminator_optimizer_params:
  lr_fn: "PiecewiseConstantDecay"
  lr_params:
    boundaries: [0]  # After resume, the global step is 100k but the local discriminator step is 0.
    values: [0.0001, 0.0001]  # Learning rate for each interval.

gradient_accumulation_steps: 1

###########################################################
#                    INTERVAL SETTING                     #
###########################################################
discriminator_train_start_steps: 100000  # Step at which discriminator training begins.
train_max_steps: 4000000  # Number of training steps.
save_interval_steps: 20000  # Interval steps to save checkpoint.
eval_interval_steps: 5000  # Interval steps to evaluate the network.
log_interval_steps: 200  # Interval steps to record the training log.

###########################################################
#                     OTHER SETTING                       #
###########################################################
num_save_intermediate_results: 1  # Number of batches to be saved as intermediate results.