TensorSpeech
GitHub Repository: TensorSpeech/TensorFlowTTS
Path: blob/master/examples/fastspeech2/conf/fastspeech2.baker.v2.yaml
# This is the hyperparameter configuration file for FastSpeech2 v2.
# The difference between v2 and v1 is that v2 applies the Linformer technique.
# Please make sure this is adjusted for the Baker dataset. If you want to
# apply it to another dataset, you might need to carefully change some parameters.
# This configuration trains for 200k iterations, but the best checkpoint is usually reached around 150k iterations.

###########################################################
#                FEATURE EXTRACTION SETTING               #
###########################################################
hop_size: 256            # Hop size (number of samples between successive frames).
format: "npy"

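# Note (added, not part of the upstream file): assuming a 24 kHz sampling rate for the
# Baker audio, a hop_size of 256 corresponds to 256 / 24000 ≈ 10.7 ms per frame,
# i.e. roughly 94 mel frames per second of audio.
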
###########################################################
#              NETWORK ARCHITECTURE SETTING               #
###########################################################
model_type: "fastspeech2"

fastspeech2_params:
    dataset: baker
    n_speakers: 1
    encoder_hidden_size: 256
    encoder_num_hidden_layers: 3
    encoder_num_attention_heads: 2
    encoder_attention_head_size: 16   # in v1, = 384//2
    encoder_intermediate_size: 1024
    encoder_intermediate_kernel_size: 3
    encoder_hidden_act: "mish"
    decoder_hidden_size: 256
    decoder_num_hidden_layers: 3
    decoder_num_attention_heads: 2
    decoder_attention_head_size: 16   # in v1, = 384//2
    decoder_intermediate_size: 1024
    decoder_intermediate_kernel_size: 3
    decoder_hidden_act: "mish"
    variant_prediction_num_conv_layers: 2
    variant_predictor_filter: 256
    variant_predictor_kernel_size: 3
    variant_predictor_dropout_rate: 0.5
    num_mels: 80
    hidden_dropout_prob: 0.2
    attention_probs_dropout_prob: 0.1
    max_position_embeddings: 2048
    initializer_range: 0.02
    output_attentions: False
    output_hidden_states: False

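# Note (added, not part of the upstream file): with 2 attention heads of size 16, the
# total attention dimension per layer is 2 * 16 = 32, much smaller than the 256-dim
# hidden size; the "384//2" comments suggest v1 used head size 192 with a 384-dim hidden size.
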
###########################################################
#                   DATA LOADER SETTING                   #
###########################################################
batch_size: 16              # Batch size for each GPU, assuming gradient_accumulation_steps == 1.
remove_short_samples: true  # Whether to remove samples whose length is less than batch_max_steps.
allow_cache: true           # Whether to cache the dataset in memory. If true, it requires CPU memory.
mel_length_threshold: 32    # Remove all targets with mel_length <= 32.
is_shuffle: true            # Shuffle the dataset after each epoch.
###########################################################
#              OPTIMIZER & SCHEDULER SETTING              #
###########################################################
optimizer_params:
    initial_learning_rate: 0.001
    end_learning_rate: 0.00005
    decay_steps: 150000       # A value < train_max_steps is recommended.
    warmup_proportion: 0.02
    weight_decay: 0.001

gradient_accumulation_steps: 1
var_train_expr: null  # Trainable variable expression (e.g. 'embeddings|encoder|decoder').
                      # Multiple patterns must be separated by '|'. If var_train_expr is
                      # null, all variables are trained.
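# Note (added, not part of the upstream file): assuming warmup_proportion is applied as a
# fraction of train_max_steps (200000), a value of 0.02 corresponds to roughly 4000 warmup
# steps, after which the learning rate decays from 0.001 to 0.00005 over decay_steps.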
###########################################################
#                     INTERVAL SETTING                    #
###########################################################
train_max_steps: 200000       # Number of training steps.
save_interval_steps: 5000     # Interval steps to save checkpoints.
eval_interval_steps: 500      # Interval steps to evaluate the network.
log_interval_steps: 200       # Interval steps to record the training log.
delay_f0_energy_steps: 3      # 2 steps use LR outputs only, then 1 step uses LR + F0 + Energy.
###########################################################
#                       OTHER SETTING                     #
###########################################################
num_save_intermediate_results: 1  # Number of batches to be saved as intermediate results.
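A minimal sketch (not part of the repository file) of how a config like this can be read
in Python with PyYAML; the filename below simply mirrors the path shown above, and the
keys are the ones defined in this file:

# load_fastspeech2_config.py -- illustrative only
import yaml

with open("fastspeech2.baker.v2.yaml") as f:        # path is assumed; adjust as needed
    config = yaml.safe_load(f)                       # parse the YAML into a Python dict

params = config["fastspeech2_params"]                # nested network-architecture block
print(config["model_type"])                          # -> "fastspeech2"
print(params["encoder_hidden_size"])                 # -> 256
print(config["optimizer_params"]["decay_steps"])     # -> 150000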