# Source path: blob/master/examples/melgan_stft/conf/melgan_stft.v1.yaml
# (1559 views — extraction artifact from the hosting page, kept as a comment)
# This is the hyperparameter configuration file for MelGAN with Multi Resolution STFT.
# Please make sure this is adjusted for the LJSpeech dataset. If you want to
# apply to the other dataset, you might need to carefully change some parameters.
# This configuration performs 4000k iters.

###########################################################
#                FEATURE EXTRACTION SETTING               #
###########################################################
sampling_rate: 22050
hop_size: 256  # Hop size.
format: "npy"

###########################################################
#         GENERATOR NETWORK ARCHITECTURE SETTING          #
###########################################################
model_type: "melgan_generator"

melgan_generator_params:
  out_channels: 1                # Number of output channels.
  kernel_size: 7                 # Kernel size of initial and final conv layers.
  filters: 512                   # Initial number of channels for conv layers.
  upsample_scales: [8, 8, 2, 2]  # List of upsampling scales.
  stack_kernel_size: 3           # Kernel size of dilated conv layers in residual stack.
  stacks: 3                      # Number of stacks in a single residual stack module.
  is_weight_norm: false

###########################################################
#       DISCRIMINATOR NETWORK ARCHITECTURE SETTING        #
###########################################################
melgan_discriminator_params:
  out_channels: 1                         # Number of output channels.
  scales: 3                               # Number of multi-scales.
  downsample_pooling: "AveragePooling1D"  # Pooling type for the input downsampling.
  downsample_pooling_params:              # Parameters of the above pooling function.
    pool_size: 4
    strides: 2
  kernel_sizes: [5, 3]                    # List of kernel sizes.
  filters: 16                             # Number of channels of the initial conv layer.
  max_downsample_filters: 1024            # Maximum number of channels of downsampling layers.
  downsample_scales: [4, 4, 4, 4]         # List of downsampling scales.
  nonlinear_activation: "LeakyReLU"       # Nonlinear activation function.
  nonlinear_activation_params:            # Parameters of nonlinear activation function.
    alpha: 0.2
  is_weight_norm: false

###########################################################
#                    STFT LOSS SETTING                    #
###########################################################
stft_loss_params:
  fft_lengths: [1024, 2048, 512]   # List of FFT sizes for STFT-based loss.
  frame_steps: [120, 240, 50]      # List of hop sizes for STFT-based loss.
  frame_lengths: [600, 1200, 240]  # List of window lengths for STFT-based loss.

###########################################################
#                 ADVERSARIAL LOSS SETTING                #
###########################################################
lambda_feat_match: 10.0
lambda_adv: 4.0

###########################################################
#                   DATA LOADER SETTING                   #
###########################################################
batch_size: 16                # Batch size for each GPU with assuming that gradient_accumulation_steps == 1.
batch_max_steps: 8192         # Length of each audio in batch for training. Make sure dividable by hop_size.
batch_max_steps_valid: 81920  # Length of each audio for validation. Make sure dividable by hop_size.
remove_short_samples: true    # Whether to remove samples the length of which are less than batch_max_steps.
allow_cache: true             # Whether to allow cache in dataset. If true, it requires cpu memory.
is_shuffle: true              # Shuffle dataset after each epoch.

###########################################################
#             OPTIMIZER & SCHEDULER SETTING               #
###########################################################
generator_optimizer_params:
  lr_fn: "PiecewiseConstantDecay"
  lr_params:
    boundaries: [100000]      # = discriminator_train_start_steps.
    values: [0.0005, 0.0001]  # Learning rate for each interval.

discriminator_optimizer_params:
  lr_fn: "PiecewiseConstantDecay"
  lr_params:
    boundaries: [0]           # After resume and start training discriminator, global steps is 100k, but local discriminator step is 0.
    values: [0.0001, 0.0001]  # Learning rate for each interval.

gradient_accumulation_steps: 1

###########################################################
#                     INTERVAL SETTING                    #
###########################################################
discriminator_train_start_steps: 100000  # Steps to begin training discriminator.
train_max_steps: 4000000                 # Number of training steps.
save_interval_steps: 20000               # Interval steps to save checkpoint.
eval_interval_steps: 5000                # Interval steps to evaluate the network.
log_interval_steps: 200                  # Interval steps to record the training log.

###########################################################
#                      OTHER SETTING                      #
###########################################################
num_save_intermediate_results: 1  # Number of batches to be saved as intermediate results.