Published October 1, 2020
| Version v1
Other
Open
ESPnet2 pretrained model, kan-bayashi/csmsc_tts_train_fastspeech_raw_phn_pypinyin_g2p_phone_train.loss.ave, fs=24000, lang=zh
Creators
Description
This model was trained by kan-bayashi using the csmsc/tts1 recipe in espnet.
- Python API
See https://github.com/espnet/espnet_model_zoo
- Evaluate in the recipe
git clone https://github.com/espnet/espnet
cd espnet
git checkout 51352aee9ae318640e128a645e722d1f7524edb1
pip install -e .
cd egs2/csmsc/tts1
# Download the model file here
./run.sh --skip_data_prep false --skip_train true --download_model kan-bayashi/csmsc_tts_train_fastspeech_raw_phn_pypinyin_g2p_phone_train.loss.ave
- Config
config: conf/tuning/train_fastspeech.yaml print_config: false log_level: INFO dry_run: false iterator_type: sequence output_dir: exp/tts_train_fastspeech_raw_phn_pypinyin_g2p_phone ngpu: 1 seed: 0 num_workers: 1 num_att_plot: 3 dist_backend: nccl dist_init_method: env:// dist_world_size: 4 dist_rank: 0 local_rank: 0 dist_master_addr: localhost dist_master_port: 49841 dist_launcher: null multiprocessing_distributed: true cudnn_enabled: true cudnn_benchmark: false cudnn_deterministic: true collect_stats: false write_collected_feats: false max_epoch: 200 patience: null val_scheduler_criterion: - valid - loss early_stopping_criterion: - valid - loss - min best_model_criterion: - - valid - loss - min - - train - loss - min keep_nbest_models: 5 grad_clip: 1.0 grad_clip_type: 2.0 grad_noise: false accum_grad: 1 no_forward_run: false resume: true train_dtype: float32 use_amp: false log_interval: null pretrain_path: [] pretrain_key: [] num_iters_per_epoch: 500 batch_size: 20 valid_batch_size: null batch_bins: 4800000 valid_batch_bins: null train_shape_file: - exp/tts_stats_raw_phn_pypinyin_g2p_phone/train/text_shape.phn - exp/tts_train_tacotron2_raw_phn_pypinyin_g2p_phone/decode_train.loss.ave/tr_no_dev/speech_shape valid_shape_file: - exp/tts_stats_raw_phn_pypinyin_g2p_phone/valid/text_shape.phn - exp/tts_train_tacotron2_raw_phn_pypinyin_g2p_phone/decode_train.loss.ave/dev/speech_shape batch_type: numel valid_batch_type: null fold_length: - 150 - 800 sort_in_batch: descending sort_batch: descending multiple_iterator: false chunk_length: 500 chunk_shift_ratio: 0.5 num_cache_chunks: 1024 train_data_path_and_name_and_type: - - dump/raw/tr_no_dev/text - text - text - - exp/tts_train_tacotron2_raw_phn_pypinyin_g2p_phone/decode_train.loss.ave/tr_no_dev/durations - durations - text_int - - exp/tts_train_tacotron2_raw_phn_pypinyin_g2p_phone/decode_train.loss.ave/tr_no_dev/denorm/feats.scp - speech - npy valid_data_path_and_name_and_type: - - dump/raw/dev/text - text - text - - 
exp/tts_train_tacotron2_raw_phn_pypinyin_g2p_phone/decode_train.loss.ave/dev/durations - durations - text_int - - exp/tts_train_tacotron2_raw_phn_pypinyin_g2p_phone/decode_train.loss.ave/dev/denorm/feats.scp - speech - npy allow_variable_data_keys: false max_cache_size: 0.0 valid_max_cache_size: null optim: adam optim_conf: lr: 1.0 scheduler: noamlr scheduler_conf: model_size: 384 warmup_steps: 4000 token_list: - - - d - sh - j - l - "\u3002" - zh - "\uFF0C" - i4 - x - h - b - e - g - t - m - z - q - i1 - i3 - ch - u4 - n - f - i2 - r - k - s - e4 - ai4 - a1 - c - p - ian4 - uo3 - ao3 - ai2 - ao4 - an4 - ong1 - u3 - ing2 - en2 - e2 - u2 - ui4 - ian2 - iou3 - ang4 - u1 - iao4 - uo4 - eng2 - a4 - in1 - eng1 - ou3 - ang1 - ian1 - ou4 - ing1 - uo1 - an1 - ian3 - ie3 - a3 - ing4 - an3 - an2 - "\xFC4" - iao3 - ei4 - ong2 - en1 - uei4 - "\xFCan2" - ang2 - ang3 - iu4 - iang4 - ai3 - ao1 - ou1 - eng4 - iang3 - en3 - ai1 - ong4 - ie4 - e3 - ia1 - uo2 - ia4 - "\xFC3" - uan1 - er2 - ei3 - ei2 - iang1 - i - ing3 - en4 - "\xFC2" - uan3 - e1 - in2 - iao1 - in4 - ie1 - ong3 - iang2 - ie2 - uan4 - a2 - ui3 - eng3 - uan2 - "\xFCe4" - uai4 - ou2 - "\uFF1F" - "\xFCe2" - in3 - uang3 - uang1 - iu2 - en - a - ao2 - ua4 - un1 - ui1 - uei2 - iong4 - uang2 - v3 - ui2 - iao2 - uang4 - "\xFC1" - ei1 - o2 - er4 - iou2 - iou4 - "\uFF01" - ua1 - "\xFCan4" - iu3 - un4 - "\xFCan3" - uen2 - "\xFCn4" - iu1 - un3 - uen4 - er3 - "\xFCn1" - un2 - "\xFCn2" - o4 - o1 - ua2 - uei1 - uei3 - ia3 - iong3 - ua3 - ia - "\xFCe1" - v4 - "\xFCan1" - iong1 - ia2 - uai1 - iong2 - iou1 - uai3 - "\xFCe3" - uen1 - uen3 - uai2 - o3 - er - ve4 - io1 - "\xFCn3" - u - ou - o - ang - ueng1 - v2 - uo - ao - ueng4 - ua - ei - uen - an - '2' - ueng3 - iang - "\xFC" - ie - "\uFF30" - "\uFF22" - ai - odim: 80 model_conf: {} use_preprocessor: true token_type: phn bpemodel: null non_linguistic_symbols: null cleaner: null g2p: pypinyin_g2p_phone feats_extract: null feats_extract_conf: null normalize: global_mvn normalize_conf: 
stats_file: exp/tts_stats_raw_phn_pypinyin_g2p_phone/train/feats_stats.npz tts: fastspeech tts_conf: adim: 384 aheads: 2 elayers: 6 eunits: 1536 dlayers: 6 dunits: 1536 positionwise_layer_type: conv1d positionwise_conv_kernel_size: 3 duration_predictor_layers: 2 duration_predictor_chans: 384 duration_predictor_kernel_size: 3 postnet_layers: 5 postnet_filts: 5 postnet_chans: 256 use_masking: true use_scaled_pos_enc: true encoder_normalize_before: true decoder_normalize_before: true reduction_factor: 1 init_type: xavier_uniform init_enc_alpha: 1.0 init_dec_alpha: 1.0 transformer_enc_dropout_rate: 0.1 transformer_enc_positional_dropout_rate: 0.1 transformer_enc_attn_dropout_rate: 0.1 transformer_dec_dropout_rate: 0.1 transformer_dec_positional_dropout_rate: 0.1 transformer_dec_attn_dropout_rate: 0.1 pitch_extract: null pitch_extract_conf: {} pitch_normalize: null pitch_normalize_conf: {} energy_extract: null energy_extract_conf: {} energy_normalize: null energy_normalize_conf: {} required: - output_dir - token_list distributed: true
Files
tts_train_fastspeech_raw_phn_pypinyin_g2p_phone_train.loss.ave.zip
Files
(207.8 MB)
| Name | Size | Download all |
|---|---|---|
| tts_train_fastspeech_raw_phn_pypinyin_g2p_phone_train.loss.ave.zip (md5:8cff3e56cf16de1dd776c9da39a0df90) | 207.8 MB | Preview Download |
Additional details
Related works
- Is supplement to
- https://github.com/espnet/espnet (URL)