Other (Open Access)

ESPnet2 pretrained model, kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave, fs=24000, lang=jp

kan-bayashi


Citation Style Language JSON Export

{
  "publisher": "Zenodo", 
  "DOI": "10.5281/zenodo.4032224", 
  "author": [
    {
      "family": "kan-bayashi"
    }
  ], 
  "issued": {
    "date-parts": [
      [
        2020, 
        9, 
        16
      ]
    ]
  }, 
  "abstract": "This model was trained by kan-bayashi using jsut/tts1 recipe in <a href=\"https://github.com/espnet/espnet/\">espnet</a>.\n<p>&nbsp;</p>\n<ul>\n<li><strong>Python API</strong><pre><code class=\"language-python\">See https://github.com/espnet/espnet_model_zoo</code></pre></li>\n<li><strong>Evaluate in the recipe</strong><pre><code class=\"language-bash\">git clone https://github.com/espnet/espnet\ncd espnet\ngit checkout d8a028a24cddab209157f62cbb64aca26fefffc0\npip install -e .\ncd egs2/jsut/tts1\n# Download the model file here\n./run.sh --skip_data_prep false --skip_train true --download_model kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave</code>\n</pre></li>\n<li><strong>Config</strong><pre><code>config: conf/tuning/train_fastspeech2.yaml\nprint_config: false\nlog_level: INFO\ndry_run: false\niterator_type: sequence\noutput_dir: exp/tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk\nngpu: 1\nseed: 0\nnum_workers: 1\nnum_att_plot: 3\ndist_backend: nccl\ndist_init_method: env://\ndist_world_size: null\ndist_rank: null\nlocal_rank: 0\ndist_master_addr: null\ndist_master_port: null\ndist_launcher: null\nmultiprocessing_distributed: false\ncudnn_enabled: true\ncudnn_benchmark: false\ncudnn_deterministic: true\ncollect_stats: false\nwrite_collected_feats: false\nmax_epoch: 1000\npatience: null\nval_scheduler_criterion:\n- valid\n- loss\nearly_stopping_criterion:\n- valid\n- loss\n- min\nbest_model_criterion:\n-   - valid\n    - loss\n    - min\n-   - train\n    - loss\n    - min\nkeep_nbest_models: 5\ngrad_clip: 1.0\ngrad_clip_type: 2.0\ngrad_noise: false\naccum_grad: 8\nno_forward_run: false\nresume: true\ntrain_dtype: float32\nuse_amp: false\nlog_interval: null\npretrain_path: []\npretrain_key: []\nnum_iters_per_epoch: 500\nbatch_size: 20\nvalid_batch_size: null\nbatch_bins: 3000000\nvalid_batch_bins: null\ntrain_shape_file:\n- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/text_shape.phn\n- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/speech_shape\nvalid_shape_file:\n- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/text_shape.phn\n- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/speech_shape\nbatch_type: numel\nvalid_batch_type: null\nfold_length:\n- 150\n- 240000\nsort_in_batch: descending\nsort_batch: descending\nmultiple_iterator: false\nchunk_length: 500\nchunk_shift_ratio: 0.5\nnum_cache_chunks: 1024\ntrain_data_path_and_name_and_type:\n-   - dump/raw/tr_no_dev/text\n    - text\n    - text\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/tr_no_dev/durations\n    - durations\n    - text_int\n-   - dump/raw/tr_no_dev/wav.scp\n    - speech\n    - sound\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/collect_feats/pitch.scp\n    - pitch\n    - npy\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/collect_feats/energy.scp\n    - energy\n    - npy\nvalid_data_path_and_name_and_type:\n-   - dump/raw/dev/text\n    - text\n    - text\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/dev/durations\n    - durations\n    - text_int\n-   - 
dump/raw/dev/wav.scp\n    - speech\n    - sound\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/collect_feats/pitch.scp\n    - pitch\n    - npy\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/collect_feats/energy.scp\n    - energy\n    - npy\nallow_variable_data_keys: false\nmax_cache_size: 0.0\nvalid_max_cache_size: null\noptim: adam\noptim_conf:\n    lr: 1.0\nscheduler: noamlr\nscheduler_conf:\n    model_size: 384\n    warmup_steps: 4000\ntoken_list:\n- \n- \n- ty\n- dy\n- v\n- py\n- my\n- by\n- ny\n- hy\n- gy\n- ry\n- ky\n- f\n- p\n- z\n- ch\n- ts\n- j\n- b\n- y\n- h\n- cl\n- I\n- U\n- w\n- g\n- d\n- sh\n- pau\n- m\n- N\n- s\n- r\n- t\n- n\n- k\n- e\n- u\n- i\n- o\n- a\n- \nodim: null\nmodel_conf: {}\nuse_preprocessor: true\ntoken_type: phn\nbpemodel: null\nnon_linguistic_symbols: null\ncleaner: jaconv\ng2p: pyopenjtalk\nfeats_extract: fbank\nfeats_extract_conf:\n    fs: 24000\n    fmin: 80\n    fmax: 7600\n    n_mels: 80\n    hop_length: 300\n    n_fft: 2048\n    win_length: 1200\nnormalize: global_mvn\nnormalize_conf:\n    stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/feats_stats.npz\ntts: fastspeech2\ntts_conf:\n    adim: 384\n    aheads: 2\n    elayers: 4\n    eunits: 1536\n    dlayers: 4\n    dunits: 1536\n    positionwise_layer_type: conv1d\n    positionwise_conv_kernel_size: 3\n    duration_predictor_layers: 2\n    duration_predictor_chans: 256\n    duration_predictor_kernel_size: 3\n    postnet_layers: 5\n    postnet_filts: 5\n    postnet_chans: 256\n    use_masking: true\n    use_scaled_pos_enc: true\n    encoder_normalize_before: false\n    decoder_normalize_before: false\n    reduction_factor: 1\n    init_type: xavier_uniform\n    init_enc_alpha: 1.0\n    init_dec_alpha: 1.0\n    transformer_enc_dropout_rate: 0.2\n    transformer_enc_positional_dropout_rate: 0.2\n    transformer_enc_attn_dropout_rate: 0.2\n    transformer_dec_dropout_rate: 0.2\n    transformer_dec_positional_dropout_rate: 0.2\n    transformer_dec_attn_dropout_rate: 0.2\n    pitch_predictor_layers: 5\n    pitch_predictor_chans: 256\n    pitch_predictor_kernel_size: 5\n    pitch_predictor_dropout: 0.5\n    pitch_embed_kernel_size: 1\n    pitch_embed_dropout: 0.0\n    stop_gradient_from_pitch_predictor: true\n    energy_predictor_layers: 2\n    energy_predictor_chans: 256\n    energy_predictor_kernel_size: 3\n    energy_predictor_dropout: 0.5\n    energy_embed_kernel_size: 1\n    energy_embed_dropout: 0.0\n    stop_gradient_from_energy_predictor: false\npitch_extract: dio\npitch_extract_conf:\n    fs: 24000\n    n_fft: 2048\n    hop_length: 300\n    f0max: 400\n    f0min: 80\npitch_normalize: global_mvn\npitch_normalize_conf:\n    stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/pitch_stats.npz\nenergy_extract: energy\nenergy_extract_conf:\n    fs: 24000\n    n_fft: 2048\n    hop_length: 300\n    win_length: 1200\nenergy_normalize: global_mvn\nenergy_normalize_conf:\n    stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/energy_stats.npz\nrequired:\n- output_dir\n- token_list\ndistributed: false</code></pre></li>\n</ul>", 
  "title": "ESPnet2 pretrained model, kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave, fs=24000, lang=jp", 
  "type": "article", 
  "id": "4032224"
}
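
The Python API item in the abstract only points to https://github.com/espnet/espnet_model_zoo. As a minimal, hedged sketch of what that looks like in practice: the ModelDownloader / Text2Speech pattern below follows that repository's documentation, while the structure of the inference output is an assumption that varies between espnet releases.

# Hedged sketch, not part of this record: load the model via espnet_model_zoo
# and run inference once. Requires: pip install espnet espnet_model_zoo soundfile
import soundfile as sf
from espnet_model_zoo.downloader import ModelDownloader
from espnet2.bin.tts_inference import Text2Speech

d = ModelDownloader()
# download_and_unpack() resolves the model name to this Zenodo archive and returns
# the config/checkpoint paths that Text2Speech accepts as keyword arguments.
text2speech = Text2Speech(
    **d.download_and_unpack(
        "kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave"
    )
)

# cleaner=jaconv and g2p=pyopenjtalk (see the config above) accept raw Japanese text.
output = text2speech("これはテスト音声です。")

# FastSpeech2 predicts 80-dim mel features (fs=24000, hop_length=300 per the config);
# a waveform is only present when a vocoder (or, in some espnet versions, a
# Griffin-Lim fallback) is attached, so handle both cases.
if isinstance(output, dict) and "wav" in output:
    sf.write("out.wav", output["wav"].cpu().numpy(), 24000, "PCM_16")
else:
    print(output)  # inspect the generated features / durations / pitch / energy

If audible output is the goal, pairing the model with a neural vocoder trained on the same 24 kHz, hop-300 feature setting is the usual route; the record itself ships only the FastSpeech2 acoustic model.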
Statistics (All versions / This version)
Views: 48 / 48
Downloads: 634 / 634
Data volume: 94.7 GB / 94.7 GB
Unique views: 45 / 45
Unique downloads: 390 / 390
