
ESPnet2 pretrained model, kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave, fs=24000, lang=jp

kan-bayashi
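The record description below points to the ESPnet model zoo for the Python API. As a minimal sketch (not taken from the record itself, and assuming recent `espnet`, `espnet_model_zoo`, `pyopenjtalk`, and `soundfile` installations; the dict-style return value and the `fs` attribute are assumptions that may differ across ESPnet versions), the model tag from the title can be passed to `Text2Speech.from_pretrained`:

import soundfile as sf
from espnet2.bin.tts_inference import Text2Speech

# Resolves the Zenodo model tag through espnet_model_zoo and builds the TTS model.
tts = Text2Speech.from_pretrained(
    "kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave"
)

# FastSpeech2 predicts mel spectrograms; with no separate neural vocoder supplied,
# ESPnet falls back to Griffin-Lim for waveform generation.
out = tts("こんにちは、世界。")  # example Japanese input; g2p is pyopenjtalk
sf.write("out.wav", out["wav"].numpy(), tts.fs)  # fs=24000 for this model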


JSON Export

{
  "files": [
    {
      "links": {
        "self": "https://zenodo.org/api/files/4e9c7f59-ca57-40ba-a96a-a0fbaf17d7e6/tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave.zip"
      }, 
      "checksum": "md5:94153ee8500bc781582ec5d87dec504a", 
      "bucket": "4e9c7f59-ca57-40ba-a96a-a0fbaf17d7e6", 
      "key": "tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave.zip", 
      "type": "zip", 
      "size": 149345503
    }
  ], 
  "owners": [
    116548
  ], 
  "doi": "10.5281/zenodo.4032224", 
  "stats": {
    "version_unique_downloads": 390.0, 
    "unique_views": 45.0, 
    "views": 48.0, 
    "version_views": 48.0, 
    "unique_downloads": 390.0, 
    "version_unique_views": 45.0, 
    "volume": 94685048902.0, 
    "version_downloads": 634.0, 
    "downloads": 634.0, 
    "version_volume": 94685048902.0
  }, 
  "links": {
    "doi": "https://doi.org/10.5281/zenodo.4032224", 
    "conceptdoi": "https://doi.org/10.5281/zenodo.4032223", 
    "bucket": "https://zenodo.org/api/files/4e9c7f59-ca57-40ba-a96a-a0fbaf17d7e6", 
    "conceptbadge": "https://zenodo.org/badge/doi/10.5281/zenodo.4032223.svg", 
    "html": "https://zenodo.org/record/4032224", 
    "latest_html": "https://zenodo.org/record/4032224", 
    "badge": "https://zenodo.org/badge/doi/10.5281/zenodo.4032224.svg", 
    "latest": "https://zenodo.org/api/records/4032224"
  }, 
  "conceptdoi": "10.5281/zenodo.4032223", 
  "created": "2020-09-16T06:58:50.837299+00:00", 
  "updated": "2021-06-22T12:36:24.494555+00:00", 
  "conceptrecid": "4032223", 
  "revision": 2, 
  "id": 4032224, 
  "metadata": {
    "access_right_category": "success", 
    "doi": "10.5281/zenodo.4032224", 
    "description": "This model was trained by kan-bayashi using jsut/tts1 recipe in <a href=\"https://github.com/espnet/espnet/\">espnet</a>.\n<p>&nbsp;</p>\n<ul>\n<li><strong>Python API</strong><pre><code class=\"language-python\">See https://github.com/espnet/espnet_model_zoo</code></pre></li>\n<li><strong>Evaluate in the recipe</strong><pre><code class=\"language-bash\">git clone https://github.com/espnet/espnet\ncd espnet\ngit checkout d8a028a24cddab209157f62cbb64aca26fefffc0\npip install -e .\ncd egs2/jsut/tts1\n# Download the model file here\n./run.sh --skip_data_prep false --skip_train true --download_model kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave</code>\n</pre></li>\n<li><strong>Config</strong><pre><code>config: conf/tuning/train_fastspeech2.yaml\nprint_config: false\nlog_level: INFO\ndry_run: false\niterator_type: sequence\noutput_dir: exp/tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk\nngpu: 1\nseed: 0\nnum_workers: 1\nnum_att_plot: 3\ndist_backend: nccl\ndist_init_method: env://\ndist_world_size: null\ndist_rank: null\nlocal_rank: 0\ndist_master_addr: null\ndist_master_port: null\ndist_launcher: null\nmultiprocessing_distributed: false\ncudnn_enabled: true\ncudnn_benchmark: false\ncudnn_deterministic: true\ncollect_stats: false\nwrite_collected_feats: false\nmax_epoch: 1000\npatience: null\nval_scheduler_criterion:\n- valid\n- loss\nearly_stopping_criterion:\n- valid\n- loss\n- min\nbest_model_criterion:\n-   - valid\n    - loss\n    - min\n-   - train\n    - loss\n    - min\nkeep_nbest_models: 5\ngrad_clip: 1.0\ngrad_clip_type: 2.0\ngrad_noise: false\naccum_grad: 8\nno_forward_run: false\nresume: true\ntrain_dtype: float32\nuse_amp: false\nlog_interval: null\npretrain_path: []\npretrain_key: []\nnum_iters_per_epoch: 500\nbatch_size: 20\nvalid_batch_size: null\nbatch_bins: 3000000\nvalid_batch_bins: null\ntrain_shape_file:\n- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/text_shape.phn\n- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/speech_shape\nvalid_shape_file:\n- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/text_shape.phn\n- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/speech_shape\nbatch_type: numel\nvalid_batch_type: null\nfold_length:\n- 150\n- 240000\nsort_in_batch: descending\nsort_batch: descending\nmultiple_iterator: false\nchunk_length: 500\nchunk_shift_ratio: 0.5\nnum_cache_chunks: 1024\ntrain_data_path_and_name_and_type:\n-   - dump/raw/tr_no_dev/text\n    - text\n    - text\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/tr_no_dev/durations\n    - durations\n    - text_int\n-   - dump/raw/tr_no_dev/wav.scp\n    - speech\n    - sound\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/collect_feats/pitch.scp\n    - pitch\n    - npy\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/collect_feats/energy.scp\n    - energy\n    - npy\nvalid_data_path_and_name_and_type:\n-   - dump/raw/dev/text\n    - text\n    - text\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/dev/durations\n    - durations\n    - text_int\n-   - 
dump/raw/dev/wav.scp\n    - speech\n    - sound\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/collect_feats/pitch.scp\n    - pitch\n    - npy\n-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/collect_feats/energy.scp\n    - energy\n    - npy\nallow_variable_data_keys: false\nmax_cache_size: 0.0\nvalid_max_cache_size: null\noptim: adam\noptim_conf:\n    lr: 1.0\nscheduler: noamlr\nscheduler_conf:\n    model_size: 384\n    warmup_steps: 4000\ntoken_list:\n- \n- \n- ty\n- dy\n- v\n- py\n- my\n- by\n- ny\n- hy\n- gy\n- ry\n- ky\n- f\n- p\n- z\n- ch\n- ts\n- j\n- b\n- y\n- h\n- cl\n- I\n- U\n- w\n- g\n- d\n- sh\n- pau\n- m\n- N\n- s\n- r\n- t\n- n\n- k\n- e\n- u\n- i\n- o\n- a\n- \nodim: null\nmodel_conf: {}\nuse_preprocessor: true\ntoken_type: phn\nbpemodel: null\nnon_linguistic_symbols: null\ncleaner: jaconv\ng2p: pyopenjtalk\nfeats_extract: fbank\nfeats_extract_conf:\n    fs: 24000\n    fmin: 80\n    fmax: 7600\n    n_mels: 80\n    hop_length: 300\n    n_fft: 2048\n    win_length: 1200\nnormalize: global_mvn\nnormalize_conf:\n    stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/feats_stats.npz\ntts: fastspeech2\ntts_conf:\n    adim: 384\n    aheads: 2\n    elayers: 4\n    eunits: 1536\n    dlayers: 4\n    dunits: 1536\n    positionwise_layer_type: conv1d\n    positionwise_conv_kernel_size: 3\n    duration_predictor_layers: 2\n    duration_predictor_chans: 256\n    duration_predictor_kernel_size: 3\n    postnet_layers: 5\n    postnet_filts: 5\n    postnet_chans: 256\n    use_masking: true\n    use_scaled_pos_enc: true\n    encoder_normalize_before: false\n    decoder_normalize_before: false\n    reduction_factor: 1\n    init_type: xavier_uniform\n    init_enc_alpha: 1.0\n    init_dec_alpha: 1.0\n    transformer_enc_dropout_rate: 0.2\n    transformer_enc_positional_dropout_rate: 0.2\n    transformer_enc_attn_dropout_rate: 0.2\n    transformer_dec_dropout_rate: 0.2\n    transformer_dec_positional_dropout_rate: 0.2\n    transformer_dec_attn_dropout_rate: 0.2\n    pitch_predictor_layers: 5\n    pitch_predictor_chans: 256\n    pitch_predictor_kernel_size: 5\n    pitch_predictor_dropout: 0.5\n    pitch_embed_kernel_size: 1\n    pitch_embed_dropout: 0.0\n    stop_gradient_from_pitch_predictor: true\n    energy_predictor_layers: 2\n    energy_predictor_chans: 256\n    energy_predictor_kernel_size: 3\n    energy_predictor_dropout: 0.5\n    energy_embed_kernel_size: 1\n    energy_embed_dropout: 0.0\n    stop_gradient_from_energy_predictor: false\npitch_extract: dio\npitch_extract_conf:\n    fs: 24000\n    n_fft: 2048\n    hop_length: 300\n    f0max: 400\n    f0min: 80\npitch_normalize: global_mvn\npitch_normalize_conf:\n    stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/pitch_stats.npz\nenergy_extract: energy\nenergy_extract_conf:\n    fs: 24000\n    n_fft: 2048\n    hop_length: 300\n    win_length: 1200\nenergy_normalize: global_mvn\nenergy_normalize_conf:\n    stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/energy_stats.npz\nrequired:\n- output_dir\n- token_list\ndistributed: false</code></pre></li>\n</ul>", 
    "license": {
      "id": "CC-BY-4.0"
    }, 
    "title": "ESPnet2 pretrained model, kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave, fs=24000, lang=jp", 
    "relations": {
      "version": [
        {
          "count": 1, 
          "index": 0, 
          "parent": {
            "pid_type": "recid", 
            "pid_value": "4032223"
          }, 
          "is_last": true, 
          "last_child": {
            "pid_type": "recid", 
            "pid_value": "4032224"
          }
        }
      ]
    }, 
    "communities": [
      {
        "id": "espnet"
      }
    ], 
    "keywords": [
      "ESPnet", 
      "deep-learning", 
      "python", 
      "pytorch", 
      "speech-recognition", 
      "speech-synthesis", 
      "speech-translation", 
      "machine-translation"
    ], 
    "publication_date": "2020-09-16", 
    "creators": [
      {
        "name": "kan-bayashi"
      }
    ], 
    "access_right": "open", 
    "resource_type": {
      "type": "other", 
      "title": "Other"
    }, 
    "related_identifiers": [
      {
        "scheme": "url", 
        "identifier": "https://github.com/espnet/espnet", 
        "relation": "isSupplementTo"
      }, 
      {
        "scheme": "doi", 
        "identifier": "10.5281/zenodo.4032223", 
        "relation": "isVersionOf"
      }
    ]
  }
}
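The "files" entry above gives a direct download link and an MD5 checksum, so the archive can also be fetched and verified outside the ESPnet tooling. A minimal sketch, assuming the third-party `requests` package; the URL and expected checksum are copied verbatim from the JSON export (Zenodo's file API paths may have changed since this export was made):

import hashlib
import requests

# "links.self" from the "files" block of the JSON export
URL = (
    "https://zenodo.org/api/files/4e9c7f59-ca57-40ba-a96a-a0fbaf17d7e6/"
    "tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave.zip"
)
EXPECTED_MD5 = "94153ee8500bc781582ec5d87dec504a"  # "checksum": "md5:..."

md5 = hashlib.md5()
with requests.get(URL, stream=True, timeout=60) as r, open("model.zip", "wb") as f:
    r.raise_for_status()
    for chunk in r.iter_content(chunk_size=1 << 20):  # stream in 1 MiB chunks
        f.write(chunk)
        md5.update(chunk)

assert md5.hexdigest() == EXPECTED_MD5, "checksum mismatch"
print("downloaded and verified:", md5.hexdigest())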
                   All versions   This version
Views                        48             48
Downloads                   634            634
Data volume             94.7 GB        94.7 GB
Unique views                 45             45
Unique downloads            390            390
