Other Open Access

ESPnet2 pretrained model, kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave, fs=24000, lang=jp

kan-bayashi


DCAT Export

<?xml version='1.0' encoding='utf-8'?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:adms="http://www.w3.org/ns/adms#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dct="http://purl.org/dc/terms/" xmlns:dctype="http://purl.org/dc/dcmitype/" xmlns:dcat="http://www.w3.org/ns/dcat#" xmlns:duv="http://www.w3.org/ns/duv#" xmlns:foaf="http://xmlns.com/foaf/0.1/" xmlns:frapo="http://purl.org/cerif/frapo/" xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#" xmlns:gsp="http://www.opengis.net/ont/geosparql#" xmlns:locn="http://www.w3.org/ns/locn#" xmlns:org="http://www.w3.org/ns/org#" xmlns:owl="http://www.w3.org/2002/07/owl#" xmlns:prov="http://www.w3.org/ns/prov#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" xmlns:schema="http://schema.org/" xmlns:skos="http://www.w3.org/2004/02/skos/core#" xmlns:vcard="http://www.w3.org/2006/vcard/ns#" xmlns:wdrs="http://www.w3.org/2007/05/powder-s#">
  <rdf:Description rdf:about="https://doi.org/10.5281/zenodo.4032224">
    <dct:identifier rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI">https://doi.org/10.5281/zenodo.4032224</dct:identifier>
    <foaf:page rdf:resource="https://doi.org/10.5281/zenodo.4032224"/>
    <dct:creator>
      <rdf:Description>
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <foaf:name>kan-bayashi</foaf:name>
      </rdf:Description>
    </dct:creator>
    <dct:title>ESPnet2 pretrained model, kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave, fs=24000, lang=jp</dct:title>
    <dct:publisher>
      <foaf:Agent>
        <foaf:name>Zenodo</foaf:name>
      </foaf:Agent>
    </dct:publisher>
    <dct:issued rdf:datatype="http://www.w3.org/2001/XMLSchema#gYear">2020</dct:issued>
    <dcat:keyword>ESPnet</dcat:keyword>
    <dcat:keyword>deep-learning</dcat:keyword>
    <dcat:keyword>python</dcat:keyword>
    <dcat:keyword>pytorch</dcat:keyword>
    <dcat:keyword>speech-recognition</dcat:keyword>
    <dcat:keyword>speech-synthesis</dcat:keyword>
    <dcat:keyword>speech-translation</dcat:keyword>
    <dcat:keyword>machine-translation</dcat:keyword>
    <dct:issued rdf:datatype="http://www.w3.org/2001/XMLSchema#date">2020-09-16</dct:issued>
    <owl:sameAs rdf:resource="https://zenodo.org/record/4032224"/>
    <adms:identifier>
      <adms:Identifier>
        <skos:notation rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI">https://zenodo.org/record/4032224</skos:notation>
        <adms:schemeAgency>url</adms:schemeAgency>
      </adms:Identifier>
    </adms:identifier>
    <dct:relation rdf:resource="https://github.com/espnet/espnet"/>
    <dct:isVersionOf rdf:resource="https://doi.org/10.5281/zenodo.4032223"/>
    <dct:isPartOf rdf:resource="https://zenodo.org/communities/espnet"/>
    <dct:description>This model was trained by kan-bayashi using jsut/tts1 recipe in &lt;a href="https://github.com/espnet/espnet/"&gt;espnet&lt;/a&gt;. &lt;p&gt;&amp;nbsp;&lt;/p&gt; &lt;ul&gt; &lt;li&gt;&lt;strong&gt;Python API&lt;/strong&gt;&lt;pre&gt;&lt;code class="language-python"&gt;See https://github.com/espnet/espnet_model_zoo&lt;/code&gt;&lt;/pre&gt;&lt;/li&gt; &lt;li&gt;&lt;strong&gt;Evaluate in the recipe&lt;/strong&gt;&lt;pre&gt;&lt;code class="language-bash"&gt;git clone https://github.com/espnet/espnet cd espnet git checkout d8a028a24cddab209157f62cbb64aca26fefffc0 pip install -e . cd egs2/jsut/tts1 # Download the model file here ./run.sh --skip_data_prep false --skip_train true --download_model kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave&lt;/code&gt; &lt;/pre&gt;&lt;/li&gt; &lt;li&gt;&lt;strong&gt;Config&lt;/strong&gt;&lt;pre&gt;&lt;code&gt;config: conf/tuning/train_fastspeech2.yaml print_config: false log_level: INFO dry_run: false iterator_type: sequence output_dir: exp/tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk ngpu: 1 seed: 0 num_workers: 1 num_att_plot: 3 dist_backend: nccl dist_init_method: env:// dist_world_size: null dist_rank: null local_rank: 0 dist_master_addr: null dist_master_port: null dist_launcher: null multiprocessing_distributed: false cudnn_enabled: true cudnn_benchmark: false cudnn_deterministic: true collect_stats: false write_collected_feats: false max_epoch: 1000 patience: null val_scheduler_criterion: - valid - loss early_stopping_criterion: - valid - loss - min best_model_criterion: - - valid - loss - min - - train - loss - min keep_nbest_models: 5 grad_clip: 1.0 grad_clip_type: 2.0 grad_noise: false accum_grad: 8 no_forward_run: false resume: true train_dtype: float32 use_amp: false log_interval: null pretrain_path: [] pretrain_key: [] num_iters_per_epoch: 500 batch_size: 20 valid_batch_size: null batch_bins: 3000000 valid_batch_bins: null train_shape_file: - 
exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/text_shape.phn - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/speech_shape valid_shape_file: - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/text_shape.phn - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/speech_shape batch_type: numel valid_batch_type: null fold_length: - 150 - 240000 sort_in_batch: descending sort_batch: descending multiple_iterator: false chunk_length: 500 chunk_shift_ratio: 0.5 num_cache_chunks: 1024 train_data_path_and_name_and_type: - - dump/raw/tr_no_dev/text - text - text - - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/tr_no_dev/durations - durations - text_int - - dump/raw/tr_no_dev/wav.scp - speech - sound - - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/collect_feats/pitch.scp - pitch - npy - - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/collect_feats/energy.scp - energy - npy valid_data_path_and_name_and_type: - - dump/raw/dev/text - text - text - - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/dev/durations - durations - text_int - - dump/raw/dev/wav.scp - speech - sound - - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/collect_feats/pitch.scp - pitch - npy - - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/collect_feats/energy.scp - energy - npy allow_variable_data_keys: false max_cache_size: 0.0 valid_max_cache_size: null optim: adam optim_conf: lr: 1.0 
scheduler: noamlr scheduler_conf: model_size: 384 warmup_steps: 4000 token_list: - - - ty - dy - v - py - my - by - ny - hy - gy - ry - ky - f - p - z - ch - ts - j - b - y - h - cl - I - U - w - g - d - sh - pau - m - N - s - r - t - n - k - e - u - i - o - a - odim: null model_conf: {} use_preprocessor: true token_type: phn bpemodel: null non_linguistic_symbols: null cleaner: jaconv g2p: pyopenjtalk feats_extract: fbank feats_extract_conf: fs: 24000 fmin: 80 fmax: 7600 n_mels: 80 hop_length: 300 n_fft: 2048 win_length: 1200 normalize: global_mvn normalize_conf: stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/feats_stats.npz tts: fastspeech2 tts_conf: adim: 384 aheads: 2 elayers: 4 eunits: 1536 dlayers: 4 dunits: 1536 positionwise_layer_type: conv1d positionwise_conv_kernel_size: 3 duration_predictor_layers: 2 duration_predictor_chans: 256 duration_predictor_kernel_size: 3 postnet_layers: 5 postnet_filts: 5 postnet_chans: 256 use_masking: true use_scaled_pos_enc: true encoder_normalize_before: false decoder_normalize_before: false reduction_factor: 1 init_type: xavier_uniform init_enc_alpha: 1.0 init_dec_alpha: 1.0 transformer_enc_dropout_rate: 0.2 transformer_enc_positional_dropout_rate: 0.2 transformer_enc_attn_dropout_rate: 0.2 transformer_dec_dropout_rate: 0.2 transformer_dec_positional_dropout_rate: 0.2 transformer_dec_attn_dropout_rate: 0.2 pitch_predictor_layers: 5 pitch_predictor_chans: 256 pitch_predictor_kernel_size: 5 pitch_predictor_dropout: 0.5 pitch_embed_kernel_size: 1 pitch_embed_dropout: 0.0 stop_gradient_from_pitch_predictor: true energy_predictor_layers: 2 energy_predictor_chans: 256 energy_predictor_kernel_size: 3 energy_predictor_dropout: 0.5 energy_embed_kernel_size: 1 energy_embed_dropout: 0.0 stop_gradient_from_energy_predictor: false pitch_extract: dio pitch_extract_conf: fs: 24000 n_fft: 2048 hop_length: 300 f0max: 400 f0min: 80 pitch_normalize: global_mvn 
pitch_normalize_conf: stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/pitch_stats.npz energy_extract: energy energy_extract_conf: fs: 24000 n_fft: 2048 hop_length: 300 win_length: 1200 energy_normalize: global_mvn energy_normalize_conf: stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/energy_stats.npz required: - output_dir - token_list distributed: false&lt;/code&gt;&lt;/pre&gt;&lt;/li&gt; &lt;/ul&gt;</dct:description>
    <dct:accessRights rdf:resource="http://publications.europa.eu/resource/authority/access-right/PUBLIC"/>
    <dct:accessRights>
      <dct:RightsStatement rdf:about="info:eu-repo/semantics/openAccess">
        <rdfs:label>Open Access</rdfs:label>
      </dct:RightsStatement>
    </dct:accessRights>
    <dct:license rdf:resource="https://creativecommons.org/licenses/by/4.0/legalcode"/>
    <dcat:distribution>
      <dcat:Distribution>
        <dcat:accessURL rdf:resource="https://doi.org/10.5281/zenodo.4032224">https://doi.org/10.5281/zenodo.4032224</dcat:accessURL>
        <dcat:byteSize>149345503</dcat:byteSize>
        <dcat:downloadURL rdf:resource="https://zenodo.org/record/4032224/files/tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave.zip">https://zenodo.org/record/4032224/files/tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave.zip</dcat:downloadURL>
        <dcat:mediaType>application/zip</dcat:mediaType>
      </dcat:Distribution>
    </dcat:distribution>
  </rdf:Description>
</rdf:RDF>
48 views
631 downloads
All versions This version
Views 48 48
Downloads 631 631
Data volume 94.2 GB 94.2 GB
Unique views 45 45
Unique downloads 388 388

Share

Cite as