Other Open Access

ESPnet2 pretrained model, kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave, fs=24000, lang=jp

kan-bayashi


Dublin Core Export

<?xml version='1.0' encoding='utf-8'?>
<oai_dc:dc xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
  <dc:creator>kan-bayashi</dc:creator>
  <dc:date>2020-09-16</dc:date>
  <dc:description>This model was trained by kan-bayashi using jsut/tts1 recipe in espnet.
 

Python API: See https://github.com/espnet/espnet_model_zoo
Evaluate in the recipe:
git clone https://github.com/espnet/espnet
cd espnet
git checkout d8a028a24cddab209157f62cbb64aca26fefffc0
pip install -e .
cd egs2/jsut/tts1
# Download the model file here
./run.sh --skip_data_prep false --skip_train true --download_model kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave

Config:
config: conf/tuning/train_fastspeech2.yaml
print_config: false
log_level: INFO
dry_run: false
iterator_type: sequence
output_dir: exp/tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk
ngpu: 1
seed: 0
num_workers: 1
num_att_plot: 3
dist_backend: nccl
dist_init_method: env://
dist_world_size: null
dist_rank: null
local_rank: 0
dist_master_addr: null
dist_master_port: null
dist_launcher: null
multiprocessing_distributed: false
cudnn_enabled: true
cudnn_benchmark: false
cudnn_deterministic: true
collect_stats: false
write_collected_feats: false
max_epoch: 1000
patience: null
val_scheduler_criterion:
- valid
- loss
early_stopping_criterion:
- valid
- loss
- min
best_model_criterion:
-   - valid
    - loss
    - min
-   - train
    - loss
    - min
keep_nbest_models: 5
grad_clip: 1.0
grad_clip_type: 2.0
grad_noise: false
accum_grad: 8
no_forward_run: false
resume: true
train_dtype: float32
use_amp: false
log_interval: null
pretrain_path: []
pretrain_key: []
num_iters_per_epoch: 500
batch_size: 20
valid_batch_size: null
batch_bins: 3000000
valid_batch_bins: null
train_shape_file:
- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/text_shape.phn
- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/speech_shape
valid_shape_file:
- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/text_shape.phn
- exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/speech_shape
batch_type: numel
valid_batch_type: null
fold_length:
- 150
- 240000
sort_in_batch: descending
sort_batch: descending
multiple_iterator: false
chunk_length: 500
chunk_shift_ratio: 0.5
num_cache_chunks: 1024
train_data_path_and_name_and_type:
-   - dump/raw/tr_no_dev/text
    - text
    - text
-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/tr_no_dev/durations
    - durations
    - text_int
-   - dump/raw/tr_no_dev/wav.scp
    - speech
    - sound
-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/collect_feats/pitch.scp
    - pitch
    - npy
-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/collect_feats/energy.scp
    - energy
    - npy
valid_data_path_and_name_and_type:
-   - dump/raw/dev/text
    - text
    - text
-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/dev/durations
    - durations
    - text_int
-   - dump/raw/dev/wav.scp
    - speech
    - sound
-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/collect_feats/pitch.scp
    - pitch
    - npy
-   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/valid/collect_feats/energy.scp
    - energy
    - npy
allow_variable_data_keys: false
max_cache_size: 0.0
valid_max_cache_size: null
optim: adam
optim_conf:
    lr: 1.0
scheduler: noamlr
scheduler_conf:
    model_size: 384
    warmup_steps: 4000
token_list:
- 
- 
- ty
- dy
- v
- py
- my
- by
- ny
- hy
- gy
- ry
- ky
- f
- p
- z
- ch
- ts
- j
- b
- y
- h
- cl
- I
- U
- w
- g
- d
- sh
- pau
- m
- N
- s
- r
- t
- n
- k
- e
- u
- i
- o
- a
- 
odim: null
model_conf: {}
use_preprocessor: true
token_type: phn
bpemodel: null
non_linguistic_symbols: null
cleaner: jaconv
g2p: pyopenjtalk
feats_extract: fbank
feats_extract_conf:
    fs: 24000
    fmin: 80
    fmax: 7600
    n_mels: 80
    hop_length: 300
    n_fft: 2048
    win_length: 1200
normalize: global_mvn
normalize_conf:
    stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/feats_stats.npz
tts: fastspeech2
tts_conf:
    adim: 384
    aheads: 2
    elayers: 4
    eunits: 1536
    dlayers: 4
    dunits: 1536
    positionwise_layer_type: conv1d
    positionwise_conv_kernel_size: 3
    duration_predictor_layers: 2
    duration_predictor_chans: 256
    duration_predictor_kernel_size: 3
    postnet_layers: 5
    postnet_filts: 5
    postnet_chans: 256
    use_masking: true
    use_scaled_pos_enc: true
    encoder_normalize_before: false
    decoder_normalize_before: false
    reduction_factor: 1
    init_type: xavier_uniform
    init_enc_alpha: 1.0
    init_dec_alpha: 1.0
    transformer_enc_dropout_rate: 0.2
    transformer_enc_positional_dropout_rate: 0.2
    transformer_enc_attn_dropout_rate: 0.2
    transformer_dec_dropout_rate: 0.2
    transformer_dec_positional_dropout_rate: 0.2
    transformer_dec_attn_dropout_rate: 0.2
    pitch_predictor_layers: 5
    pitch_predictor_chans: 256
    pitch_predictor_kernel_size: 5
    pitch_predictor_dropout: 0.5
    pitch_embed_kernel_size: 1
    pitch_embed_dropout: 0.0
    stop_gradient_from_pitch_predictor: true
    energy_predictor_layers: 2
    energy_predictor_chans: 256
    energy_predictor_kernel_size: 3
    energy_predictor_dropout: 0.5
    energy_embed_kernel_size: 1
    energy_embed_dropout: 0.0
    stop_gradient_from_energy_predictor: false
pitch_extract: dio
pitch_extract_conf:
    fs: 24000
    n_fft: 2048
    hop_length: 300
    f0max: 400
    f0min: 80
pitch_normalize: global_mvn
pitch_normalize_conf:
    stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/pitch_stats.npz
energy_extract: energy
energy_extract_conf:
    fs: 24000
    n_fft: 2048
    hop_length: 300
    win_length: 1200
energy_normalize: global_mvn
energy_normalize_conf:
    stats_file: exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_tacotron2_teacher_forcing_train.loss.best/stats/train/energy_stats.npz
required:
- output_dir
- token_list
distributed: false
</dc:description>
  <dc:identifier>https://zenodo.org/record/4032224</dc:identifier>
  <dc:identifier>10.5281/zenodo.4032224</dc:identifier>
  <dc:identifier>oai:zenodo.org:4032224</dc:identifier>
  <dc:relation>url:https://github.com/espnet/espnet</dc:relation>
  <dc:relation>doi:10.5281/zenodo.4032223</dc:relation>
  <dc:relation>url:https://zenodo.org/communities/espnet</dc:relation>
  <dc:rights>info:eu-repo/semantics/openAccess</dc:rights>
  <dc:rights>https://creativecommons.org/licenses/by/4.0/legalcode</dc:rights>
  <dc:subject>ESPnet</dc:subject>
  <dc:subject>deep-learning</dc:subject>
  <dc:subject>python</dc:subject>
  <dc:subject>pytorch</dc:subject>
  <dc:subject>speech-recognition</dc:subject>
  <dc:subject>speech-synthesis</dc:subject>
  <dc:subject>speech-translation</dc:subject>
  <dc:subject>machine-translation</dc:subject>
  <dc:title>ESPnet2 pretrained model, kan-bayashi/jsut_tts_train_fastspeech2_raw_phn_jaconv_pyopenjtalk_train.loss.ave, fs=24000, lang=jp</dc:title>
  <dc:type>info:eu-repo/semantics/other</dc:type>
  <dc:type>other</dc:type>
</oai_dc:dc>
48 views
631 downloads
                 All versions | This version
Views:            48          | 48
Downloads:        631         | 631
Data volume:      94.2 GB     | 94.2 GB
Unique views:     45          | 45
Unique downloads: 388         | 388

Share

Cite as