Published August 15, 2020 | Version v1
Other | Open Access

ESPnet2 pretrained model, kan-bayashi/jsut_tts_train_fastspeech_raw_phn_jaconv_pyopenjtalk_train.loss.best, fs=24000, lang=jp

Creators

kan-bayashi

Description

This model was trained by kan-bayashi using the jsut/tts1 recipe in ESPnet.


  • Python API
    See https://github.com/espnet/espnet_model_zoo
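    A minimal usage sketch (an assumption based on the espnet_model_zoo README, not part of this record; the exact return value of text2speech() varies across espnet2 versions):

    from espnet_model_zoo.downloader import ModelDownloader
    from espnet2.bin.tts_inference import Text2Speech

    # Fetch and unpack the packed model; download_and_unpack returns the
    # config/model paths as keyword arguments accepted by Text2Speech.
    d = ModelDownloader()
    text2speech = Text2Speech(
        **d.download_and_unpack(
            "kan-bayashi/jsut_tts_train_fastspeech_raw_phn_jaconv_pyopenjtalk_train.loss.best"
        )
    )

    # The jaconv cleaner and pyopenjtalk g2p declared in the config are applied
    # internally, so plain Japanese text can be passed directly. This FastSpeech
    # model predicts mel spectrograms (fs=24000); a separate vocoder is needed
    # to obtain a waveform.
    output = text2speech("こんにちは、世界。")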
  • Evaluate in the recipe
    git clone https://github.com/espnet/espnet
    cd espnet
    git checkout b4413f6259c49d2543db1e10417c08118a09d990
    pip install -e .
    cd egs2/jsut/tts1
    # Download the model file here
    ./run.sh --skip_data_prep false --skip_train true --download_model kan-bayashi/jsut_tts_train_fastspeech_raw_phn_jaconv_pyopenjtalk_train.loss.best
    
  • Config
    config: conf/tuning/train_fastspeech.yaml
    print_config: false
    log_level: INFO
    dry_run: false
    iterator_type: sequence
    output_dir: exp/tts_train_fastspeech_raw_phn_jaconv_pyopenjtalk
    ngpu: 1
    seed: 0
    num_workers: 1
    num_att_plot: 3
    dist_backend: nccl
    dist_init_method: env://
    dist_world_size: 2
    dist_rank: 1
    local_rank: 1
    dist_master_addr: localhost
    dist_master_port: 58341
    dist_launcher: null
    multiprocessing_distributed: true
    cudnn_enabled: true
    cudnn_benchmark: false
    cudnn_deterministic: true
    collect_stats: false
    write_collected_feats: false
    max_epoch: 1000
    patience: null
    val_scheduler_criterion:
    - valid
    - loss
    early_stopping_criterion:
    - valid
    - loss
    - min
    best_model_criterion:
    -   - valid
        - loss
        - min
    -   - train
        - loss
        - min
    keep_nbest_models: 5
    grad_clip: 1.0
    grad_noise: false
    accum_grad: 2
    no_forward_run: false
    resume: true
    train_dtype: float32
    log_interval: null
    pretrain_path: []
    pretrain_key: []
    num_iters_per_epoch: null
    batch_size: 20
    valid_batch_size: null
    batch_bins: 2400000
    valid_batch_bins: null
    train_shape_file:
    - exp/tts_stats_raw_phn_jaconv_pyopenjtalk/train/text_shape.phn
    - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_train.loss.best/tr_no_dev/speech_shape
    valid_shape_file:
    - exp/tts_stats_raw_phn_jaconv_pyopenjtalk/valid/text_shape.phn
    - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_train.loss.best/dev/speech_shape
    batch_type: numel
    valid_batch_type: null
    fold_length:
    - 150
    - 800
    sort_in_batch: descending
    sort_batch: descending
    multiple_iterator: false
    chunk_length: 500
    chunk_shift_ratio: 0.5
    num_cache_chunks: 1024
    train_data_path_and_name_and_type:
    -   - dump/raw/tr_no_dev/text
        - text
        - text
    -   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_train.loss.best/tr_no_dev/denorm/feats.scp
        - speech
        - npy
    -   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_train.loss.best/tr_no_dev/durations
        - durations
        - text_int
    valid_data_path_and_name_and_type:
    -   - dump/raw/dev/text
        - text
        - text
    -   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_train.loss.best/dev/denorm/feats.scp
        - speech
        - npy
    -   - exp/tts_train_tacotron2_raw_phn_jaconv_pyopenjtalk/decode_train.loss.best/dev/durations
        - durations
        - text_int
    allow_variable_data_keys: false
    max_cache_size: 0.0
    valid_max_cache_size: null
    optim: adam
    optim_conf:
        lr: 1.0
    scheduler: noamlr
    scheduler_conf:
        model_size: 384
        warmup_steps: 4000
    token_list:
    - <blank>
    - <unk>
    - ty
    - dy
    - v
    - py
    - my
    - by
    - ny
    - hy
    - gy
    - ry
    - ky
    - f
    - p
    - z
    - ch
    - ts
    - j
    - b
    - y
    - h
    - cl
    - I
    - U
    - w
    - g
    - d
    - sh
    - pau
    - m
    - N
    - s
    - r
    - t
    - n
    - k
    - e
    - u
    - i
    - o
    - a
    - <sos/eos>
    odim: 80
    model_conf: {}
    use_preprocessor: true
    token_type: phn
    bpemodel: null
    non_linguistic_symbols: null
    cleaner: jaconv
    g2p: pyopenjtalk
    feats_extract: null
    feats_extract_conf: null
    normalize: global_mvn
    normalize_conf:
        stats_file: exp/tts_stats_raw_phn_jaconv_pyopenjtalk/train/feats_stats.npz
    tts: fastspeech
    tts_conf:
        adim: 384
        aheads: 4
        elayers: 6
        eunits: 1536
        dlayers: 6
        dunits: 1536
        positionwise_layer_type: conv1d
        positionwise_conv_kernel_size: 3
        duration_predictor_layers: 2
        duration_predictor_chans: 384
        duration_predictor_kernel_size: 3
        postnet_layers: 5
        postnet_filts: 5
        postnet_chans: 256
        use_masking: true
        use_scaled_pos_enc: true
        encoder_normalize_before: false
        decoder_normalize_before: false
        reduction_factor: 1
        init_type: xavier_uniform
        init_enc_alpha: 1.0
        init_dec_alpha: 1.0
        transformer_enc_dropout_rate: 0.1
        transformer_enc_positional_dropout_rate: 0.1
        transformer_enc_attn_dropout_rate: 0.1
        transformer_dec_dropout_rate: 0.1
        transformer_dec_positional_dropout_rate: 0.1
        transformer_dec_attn_dropout_rate: 0.1
    required:
    - output_dir
    - token_list
    distributed: true
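  • Learning-rate schedule (illustrative)
    The config pairs Adam at a base lr of 1.0 with the noamlr scheduler (model_size 384, warmup_steps 4000), so the effective learning rate is governed entirely by the schedule. A minimal sketch, assuming the standard Transformer "Noam" formula (ESPnet's implementation may differ in minor details such as step offsets):

    def noam_lr(step, base_lr=1.0, model_size=384, warmup_steps=4000):
        """Learning rate at a given optimizer step under the Noam schedule."""
        step = max(step, 1)
        return base_lr * model_size ** -0.5 * min(
            step ** -0.5, step * warmup_steps ** -1.5
        )

    # The rate rises roughly linearly for warmup_steps updates, peaks around
    # 8.1e-4 at step 4000, then decays as 1/sqrt(step).
    for s in (1, 1000, 4000, 40000):
        print(s, f"{noam_lr(s):.2e}")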

Files (207.0 MB)

tts_train_fastspeech_raw_phn_jaconv_pyopenjtalk_train.loss.best.zip

Additional details

Related works

Is supplement to
https://github.com/espnet/espnet (URL)