Published September 20, 2020 | Version v1

ESPnet2 pretrained model, kan-bayashi/ljspeech_tts_train_transformer_raw_phn_tacotron_g2p_en_no_space_train.loss.ave, fs=22050, lang=en

Creators

kan-bayashi

Description

This model was trained by kan-bayashi using the ljspeech/tts1 recipe in ESPnet.

 

  • Python API
    See https://github.com/espnet/espnet_model_zoo (a usage sketch is included after the config listing below)
  • Evaluate in the recipe
    git clone https://github.com/espnet/espnet
    cd espnet
    git checkout 67ca53d61932e4ff3325f1ea7215eead03f300bb
    pip install -e .
    cd egs2/ljspeech/tts1
    # Download the model file here
    ./run.sh --skip_data_prep false --skip_train true --download_model kan-bayashi/ljspeech_tts_train_transformer_raw_phn_tacotron_g2p_en_no_space_train.loss.ave
    
  • Config
    config: conf/tuning/train_transformer.yaml
    print_config: false
    log_level: INFO
    dry_run: false
    iterator_type: sequence
    output_dir: exp/tts_train_transformer_raw_phn_tacotron_g2p_en_no_space
    ngpu: 1
    seed: 0
    num_workers: 1
    num_att_plot: 3
    dist_backend: nccl
    dist_init_method: env://
    dist_world_size: 4
    dist_rank: 0
    local_rank: 0
    dist_master_addr: localhost
    dist_master_port: 53481
    dist_launcher: null
    multiprocessing_distributed: true
    cudnn_enabled: true
    cudnn_benchmark: false
    cudnn_deterministic: true
    collect_stats: false
    write_collected_feats: false
    max_epoch: 200
    patience: null
    val_scheduler_criterion:
    - valid
    - loss
    early_stopping_criterion:
    - valid
    - loss
    - min
    best_model_criterion:
    -   - valid
        - loss
        - min
    -   - train
        - loss
        - min
    keep_nbest_models: 5
    grad_clip: 1.0
    grad_clip_type: 2.0
    grad_noise: false
    accum_grad: 2
    no_forward_run: false
    resume: true
    train_dtype: float32
    use_amp: false
    log_interval: null
    pretrain_path: []
    pretrain_key: []
    num_iters_per_epoch: 1000
    batch_size: 20
    valid_batch_size: null
    batch_bins: 9000000
    valid_batch_bins: null
    train_shape_file:
    - exp/tts_stats_raw_phn_tacotron_g2p_en_no_space/train/text_shape.phn
    - exp/tts_stats_raw_phn_tacotron_g2p_en_no_space/train/speech_shape
    valid_shape_file:
    - exp/tts_stats_raw_phn_tacotron_g2p_en_no_space/valid/text_shape.phn
    - exp/tts_stats_raw_phn_tacotron_g2p_en_no_space/valid/speech_shape
    batch_type: numel
    valid_batch_type: null
    fold_length:
    - 150
    - 204800
    sort_in_batch: descending
    sort_batch: descending
    multiple_iterator: false
    chunk_length: 500
    chunk_shift_ratio: 0.5
    num_cache_chunks: 1024
    train_data_path_and_name_and_type:
    -   - dump/raw/tr_no_dev/text
        - text
        - text
    -   - dump/raw/tr_no_dev/wav.scp
        - speech
        - sound
    valid_data_path_and_name_and_type:
    -   - dump/raw/dev/text
        - text
        - text
    -   - dump/raw/dev/wav.scp
        - speech
        - sound
    allow_variable_data_keys: false
    max_cache_size: 0.0
    valid_max_cache_size: null
    optim: adam
    optim_conf:
        lr: 1.0
    scheduler: noamlr
    scheduler_conf:
        model_size: 512
        warmup_steps: 8000
    token_list:
    - <blank>
    - <unk>
    - AH0
    - N
    - T
    - D
    - S
    - R
    - L
    - DH
    - K
    - Z
    - IH1
    - IH0
    - M
    - EH1
    - W
    - P
    - AE1
    - AH1
    - V
    - ER0
    - F
    - ','
    - AA1
    - B
    - HH
    - IY1
    - UW1
    - IY0
    - AO1
    - EY1
    - AY1
    - .
    - OW1
    - SH
    - NG
    - G
    - ER1
    - CH
    - JH
    - Y
    - AW1
    - TH
    - UH1
    - EH2
    - OW0
    - EY2
    - AO0
    - IH2
    - AE2
    - AY2
    - AA2
    - UW0
    - EH0
    - OY1
    - EY0
    - AO2
    - ZH
    - OW2
    - AE0
    - UW2
    - AH2
    - AY0
    - IY2
    - AW2
    - AA0
    - ''''
    - ER2
    - UH2
    - '?'
    - OY2
    - '!'
    - AW0
    - UH0
    - OY0
    - ..
    - <sos/eos>
    odim: null
    model_conf: {}
    use_preprocessor: true
    token_type: phn
    bpemodel: null
    non_linguistic_symbols: null
    cleaner: tacotron
    g2p: g2p_en_no_space
    feats_extract: fbank
    feats_extract_conf:
        fs: 22050
        fmin: 80
        fmax: 7600
        n_mels: 80
        hop_length: 256
        n_fft: 1024
        win_length: null
    normalize: global_mvn
    normalize_conf:
        stats_file: exp/tts_stats_raw_phn_tacotron_g2p_en_no_space/train/feats_stats.npz
    tts: transformer
    tts_conf:
        embed_dim: 0
        eprenet_conv_layers: 0
        eprenet_conv_filts: 0
        eprenet_conv_chans: 0
        dprenet_layers: 2
        dprenet_units: 256
        adim: 512
        aheads: 8
        elayers: 6
        eunits: 1024
        dlayers: 6
        dunits: 1024
        positionwise_layer_type: conv1d
        positionwise_conv_kernel_size: 1
        postnet_layers: 5
        postnet_filts: 5
        postnet_chans: 256
        use_masking: true
        bce_pos_weight: 5.0
        use_scaled_pos_enc: true
        encoder_normalize_before: true
        decoder_normalize_before: true
        reduction_factor: 1
        init_type: xavier_uniform
        init_enc_alpha: 1.0
        init_dec_alpha: 1.0
        eprenet_dropout_rate: 0.0
        dprenet_dropout_rate: 0.5
        postnet_dropout_rate: 0.5
        transformer_enc_dropout_rate: 0.1
        transformer_enc_positional_dropout_rate: 0.1
        transformer_enc_attn_dropout_rate: 0.1
        transformer_dec_dropout_rate: 0.1
        transformer_dec_positional_dropout_rate: 0.1
        transformer_dec_attn_dropout_rate: 0.1
        transformer_enc_dec_attn_dropout_rate: 0.1
        use_guided_attn_loss: true
        num_heads_applied_guided_attn: 2
        num_layers_applied_guided_attn: 2
        modules_applied_guided_attn:
        - encoder-decoder
        guided_attn_loss_lambda: 10.0
    pitch_extract: null
    pitch_extract_conf: {}
    pitch_normalize: null
    pitch_normalize_conf: {}
    energy_extract: null
    energy_extract_conf: {}
    energy_normalize: null
    energy_normalize_conf: {}
    required:
    - output_dir
    - token_list
    distributed: true
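  • Python usage sketch
    A minimal, hedged sketch of synthesizing speech from this model with the espnet_model_zoo downloader and espnet2's Text2Speech interface. It assumes a recent ESPnet version in which the call returns a dict with a "wav" key; older versions return a tuple instead, and the waveform comes from Griffin-Lim unless a neural vocoder is configured. "out.wav" and the input sentence are placeholders.

    import soundfile as sf
    from espnet2.bin.tts_inference import Text2Speech
    from espnet_model_zoo.downloader import ModelDownloader

    d = ModelDownloader()
    text2speech = Text2Speech(
        **d.download_and_unpack(
            "kan-bayashi/ljspeech_tts_train_transformer_raw_phn_tacotron_g2p_en_no_space_train.loss.ave"
        )
    )
    # The returned waveform comes from Griffin-Lim unless a neural vocoder is configured.
    wav = text2speech("Hello world.")["wav"]
    # fs=22050 is taken from this record's feats_extract_conf.
    sf.write("out.wav", wav.cpu().numpy(), 22050, "PCM_16")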
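  • Text front-end sketch
    The token_list above is the ARPABET-style phoneme inventory produced by the tacotron text cleaner followed by the g2p_en_no_space grapheme-to-phoneme converter. A hedged illustration using the third-party g2p_en package directly (the tacotron cleaning step is omitted here, and the printed output is only indicative):

    from g2p_en import G2p

    g2p = G2p()
    # "no_space" drops the word-boundary space tokens emitted by g2p_en.
    phones = [p for p in g2p("Hello world.") if p != " "]
    print(phones)  # e.g. ['HH', 'AH0', 'L', 'OW1', 'W', 'ER1', 'L', 'D', '.']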
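  • Feature extraction sketch
    feats_extract_conf describes 80-dimensional log-mel filterbanks computed at 22050 Hz with a 1024-point FFT and a 256-sample hop. The sketch below approximates them with librosa purely to illustrate the parameters; ESPnet2 uses its own torch-based STFT/LogMel front-end, so the values will not match exactly, and "sample.wav" is a placeholder path.

    import librosa
    import numpy as np

    wav, fs = librosa.load("sample.wav", sr=22050)  # fs: 22050
    mel = librosa.feature.melspectrogram(
        y=wav, sr=fs,
        n_fft=1024, hop_length=256, win_length=None,  # win_length: null -> defaults to n_fft
        n_mels=80, fmin=80, fmax=7600,
        power=1.0,  # magnitude spectrogram before the mel projection
    )
    logmel = np.log(np.maximum(mel, 1e-10)).T  # shape (frames, 80), natural log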
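  • Learning-rate schedule sketch
    optim: adam with lr: 1.0 only makes sense together with scheduler: noamlr, which rescales the rate with the standard Noam warm-up formula. A small sketch with this config's model_size=512 and warmup_steps=8000:

    def noam_lr(step, base_lr=1.0, model_size=512, warmup_steps=8000):
        # lr(step) = base_lr * model_size**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)
        return base_lr * model_size ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)

    print(noam_lr(8_000))    # peak at the end of warm-up, ~4.9e-4
    print(noam_lr(100_000))  # then decays as step**-0.5, ~1.4e-4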

Files

tts_train_transformer_raw_phn_tacotron_g2p_en_no_space_train.loss.ave.zip (133.1 MB)

Additional details

Related works

Is supplement to
https://github.com/espnet/espnet (URL)