Other Open Access

ESPnet2 pretrained model, Chenda Li/wsj0_2mix_enh_train_enh_conv_tasnet_raw_valid.si_snr.ave, fs=8k, lang=en

Chenda Li


Dublin Core Export

<?xml version='1.0' encoding='utf-8'?>
<oai_dc:dc xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
  <dc:creator>Chenda Li</dc:creator>
  <dc:date>2021-02-04</dc:date>
  <dc:description>This model was trained by Chenda Li using wsj0_2mix recipe in espnet.

 


	Python API

	See https://github.com/espnet/espnet_model_zoo
	
	Evaluate in the recipe
	git clone https://github.com/espnet/espnet
cd espnet
git checkout a3334220b0352931677946d178fade3313cf82bb
pip install -e .
cd egs2/wsj0_2mix/enh1
./run.sh --skip_data_prep false --skip_train true --download_model "Chenda Li/wsj0_2mix_enh_train_enh_conv_tasnet_raw_valid.si_snr.ave"

	
	Results
	
# RESULTS
## Environments
- date: `Thu Feb  4 01:16:18 CST 2021`
- python version: `3.7.6 (default, Jan  8 2020, 19:59:22)  [GCC 7.3.0]`
- espnet version: `espnet 0.9.7`
- pytorch version: `pytorch 1.5.0`
- Git hash: `a3334220b0352931677946d178fade3313cf82bb`
  - Commit date: `Fri Jan 29 23:35:47 2021 +0800`


## enh_train_enh_conv_tasnet_raw

config: ./conf/tuning/train_enh_conv_tasnet.yaml

|dataset|STOI|SAR|SDR|SIR|
|---|---|---|---|---|
|enhanced_cv_min_8k|0.949205|17.3785|16.8028|26.9785|
|enhanced_tt_min_8k|0.95349|16.6221|15.9494|25.9032|
	
	ASR config
	config: ./conf/tuning/train_enh_conv_tasnet.yaml
print_config: false
log_level: INFO
dry_run: false
iterator_type: chunk
output_dir: exp/enh_train_enh_conv_tasnet_raw
ngpu: 1
seed: 0
num_workers: 4
num_att_plot: 3
dist_backend: nccl
dist_init_method: env://
dist_world_size: null
dist_rank: null
local_rank: 0
dist_master_addr: null
dist_master_port: null
dist_launcher: null
multiprocessing_distributed: false
cudnn_enabled: true
cudnn_benchmark: false
cudnn_deterministic: true
collect_stats: false
write_collected_feats: false
max_epoch: 100
patience: 4
val_scheduler_criterion:
- valid
- loss
early_stopping_criterion:
- valid
- loss
- min
best_model_criterion:
-   - valid
    - si_snr
    - max
-   - valid
    - loss
    - min
keep_nbest_models: 1
grad_clip: 5.0
grad_clip_type: 2.0
grad_noise: false
accum_grad: 1
no_forward_run: false
resume: true
train_dtype: float32
use_amp: false
log_interval: null
unused_parameters: false
use_tensorboard: true
use_wandb: false
wandb_project: null
wandb_id: null
pretrain_path: null
init_param: []
freeze_param: []
num_iters_per_epoch: null
batch_size: 8
valid_batch_size: null
batch_bins: 1000000
valid_batch_bins: null
train_shape_file:
- exp/enh_stats_8k/train/speech_mix_shape
- exp/enh_stats_8k/train/speech_ref1_shape
- exp/enh_stats_8k/train/speech_ref2_shape
valid_shape_file:
- exp/enh_stats_8k/valid/speech_mix_shape
- exp/enh_stats_8k/valid/speech_ref1_shape
- exp/enh_stats_8k/valid/speech_ref2_shape
batch_type: folded
valid_batch_type: null
fold_length:
- 80000
- 80000
- 80000
sort_in_batch: descending
sort_batch: descending
multiple_iterator: false
chunk_length: 32000
chunk_shift_ratio: 0.5
num_cache_chunks: 1024
train_data_path_and_name_and_type:
-   - dump/raw/tr_min_8k/wav.scp
    - speech_mix
    - sound
-   - dump/raw/tr_min_8k/spk1.scp
    - speech_ref1
    - sound
-   - dump/raw/tr_min_8k/spk2.scp
    - speech_ref2
    - sound
valid_data_path_and_name_and_type:
-   - dump/raw/cv_min_8k/wav.scp
    - speech_mix
    - sound
-   - dump/raw/cv_min_8k/spk1.scp
    - speech_ref1
    - sound
-   - dump/raw/cv_min_8k/spk2.scp
    - speech_ref2
    - sound
allow_variable_data_keys: false
max_cache_size: 0.0
max_cache_fd: 32
valid_max_cache_size: null
optim: adam
optim_conf:
    lr: 0.001
    eps: 1.0e-08
    weight_decay: 0
scheduler: reducelronplateau
scheduler_conf:
    mode: min
    factor: 0.5
    patience: 1
init: xavier_uniform
model_conf:
    loss_type: si_snr
use_preprocessor: false
encoder: conv
encoder_conf:
    channel: 256
    kernel_size: 20
    stride: 10
separator: tcn
separator_conf:
    num_spk: 2
    layer: 8
    stack: 4
    bottleneck_dim: 256
    hidden_dim: 512
    kernel: 3
    causal: false
    norm_type: gLN
    nonlinear: relu
decoder: conv
decoder_conf:
    channel: 256
    kernel_size: 20
    stride: 10
required:
- output_dir
version: 0.9.7
distributed: false
	
</dc:description>
  <dc:identifier>https://zenodo.org/record/4498562</dc:identifier>
  <dc:identifier>10.5281/zenodo.4498562</dc:identifier>
  <dc:identifier>oai:zenodo.org:4498562</dc:identifier>
  <dc:relation>url:https://github.com/espnet/espnet</dc:relation>
  <dc:relation>doi:10.5281/zenodo.4498561</dc:relation>
  <dc:relation>url:https://zenodo.org/communities/espnet</dc:relation>
  <dc:rights>info:eu-repo/semantics/openAccess</dc:rights>
  <dc:rights>https://creativecommons.org/licenses/by/4.0/legalcode</dc:rights>
  <dc:subject>ESPnet</dc:subject>
  <dc:subject>deep-learning</dc:subject>
  <dc:subject>python</dc:subject>
  <dc:subject>pytorch</dc:subject>
  <dc:subject>speech-separation</dc:subject>
  <dc:subject>speech-recognition</dc:subject>
  <dc:subject>speech-synthesis</dc:subject>
  <dc:subject>speech-translation</dc:subject>
  <dc:subject>machine-translation</dc:subject>
  <dc:title>ESPnet2 pretrained model, Chenda Li/wsj0_2mix_enh_train_enh_conv_tasnet_raw_valid.si_snr.ave, fs=8k, lang=en</dc:title>
  <dc:type>info:eu-repo/semantics/other</dc:type>
  <dc:type>other</dc:type>
</oai_dc:dc>
319 views
460 downloads
                  All versions  This version
Views                      319           315
Downloads                  460           460
Data volume            16.2 GB       16.2 GB
Unique views               295           293
Unique downloads           354           354

Share

Cite as