From 7b7fde9752cd9cd4905d642996215a158bf8d026 Mon Sep 17 00:00:00 2001
From: D-Keqi <61508571+D-Keqi@users.noreply.github.com>
Date: Thu, 7 Apr 2022 17:09:27 +0800
Subject: [PATCH] streaming slu

---
 .../conf/train_asr_streaming_transformer.yaml | 58 +++++++++++++++++++
 1 file changed, 58 insertions(+)
 create mode 100644 egs2/fsc/asr1/conf/train_asr_streaming_transformer.yaml

diff --git a/egs2/fsc/asr1/conf/train_asr_streaming_transformer.yaml b/egs2/fsc/asr1/conf/train_asr_streaming_transformer.yaml
new file mode 100644
index 00000000000..7a344c514cf
--- /dev/null
+++ b/egs2/fsc/asr1/conf/train_asr_streaming_transformer.yaml
@@ -0,0 +1,58 @@
+# network architecture
+# encoder related
+encoder: contextual_block_transformer
+encoder_conf:
+    output_size: 256    # dimension of attention
+    attention_heads: 4
+    linear_units: 2048  # the number of units of position-wise feed forward
+    num_blocks: 12      # the number of encoder blocks
+    dropout_rate: 0.1
+    positional_dropout_rate: 0.1
+    attention_dropout_rate: 0.0
+    input_layer: conv2d # encoder architecture type
+    normalize_before: true
+    block_size: 40
+    hop_size: 16
+    look_ahead: 16
+    init_average: true
+    ctx_pos_enc: true
+
+# decoder related
+decoder: transformer
+decoder_conf:
+    attention_heads: 4
+    linear_units: 2048
+    num_blocks: 6
+    dropout_rate: 0.1
+    positional_dropout_rate: 0.1
+    self_attention_dropout_rate: 0.0
+    src_attention_dropout_rate: 0.0
+
+# minibatch related
+batch_type: folded
+batch_size: 64
+max_epoch: 200
+keep_nbest_models: 5
+
+optim: adam
+optim_conf:
+    lr: 0.0002
+scheduler: warmuplr     # pytorch v1.1.0+ required
+scheduler_conf:
+    warmup_steps: 25000
+num_att_plot: 0
+specaug: specaug
+specaug_conf:
+    apply_time_warp: true
+    time_warp_window: 5
+    time_warp_mode: bicubic
+    apply_freq_mask: true
+    freq_mask_width_range:
+    - 0
+    - 30
+    num_freq_mask: 2
+    apply_time_mask: true
+    time_mask_width_range:
+    - 0
+    - 40
+    num_time_mask: 2
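
Note (not part of the patch): the block_size / hop_size / look_ahead values in encoder_conf are counted in encoder frames and set the streaming granularity of the contextual block encoder. The sketch below is a minimal, hypothetical helper that converts them to approximate time spans; it assumes a 10 ms frame shift and the 4x temporal subsampling of the conv2d input layer, neither of which is stated in this file.

# latency_sketch.py -- hypothetical helper, not included in this patch.
# Converts the streaming encoder's block/hop/look-ahead sizes (counted in
# encoder frames) into rough time spans, under the assumptions above.
import yaml

with open("egs2/fsc/asr1/conf/train_asr_streaming_transformer.yaml") as f:
    conf = yaml.safe_load(f)

enc = conf["encoder_conf"]
ms_per_encoder_frame = 10 * 4  # assumed: 10 ms frame shift x 4 subsampling

print("block span :", enc["block_size"] * ms_per_encoder_frame, "ms")  # 1600 ms
print("block hop  :", enc["hop_size"] * ms_per_encoder_frame, "ms")    #  640 ms
print("look-ahead :", enc["look_ahead"] * ms_per_encoder_frame, "ms")  #  640 ms

Under the usual ESPnet2 recipe layout, a config like this would typically be selected through the recipe script, e.g. passing --asr_config conf/train_asr_streaming_transformer.yaml to the recipe's run.sh / asr.sh invocation (assuming the standard asr.sh options).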