diff --git a/.mergify.yml b/.mergify.yml index 5a1e1ff349e0a56e831a4cba8138af04c3033065..b11fd5c1f6613ea50a40dc4329781a797bca38a0 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -39,6 +39,18 @@ pull_request_rules: actions: label: remove: ["conflicts"] + - name: "auto add label=enhancement" + conditions: + - files~=^deepspeech/ + actions: + label: + add: ["enhancement"] + - name: "auto add label=Example" + conditions: + - files~=^examples/ + actions: + label: + add: ["Example"] - name: "auto add label=README" conditions: - files~=README.md diff --git a/examples/aishell/s1/conf/conformer.yaml b/examples/aishell/s1/conf/conformer.yaml index 40b04ed7bdd1afa4c648a3dae3fb360d122f0d8f..10c3a2822a37b30dad57dae58d534ca8638730e9 100644 --- a/examples/aishell/s1/conf/conformer.yaml +++ b/examples/aishell/s1/conf/conformer.yaml @@ -74,13 +74,13 @@ model: training: - n_epoch: 300 + n_epoch: 240 accum_grad: 2 global_grad_clip: 5.0 optim: adam optim_conf: lr: 0.002 - weight_decay: 1e-06 + weight_decay: 1.0e-6 scheduler: warmuplr # pytorch v1.1.0+ required scheduler_conf: warmup_steps: 25000 @@ -99,7 +99,7 @@ decoding: cutoff_prob: 1.0 cutoff_top_n: 0 num_proc_bsearch: 8 - ctc_weight: 0.0 # ctc weight for attention rescoring decode mode. + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. # <0: for decoding, use full chunk. # >0: for decoding, use fixed chunk size as set. diff --git a/examples/librispeech/s1/conf/chunk_confermer.yaml b/examples/librispeech/s1/conf/chunk_confermer.yaml index 3ee31e1b26e73e6dab2f5aced80569ed13a00127..0d07d0a7e82cb156f27b83a5f493eaadad3243af 100644 --- a/examples/librispeech/s1/conf/chunk_confermer.yaml +++ b/examples/librispeech/s1/conf/chunk_confermer.yaml @@ -104,7 +104,7 @@ decoding: cutoff_prob: 1.0 cutoff_top_n: 0 num_proc_bsearch: 8 - ctc_weight: 0.0 # ctc weight for attention rescoring decode mode. + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. 
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. # <0: for decoding, use full chunk. # >0: for decoding, use fixed chunk size as set. diff --git a/examples/librispeech/s1/conf/chunk_transformer.yaml b/examples/librispeech/s1/conf/chunk_transformer.yaml index 265e6e0b674ddbde24d446816cf147035f3e73a0..3939ffc688e1de5dc66606328e48e2d69459b0b6 100644 --- a/examples/librispeech/s1/conf/chunk_transformer.yaml +++ b/examples/librispeech/s1/conf/chunk_transformer.yaml @@ -97,7 +97,7 @@ decoding: cutoff_prob: 1.0 cutoff_top_n: 0 num_proc_bsearch: 8 - ctc_weight: 0.0 # ctc weight for attention rescoring decode mode. + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. # <0: for decoding, use full chunk. # >0: for decoding, use fixed chunk size as set. diff --git a/examples/librispeech/s1/conf/conformer.yaml b/examples/librispeech/s1/conf/conformer.yaml index 1981b946fd4e0503f83d09e514d894a9d70060b9..f89f82655fa79b2e3a8edc7721382abb94ae9926 100644 --- a/examples/librispeech/s1/conf/conformer.yaml +++ b/examples/librispeech/s1/conf/conformer.yaml @@ -8,7 +8,7 @@ data: spm_model_prefix: 'data/bpe_unigram_5000' mean_std_filepath: "" augmentation_config: conf/augmentation.json - batch_size: 64 + batch_size: 16 min_input_len: 0.5 # seconds max_input_len: 20.0 # seconds min_output_len: 0.0 # tokens @@ -76,7 +76,7 @@ model: training: n_epoch: 120 - accum_grad: 2 + accum_grad: 8 global_grad_clip: 5.0 optim: adam optim_conf: @@ -100,7 +100,7 @@ decoding: cutoff_prob: 1.0 cutoff_top_n: 0 num_proc_bsearch: 8 - ctc_weight: 0.0 # ctc weight for attention rescoring decode mode. + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. # <0: for decoding, use full chunk. # >0: for decoding, use fixed chunk size as set. 
diff --git a/examples/librispeech/s1/conf/transformer.yaml b/examples/librispeech/s1/conf/transformer.yaml index 8ef9e12f1183550507e711625e99ae28acd2f6ea..9014e5b8443b67c4c0b436092ab21c03dd1d0086 100644 --- a/examples/librispeech/s1/conf/transformer.yaml +++ b/examples/librispeech/s1/conf/transformer.yaml @@ -95,7 +95,7 @@ decoding: cutoff_prob: 1.0 cutoff_top_n: 0 num_proc_bsearch: 8 - ctc_weight: 0.0 # ctc weight for attention rescoring decode mode. + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. # <0: for decoding, use full chunk. # >0: for decoding, use fixed chunk size as set. diff --git a/examples/tiny/s1/conf/chunk_confermer.yaml b/examples/tiny/s1/conf/chunk_confermer.yaml index bd4279e2be5adc71937afb524c5739c689bae074..79006626408823732ba74838ebece5927b6a88f0 100644 --- a/examples/tiny/s1/conf/chunk_confermer.yaml +++ b/examples/tiny/s1/conf/chunk_confermer.yaml @@ -104,7 +104,7 @@ decoding: cutoff_prob: 1.0 cutoff_top_n: 0 num_proc_bsearch: 8 - ctc_weight: 0.0 # ctc weight for attention rescoring decode mode. + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. # <0: for decoding, use full chunk. # >0: for decoding, use fixed chunk size as set. diff --git a/examples/tiny/s1/conf/chunk_transformer.yaml b/examples/tiny/s1/conf/chunk_transformer.yaml index ba60c273564540a627e7da3e9f4502ed4f2bb28a..aa2b145a681dff821d4695f96be8aef35d674a5e 100644 --- a/examples/tiny/s1/conf/chunk_transformer.yaml +++ b/examples/tiny/s1/conf/chunk_transformer.yaml @@ -97,7 +97,7 @@ decoding: cutoff_prob: 1.0 cutoff_top_n: 0 num_proc_bsearch: 8 - ctc_weight: 0.0 # ctc weight for attention rescoring decode mode. + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. # <0: for decoding, use full chunk. 
# >0: for decoding, use fixed chunk size as set. diff --git a/examples/tiny/s1/conf/conformer.yaml b/examples/tiny/s1/conf/conformer.yaml index 83f4f5af46100735f5fc78761c4cc21901fe15e7..3813daa04a516c143d7a545cd28999518fecf2d8 100644 --- a/examples/tiny/s1/conf/conformer.yaml +++ b/examples/tiny/s1/conf/conformer.yaml @@ -100,7 +100,7 @@ decoding: cutoff_prob: 1.0 cutoff_top_n: 0 num_proc_bsearch: 8 - ctc_weight: 0.0 # ctc weight for attention rescoring decode mode. + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. # <0: for decoding, use full chunk. # >0: for decoding, use fixed chunk size as set. diff --git a/examples/tiny/s1/conf/transformer.yaml b/examples/tiny/s1/conf/transformer.yaml index 3f3170bdfab088a85b7d365834e8f659c42f31bd..35c11731cc3acbc17fe4cf4c410f731b3b384e3c 100644 --- a/examples/tiny/s1/conf/transformer.yaml +++ b/examples/tiny/s1/conf/transformer.yaml @@ -95,7 +95,7 @@ decoding: cutoff_prob: 1.0 cutoff_top_n: 0 num_proc_bsearch: 8 - ctc_weight: 0.0 # ctc weight for attention rescoring decode mode. + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. # <0: for decoding, use full chunk. # >0: for decoding, use fixed chunk size as set.