# transformer_fr.yaml
# https://yaml.org/type/float.html
###########################################
#                   Data                  #
###########################################
train_manifest: data/manifest.fr.train
dev_manifest: data/manifest.fr.dev
test_manifest: data/manifest.fr.test
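# Note (an assumption based on how PaddleSpeech manifests are used elsewhere,
# not something this file specifies): each manifest is a JSON-lines file with
# one utterance per line, carrying at least the audio path, duration, and
# source/target text.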

###########################################
#              Dataloader                 #
###########################################
vocab_filepath: data/lang_1spm/train_sp.en-fr.fr_bpe8000_units_tc.txt
unit_type: 'spm'
spm_model_prefix: data/lang_1spm/train_sp.en-fr.fr_bpe8000_tc
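# unit_type 'spm' tokenizes text with the SentencePiece model above. The
# filenames suggest an 8k BPE vocabulary built on true-cased (tc),
# speed-perturbed (train_sp) en-fr text -- an inference from the names,
# not something this config enforces.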
mean_std_filepath: ""
# preprocess_config: conf/augmentation.json
batch_size: 20
feat_dim: 83
stride_ms: 10.0
window_ms: 25.0
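# 83-dim features with a 25 ms window and 10 ms stride match the usual
# ESPnet-style front-end of 80 filterbanks + 3 pitch features (an assumption;
# the config itself only fixes the dimensions).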
sortagrad: 0 # Feed samples from shortest to longest; -1: enabled for all epochs, 0: disabled, other: enabled for the first 'other' epochs
maxlen_in: 512  # if input length  > maxlen_in, the batch size is automatically reduced
maxlen_out: 150  # if output length > maxlen_out, the batch size is automatically reduced
minibatches: 0 # for debug: if > 0, truncate the dataset to this many minibatches
batch_count: auto
batch_bins: 0
batch_frames_in: 0
batch_frames_out: 0
batch_frames_inout: 0
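# Note (assumption, following the ESPnet-style batch construction these keys
# mirror): with batch_count 'auto' the counting strategy is picked from
# whichever key is nonzero -- batch_bins counts padded bins, the
# batch_frames_* keys count frames, and otherwise batch_size counts
# sequences. Here only batch_size is set, so each batch holds 20 utterances.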
preprocess_config:
num_workers: 0
subsampling_factor: 1
num_encs: 1


############################################
#           Network Architecture           #
############################################
cmvn_file: None
cmvn_file_type: "json"
# encoder related
encoder: transformer
encoder_conf:
    output_size: 256    # dimension of attention
    attention_heads: 4
    linear_units: 2048  # the number of units of position-wise feed forward
    num_blocks: 12      # the number of encoder blocks
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.0
    input_layer: conv2d # encoder input type; you can choose conv2d, conv2d6, or conv2d8
    normalize_before: true
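# Note (assumption, based on the ESPnet-style encoders this config mirrors):
# the conv2d front-end is two stride-2 convolutions, i.e. ~4x subsampling in
# time (conv2d6/conv2d8 give 6x/8x). With output_size 256 and
# attention_heads 4, each head works in a 256 / 4 = 64-dim subspace.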

# decoder related
decoder: transformer
decoder_conf:
    attention_heads: 4
    linear_units: 2048
    num_blocks: 6
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    self_attention_dropout_rate: 0.0
    src_attention_dropout_rate: 0.0
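# self_attention_dropout_rate applies to the decoder's self-attention;
# src_attention_dropout_rate to its cross-attention over encoder outputs.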

# hybrid CTC/attention
model_conf:
    asr_weight: 0.0
    ctc_weight: 0.0
    lsm_weight: 0.1     # label smoothing option
    length_normalized_loss: false
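# Note (an assumption about the hybrid loss, not spelled out in this file):
#   loss = asr_weight * (ctc_weight * L_ctc + (1 - ctc_weight) * L_att_asr)
#        + (1 - asr_weight) * L_st
# With asr_weight 0.0 and ctc_weight 0.0 this reduces to the pure attention
# ST cross-entropy with label smoothing 0.1.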


###########################################
#                Training                 #
###########################################
n_epoch: 40
accum_grad: 2
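# With batch_size 20 and accum_grad 2, gradients are accumulated over two
# minibatches, so each optimizer step sees an effective batch of 40 utterances.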
global_grad_clip: 5.0
optim: adam
optim_conf:
  lr: 2.5
  weight_decay: 0.
scheduler: noam
scheduler_conf:
  warmup_steps: 25000
  lr_decay: 1.0
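# Note (assumption: the standard Noam schedule from "Attention Is All You
# Need", scaled by optim_conf.lr and the attention dim d_model = 256):
#   lr(step) = lr * d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)
# This peaks at step = warmup_steps = 25000:
#   2.5 * 256^-0.5 * 25000^-0.5 ~= 9.9e-4
# so lr: 2.5 is a scale factor, not the literal learning rate.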
log_interval: 50
checkpoint:
  kbest_n: 50
  latest_n: 5
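# Checkpointing (an assumption about the trainer's semantics): keep the
# kbest_n = 50 best checkpoints by validation metric plus the latest_n = 5
# most recent ones; anything older or worse is pruned.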