# Configuration for an ERNIE-GEN dialog task (persona_chat).
# One assignment per line: the original file was collapsed onto a single line,
# which made the leading "#load model" comment swallow every assignment when
# the file was sourced. Restored to valid, sourceable shell.

# -- Model loading --
vocab_path="ernie_gen_large/vocab.txt"
config_path="ernie_gen_large/ernie_config.json"
init_model="ernie_gen_large/params"

# -- Multi-turn dialog / QA --
task_type="dialog"
role_type_size=3
turn_type_size=16

# -- Input --
max_src_len=472
max_tgt_len=40
tokenized_input="true"
continuous_position="true"
batch_size=8
in_tokens="false"

# -- Decoding --
do_decode="true"
max_dec_len=32
beam_size=10
length_penalty=1.3
use_multi_gpu_test="true"

# -- Training --
epoch=30
weight_decay=0.01
label_smooth=0.0
hidden_dropout_prob=0.1
save_and_valid_by_epoch="true"

# -- Learning rate --
warmup_proportion=0.1
lr_scheduler="linear_warmup_decay"
learning_rate=1e-4

# -- Noise --
random_noise="false"
noise_prob=0.0

# -- Dataset --
data_path="./datasets/persona_chat/"
train_set="train.tsv"
dev_set="dev.2k.tsv"
pred_set="test.tsv"
do_train="true"
do_val="true"
do_test="false"
do_pred="true"
# NOTE(review): do_decode was assigned a second time here with the same value
# in the original; the duplicate was dropped (it is set under "Decoding" above).

# -- Evaluation --
eval_script="sh ./eval/tasks/persona_chat/eval.sh"
# NOTE(review): "eval_mertrics" is misspelled, but the consuming script
# presumably reads this exact variable name — confirm before renaming.
eval_mertrics="bleu_1,bleu_2,distinct_1,distinct_2"