===========================train_params===========================
model_name:layoutxlm_ser
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:fp32
Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=17
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=4|whole_train_whole_infer=8
Architecture.Backbone.checkpoints:null
train_model_name:latest
train_infer_img_dir:ppstructure/docs/vqa/input/zh_val_42.jpg
null:null
##
trainer:norm_train
norm_train:tools/train.py -c configs/kie/layoutlm_series/ser_layoutxlm_xfund_zh.yml -o Global.print_batch_step=1 Global.eval_batch_step=[1000,1000] Train.loader.shuffle=false
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Architecture.Backbone.checkpoints:
norm_export:tools/export_model.py -c configs/kie/layoutlm_series/ser_layoutxlm_xfund_zh.yml -o
quant_export:
fpgm_export:
distill_export:null
export1:null
export2:null
##
infer_model:null
infer_export:null
infer_quant:False
inference:ppstructure/vqa/predict_vqa_token_ser.py --vqa_algorithm=LayoutXLM --ser_dict_path=train_data/XFUND/class_list_xfun.txt --output=output
--use_gpu:True|False
--enable_mkldnn:False
--cpu_threads:6
--rec_batch_num:1
--use_tensorrt:False
--precision:fp32
--ser_model_dir:
--image_dir:./ppstructure/docs/vqa/input/zh_val_42.jpg
null:null
--benchmark:False
null:null
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,224,224]}]
===========================train_benchmark_params==========================
batch_size:8
fp_items:fp32|fp16
epoch:3
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98