# Configuration: relation extraction (RE) fine-tuning of LayoutLMv2 on the FUNSD dataset.
Global:
  use_gpu: True
  epoch_num: &epoch_num 200
  log_smooth_window: 10
  print_batch_step: 10
  save_model_dir: ./output/re_layoutlmv2_funsd
  save_epoch_step: 2000
  # evaluation is run every 57 iterations after the 0th iteration
  eval_batch_step: [ 0, 57 ]
  cal_metric_during_train: False
  # left empty (null): no standalone inference model is exported during training
  save_inference_dir:
  use_visualdl: False
  seed: 2022
  infer_img: train_data/FUNSD/testing_data/images/83624198.png
  save_res_path: ./output/re_layoutlmv2_funsd/res/

Architecture:
  model_type: vqa
  # anchored so the dataset transforms below can alias the same algorithm name
  algorithm: &algorithm "LayoutLMv2"
  # left empty (null): no extra input-transform module is used
  Transform:
  Backbone:
    name: LayoutLMv2ForRe
    pretrained: True
    # left empty (null): presumably a resume-from-checkpoint path — confirm against trainer
    checkpoints:

# Training loss is taken directly from the model's output under the `loss` key
# and reduced with a mean — NOTE(review): exact semantics depend on the
# LossFromOutput implementation; confirm against its source.
Loss:
  name: LossFromOutput
  key: loss
  reduction: mean

Optimizer:
  name: AdamW
  beta1: 0.9
  beta2: 0.999
  # gradient clipping threshold — presumably a global-norm clip; confirm against optimizer impl
  clip_norm: 10
  lr:
    learning_rate: 0.00005
    warmup_epoch: 10
  regularizer:
    name: L2
    # factor 0.0 effectively disables L2 weight decay
    factor: 0.00000

# Converts raw token-level relation-extraction output into final predictions.
PostProcess:
  name: VQAReTokenLayoutLMPostProcess

# Relation-extraction evaluation metric; best checkpoint is selected by hmean
# (presumably the harmonic mean of precision/recall — confirm in metric impl).
Metric:
  name: VQAReTokenMetric
  main_indicator: hmean

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/FUNSD/training_data/images/
    label_file_list:
      - ./train_data/FUNSD/train.json
    ratio_list: [ 1.0 ]
    transforms:
      - DecodeImage: # load image
          img_mode: RGB
          channel_first: False
      - VQATokenLabelEncode: # Class handling label
          contains_re: True
          algorithm: *algorithm
          # anchored so the Eval pipeline can alias the same class list
          class_path: &class_path train_data/FUNSD/class_list.txt
      - VQATokenPad:
          # anchored so chunking below (and Eval) uses the same sequence length
          max_seq_len: &max_seq_len 512
          return_attention_mask: True
      - VQAReTokenRelation:
      - VQAReTokenChunk:
          max_seq_len: *max_seq_len
      - Resize:
          size: [224,224]
      - NormalizeImage:
          scale: 1./255.
          mean: [0.485, 0.456, 0.406]
          std: [0.229, 0.224, 0.225]
          order: 'hwc'
      - ToCHWImage:
      - KeepKeys:
          # dataloader will return list in this order
          keep_keys: [ 'input_ids', 'bbox', 'attention_mask', 'token_type_ids', 'image', 'entities', 'relations']
  loader:
    shuffle: True
    drop_last: False
    batch_size_per_card: 8
    num_workers: 8
    collate_fn: ListCollator

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/FUNSD/testing_data/images/
    label_file_list:
      - ./train_data/FUNSD/test.json
    transforms:
      - DecodeImage: # load image
          img_mode: RGB
          channel_first: False
      - VQATokenLabelEncode: # Class handling label
          contains_re: True
          algorithm: *algorithm
          class_path: *class_path
      - VQATokenPad:
          max_seq_len: *max_seq_len
          return_attention_mask: True
      - VQAReTokenRelation:
      - VQAReTokenChunk:
          max_seq_len: *max_seq_len
      - Resize:
          size: [224,224]
      - NormalizeImage:
          scale: 1./255.
          mean: [0.485, 0.456, 0.406]
          std: [0.229, 0.224, 0.225]
          order: 'hwc'
      - ToCHWImage:
      - KeepKeys:
          # dataloader will return list in this order
          keep_keys: [ 'input_ids', 'bbox', 'attention_mask', 'token_type_ids', 'image', 'entities', 'relations']
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 8
    num_workers: 8
    collate_fn: ListCollator