# rec_r34_vd_tps_bilstm_attn.yml
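# RARE-style scene text recognition model: TPS (thin-plate spline)
# rectification, a ResNet34-vd backbone, a BiLSTM sequence encoder and an
# attention-based decoder.
#
# Global holds the training/eval settings: with these values the model trains
# for 72 epochs, logs every 10 iterations (print_batch_step), evaluates
# roughly every 2000 iterations (eval_batch_step) and snapshots every 3 epochs
# (save_epoch_step). Inputs are 3 x 32 x 100 (CHW) images; predictions use an
# English character set and are capped at 25 characters (max_text_length).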
Global:
  algorithm: RARE
  use_gpu: true
  epoch_num: 72
  log_smooth_window: 20
  print_batch_step: 10
  save_model_dir: output/rec_RARE
  save_epoch_step: 3
  eval_batch_step: 2000
  train_batch_size_per_card: 256
  test_batch_size_per_card: 256
  image_shape: [3, 32, 100]
  max_text_length: 25
  character_type: en
  loss_type: attention
  reader_yml: ./configs/rec/rec_benchmark_reader.yml
  pretrain_weights:
  checkpoints:
  save_inference_dir:

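# The model graph is assembled by RecModel: each section below selects a
# component via its "function" key (module path, class name), and the
# remaining keys in that section are treated as the component's parameters.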
Architecture:
  function: ppocr.modeling.architectures.rec_model,RecModel

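# Rectification stage from RARE: a localization network predicts num_fiducial
# control points and a thin-plate-spline transform straightens curved or
# perspective-distorted text before recognition; loc_lr acts as a
# learning-rate multiplier for the localization branch.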
TPS:
  function: ppocr.modeling.stns.tps,TPS
  num_fiducial: 20
  loc_lr: 0.1
  model_name: large

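# Feature extractor: ResNet-vd with 34 layers; its output feature map feeds
# the recognition head below.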
Backbone:
  function: ppocr.modeling.backbones.rec_resnet_vd,ResNet
  layers: 34
 
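# Attention prediction head: backbone features go through the BiLSTM sequence
# encoder (encoder_type: rnn, hidden_size 256) and an attention decoder
# (decoder_size and word_vector_dim of 128) that emits one character per step,
# up to Global.max_text_length.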
Head:
  function: ppocr.modeling.heads.rec_attention_head,AttentionPredict
  encoder_type: rnn
  SeqRNN:
    hidden_size: 256
  Attention:
    decoder_size: 128
    word_vector_dim: 128
  
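# Attention loss, essentially per-step cross-entropy between the decoder
# outputs and the target character sequence (matches loss_type: attention
# above).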
Loss:
  function: ppocr.modeling.losses.rec_attention_loss,AttentionLoss
  
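# Adam optimizer with a base learning rate of 1e-3 and the standard
# beta1/beta2 values.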
Optimizer:
  function: ppocr.optimizer,AdamDecay
  base_lr: 0.001
  beta1: 0.9
  beta2: 0.999
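
# Typical usage with PaddleOCR's training entry point (paths assume a standard
# repo checkout; the checkpoint path is a placeholder):
#   python3 tools/train.py -c configs/rec/rec_r34_vd_tps_bilstm_attn.yml
# Options can be overridden on the command line, e.g. to resume training:
#   python3 tools/train.py -c configs/rec/rec_r34_vd_tps_bilstm_attn.yml \
#     -o Global.checkpoints=<path_to_checkpoint>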