# optimizer_90k.yml
epoch: 30 # iterations (max_iter below) control the schedule, not epochs
LearningRate:
  base_lr: 0.02 # 0.02 for a total batch size of 8*(4+4)
  schedulers:
  - !PiecewiseDecay
    gamma: 0.1
    milestones: [300] # set beyond the 30 training epochs, so lr never decays
  - !LinearWarmup
    start_factor: 0.3333333333333333
    steps: 1000
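# The two schedulers above compose as warmup-then-decay. A minimal plain-Python
# sketch of the effective lr (lr_at is a hypothetical helper; milestones are
# given in epochs and converted here assuming epoch_iter=1000 steps per epoch):
#
#   def lr_at(step, base_lr=0.02, warmup_steps=1000, start_factor=1.0 / 3,
#             milestone_steps=(300 * 1000,), gamma=0.1):
#       if step < warmup_steps:
#           # LinearWarmup: ramp from start_factor * base_lr up to base_lr
#           alpha = step / warmup_steps
#           return base_lr * (start_factor + (1.0 - start_factor) * alpha)
#       # PiecewiseDecay: multiply by gamma at each milestone already passed;
#       # 300 * 1000 > max_iter (90000), so no decay ever fires in this run
#       decays = sum(1 for m in milestone_steps if step >= m)
#       return base_lr * gamma ** decays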

max_iter: 90000 # 90k iters at batch size 32, 180k iters at batch size 16
epoch_iter: 1000 # iters per "epoch": interval for checkpoint saving and eval
# update student params from loss gradients every optimize_rate iters
# (see the commented sketch below).
optimize_rate: 1
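# A sketch, assuming Paddle-style APIs, of how optimize_rate gates the student
# update (model, opt, and loader are placeholders, not names from this config):
#
#   for it, batch in enumerate(loader):
#       loss = model(batch)
#       loss.backward()                     # gradients accumulate across iters
#       if (it + 1) % optimize_rate == 0:   # optimize_rate = 1: step every iter
#           opt.step()                      # apply accumulated loss gradients
#           opt.clear_grad()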
SEMISUPNET:
  BBOX_THRESHOLD: 0.5 # not used
  TEACHER_UPDATE_ITER: 1
  BURN_UP_STEP: 9000
  EMA_KEEP_RATE: 0.9996
  UNSUP_LOSS_WEIGHT: 1.0 # per-task weights for cls and loc are set in cr_loss
  PSEUDO_WARM_UP_STEPS: 2000
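# A minimal sketch of the EMA teacher update implied by EMA_KEEP_RATE and
# TEACHER_UPDATE_ITER (dict-of-tensors form; update_teacher is an illustrative
# name, run every TEACHER_UPDATE_ITER iters after the BURN_UP_STEP burn-in):
#
#   def update_teacher(teacher_state, student_state, keep_rate=0.9996):
#       for name, s_param in student_state.items():
#           t_param = teacher_state[name]
#           teacher_state[name] = keep_rate * t_param + (1.0 - keep_rate) * s_param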

OptimizerBuilder:
  optimizer:
    momentum: 0.9
    type: Momentum
  regularizer:
    factor: 0.0001
    type: L2
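# A sketch of the optimizer this section describes, assuming the standard
# paddle.optimizer API (model stands in for the detector nn.Layer):
#
#   import paddle
#
#   opt = paddle.optimizer.Momentum(
#       learning_rate=0.02,                               # base_lr / lr scheduler
#       momentum=0.9,
#       weight_decay=paddle.regularizer.L2Decay(0.0001),  # L2 regularizer, factor
#       parameters=model.parameters())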