# Training schedule: total epochs to run.
epoch: 60

LearningRate:
  base_lr: 0.01
  schedulers:
    # Step decay: multiply LR by `gamma` at each milestone epoch.
    - !PiecewiseDecay
      gamma: 0.1
      milestones: [30, 44]
      use_warmup: true
    # Exponential warmup over the first `steps` iterations
    # (lr ramps as (step / steps) ** power before decay takes over).
    - !ExpWarmup
      steps: 1000
      power: 4

OptimizerBuilder:
  optimizer:
    momentum: 0.9
    type: Momentum
  regularizer:
    factor: 0.0001
    type: L2