epoch: 30

LearningRate:
  base_lr: 0.01
  schedulers:
  - !PiecewiseDecay
    gamma: 0.1
    milestones: [15, 22]
    use_warmup: True
  - !ExpWarmup
    steps: 1000
    power: 4

OptimizerBuilder:
  optimizer:
    type: Momentum
  regularizer:
    factor: 0.0001
    type: L2
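For intuition, the sketch below approximates how the learning rate produced by this config evolves: it ramps up over the first 1000 iterations following a degree-4 polynomial (ExpWarmup), then steps down by a factor of 0.1 at epochs 15 and 22 (PiecewiseDecay). This is a rough standalone illustration, not the framework's own scheduler code, and `steps_per_epoch` is an assumed value; the real schedule is derived internally from the dataset size and batch size.

```python
# Rough sketch of the schedule defined by the config above.
# Assumption: steps_per_epoch is illustrative; the training framework
# computes the actual per-step schedule from the dataloader.

BASE_LR = 0.01
WARMUP_STEPS = 1000
WARMUP_POWER = 4
GAMMA = 0.1
MILESTONES = [15, 22]  # epochs at which the LR is multiplied by gamma


def learning_rate(step: int, epoch: int) -> float:
    """Approximate LR for a given global step and current epoch."""
    # Warmup phase: scale the base LR by (step / warmup_steps) ** power.
    if step < WARMUP_STEPS:
        return BASE_LR * (step / WARMUP_STEPS) ** WARMUP_POWER
    # Piecewise decay: multiply by gamma once for each milestone already passed.
    decays = sum(1 for m in MILESTONES if epoch >= m)
    return BASE_LR * (GAMMA ** decays)


if __name__ == "__main__":
    steps_per_epoch = 500  # assumed value for illustration only
    for epoch in (0, 14, 15, 22, 29):
        step = epoch * steps_per_epoch
        print(f"epoch {epoch:2d}: lr = {learning_rate(step, epoch):.6f}")
```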