From 4708b0811ee99bf9d4cf23f309817675191a434a Mon Sep 17 00:00:00 2001
From: Feng Ni
Date: Tue, 23 Aug 2022 22:27:32 +0800
Subject: [PATCH] fix iters less than batchsize in warmup (#6724)

---
 .../fairmot/_base_/optimizer_30e_momentum.yml |  3 +-
 configs/mot/jde/_base_/optimizer_30e.yml      |  3 +-
 configs/mot/jde/_base_/optimizer_60e.yml      |  3 +-
 ..._1088x608_visdrone_vehicle_bytetracker.yml |  3 +-
 ppdet/engine/trainer.py                       |  4 +++
 ppdet/optimizer/optimizer.py                  | 33 ++++---------------
 6 files changed, 18 insertions(+), 31 deletions(-)

diff --git a/configs/mot/fairmot/_base_/optimizer_30e_momentum.yml b/configs/mot/fairmot/_base_/optimizer_30e_momentum.yml
index 506b5de7e..987a9af72 100644
--- a/configs/mot/fairmot/_base_/optimizer_30e_momentum.yml
+++ b/configs/mot/fairmot/_base_/optimizer_30e_momentum.yml
@@ -7,8 +7,9 @@ LearningRate:
     gamma: 0.1
     milestones: [15, 22]
     use_warmup: True
-  - !BurninWarmup
+  - !ExpWarmup
     steps: 1000
+    power: 4
 
 OptimizerBuilder:
   optimizer:
diff --git a/configs/mot/jde/_base_/optimizer_30e.yml b/configs/mot/jde/_base_/optimizer_30e.yml
index eec339309..f90439a5c 100644
--- a/configs/mot/jde/_base_/optimizer_30e.yml
+++ b/configs/mot/jde/_base_/optimizer_30e.yml
@@ -7,8 +7,9 @@ LearningRate:
     gamma: 0.1
     milestones: [15, 22]
     use_warmup: True
-  - !BurninWarmup
+  - !ExpWarmup
     steps: 1000
+    power: 4
 
 OptimizerBuilder:
   optimizer:
diff --git a/configs/mot/jde/_base_/optimizer_60e.yml b/configs/mot/jde/_base_/optimizer_60e.yml
index 986764a42..64b81300d 100644
--- a/configs/mot/jde/_base_/optimizer_60e.yml
+++ b/configs/mot/jde/_base_/optimizer_60e.yml
@@ -7,8 +7,9 @@ LearningRate:
     gamma: 0.1
     milestones: [30, 44]
     use_warmup: True
-  - !BurninWarmup
+  - !ExpWarmup
     steps: 1000
+    power: 4
 
 OptimizerBuilder:
   optimizer:
diff --git a/configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml b/configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml
index 815bc2290..a1c1de91d 100644
--- a/configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml
+++ b/configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml
@@ -63,8 +63,9 @@ LearningRate:
     gamma: 0.1
     milestones: [15, 22]
     use_warmup: True
-  - !BurninWarmup
+  - !ExpWarmup
     steps: 1000
+    power: 4
 
 OptimizerBuilder:
   optimizer:
diff --git a/ppdet/engine/trainer.py b/ppdet/engine/trainer.py
index a685613ed..6701162c4 100644
--- a/ppdet/engine/trainer.py
+++ b/ppdet/engine/trainer.py
@@ -150,6 +150,10 @@ class Trainer(object):
         # build optimizer in train mode
         if self.mode == 'train':
             steps_per_epoch = len(self.loader)
+            if steps_per_epoch < 1:
+                logger.warning(
+                    "Samples in dataset are less than batch_size, please set smaller batch_size in TrainReader."
+                )
             self.lr = create('LearningRate')(steps_per_epoch)
             self.optimizer = create('OptimizerBuilder')(self.lr,
                                                         self.model)
diff --git a/ppdet/optimizer/optimizer.py b/ppdet/optimizer/optimizer.py
index e8a0dd8c8..1d42eaa2d 100644
--- a/ppdet/optimizer/optimizer.py
+++ b/ppdet/optimizer/optimizer.py
@@ -176,6 +176,7 @@ class LinearWarmup(object):
         value = []
         warmup_steps = self.epochs * step_per_epoch \
             if self.epochs is not None else self.steps
+        warmup_steps = max(warmup_steps, 1)
         for i in range(warmup_steps + 1):
             if warmup_steps > 0:
                 alpha = i / warmup_steps
@@ -187,31 +188,6 @@ class LinearWarmup(object):
         return boundary, value
 
 
-@serializable
-class BurninWarmup(object):
-    """
-    Warm up learning rate in burnin mode
-    Args:
-        steps (int): warm up steps
-    """
-
-    def __init__(self, steps=1000):
-        super(BurninWarmup, self).__init__()
-        self.steps = steps
-
-    def __call__(self, base_lr, step_per_epoch):
-        boundary = []
-        value = []
-        burnin = min(self.steps, step_per_epoch)
-        for i in range(burnin + 1):
-            factor = (i * 1.0 / burnin)**4
-            lr = base_lr * factor
-            value.append(lr)
-            if i > 0:
-                boundary.append(i)
-        return boundary, value
-
-
 @serializable
 class ExpWarmup(object):
     """
@@ -220,19 +196,22 @@ class ExpWarmup(object):
         steps (int): warm up steps.
         epochs (int|None): use epochs as warm up steps, the priority of
             `epochs` is higher than `steps`. Default: None.
+        power (int): Exponential coefficient. Default: 2.
     """
 
-    def __init__(self, steps=5, epochs=None):
+    def __init__(self, steps=1000, epochs=None, power=2):
         super(ExpWarmup, self).__init__()
         self.steps = steps
        self.epochs = epochs
+        self.power = power
 
     def __call__(self, base_lr, step_per_epoch):
         boundary = []
         value = []
         warmup_steps = self.epochs * step_per_epoch if self.epochs is not None else self.steps
+        warmup_steps = max(warmup_steps, 1)
         for i in range(warmup_steps + 1):
-            factor = (i / float(warmup_steps))**2
+            factor = (i / float(warmup_steps))**self.power
             value.append(base_lr * factor)
             if i > 0:
                 boundary.append(i)
--
GitLab
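
Note (editor's illustration, not part of the patch): the sketch below mirrors the __call__ logic of the patched ExpWarmup from the hunk above as a plain function, to show the effect of the max(warmup_steps, 1) guard when the loader yields fewer iterations than one batch. The function name exp_warmup_schedule and the printed example are hypothetical helpers for illustration only.

    # Standalone sketch of the patched ExpWarmup schedule (assumption: same math
    # as ExpWarmup.__call__ in the diff above, just outside the class).
    def exp_warmup_schedule(base_lr, step_per_epoch, steps=1000, epochs=None, power=4):
        boundary, value = [], []
        warmup_steps = epochs * step_per_epoch if epochs is not None else steps
        # Guard added by this patch: never let warmup_steps drop to 0, which
        # previously caused a division by zero when iters < batch_size.
        warmup_steps = max(warmup_steps, 1)
        for i in range(warmup_steps + 1):
            factor = (i / float(warmup_steps))**power
            value.append(base_lr * factor)
            if i > 0:
                boundary.append(i)
        return boundary, value

    # With a tiny dataset (step_per_epoch == 0), the removed BurninWarmup computed
    # burnin = min(steps, step_per_epoch) == 0 and divided by zero; the guarded
    # schedule instead degenerates to a valid one-step warmup.
    print(exp_warmup_schedule(base_lr=0.01, step_per_epoch=0, epochs=1)[1])  # [0.0, 0.01]

The configs keep the old burnin behaviour by setting power: 4 on ExpWarmup, since BurninWarmup raised the warmup factor to the fourth power.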