diff --git a/configs/mot/fairmot/_base_/optimizer_30e_momentum.yml b/configs/mot/fairmot/_base_/optimizer_30e_momentum.yml
index 506b5de7ed4266656b845a432b992d22f3557c56..987a9af72ef9e69c5354d53d3c2c74919fea5365 100644
--- a/configs/mot/fairmot/_base_/optimizer_30e_momentum.yml
+++ b/configs/mot/fairmot/_base_/optimizer_30e_momentum.yml
@@ -7,8 +7,9 @@ LearningRate:
     gamma: 0.1
     milestones: [15, 22]
     use_warmup: True
-  - !BurninWarmup
+  - !ExpWarmup
     steps: 1000
+    power: 4
 
 OptimizerBuilder:
   optimizer:
diff --git a/configs/mot/jde/_base_/optimizer_30e.yml b/configs/mot/jde/_base_/optimizer_30e.yml
index eec33930926877319aff8b00de516068e646aaea..f90439a5c52573ccfa73c208dc82289b7de9ed31 100644
--- a/configs/mot/jde/_base_/optimizer_30e.yml
+++ b/configs/mot/jde/_base_/optimizer_30e.yml
@@ -7,8 +7,9 @@ LearningRate:
     gamma: 0.1
     milestones: [15, 22]
     use_warmup: True
-  - !BurninWarmup
+  - !ExpWarmup
     steps: 1000
+    power: 4
 
 OptimizerBuilder:
   optimizer:
diff --git a/configs/mot/jde/_base_/optimizer_60e.yml b/configs/mot/jde/_base_/optimizer_60e.yml
index 986764a42f6bfb24d09cb23b49ed6931dbed9352..64b81300ded7b5b43ddab2edaaf0c24da547fe89 100644
--- a/configs/mot/jde/_base_/optimizer_60e.yml
+++ b/configs/mot/jde/_base_/optimizer_60e.yml
@@ -7,8 +7,9 @@ LearningRate:
     gamma: 0.1
     milestones: [30, 44]
     use_warmup: True
-  - !BurninWarmup
+  - !ExpWarmup
     steps: 1000
+    power: 4
 
 OptimizerBuilder:
   optimizer:
diff --git a/configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml b/configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml
index 815bc22909b31fada1c52acdc2dcc0f2481de57e..a1c1de91dc3860b38a1f641cc719cbcfab92d7f3 100644
--- a/configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml
+++ b/configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml
@@ -63,8 +63,9 @@ LearningRate:
     gamma: 0.1
     milestones: [15, 22]
     use_warmup: True
-  - !BurninWarmup
+  - !ExpWarmup
     steps: 1000
+    power: 4
 
 OptimizerBuilder:
   optimizer:
diff --git a/ppdet/engine/trainer.py b/ppdet/engine/trainer.py
index a685613edaa66ff8f88c82ff3ce54bb048dd134d..6701162c4b45f7eb54df50c0ce0be74f3943d0cd 100644
--- a/ppdet/engine/trainer.py
+++ b/ppdet/engine/trainer.py
@@ -150,6 +150,10 @@ class Trainer(object):
         # build optimizer in train mode
         if self.mode == 'train':
             steps_per_epoch = len(self.loader)
+            if steps_per_epoch < 1:
+                logger.warning(
+                    "Samples in the dataset are fewer than batch_size, please set a smaller batch_size in TrainReader."
+                )
             self.lr = create('LearningRate')(steps_per_epoch)
             self.optimizer = create('OptimizerBuilder')(self.lr,
                                                         self.model)
diff --git a/ppdet/optimizer/optimizer.py b/ppdet/optimizer/optimizer.py
index e8a0dd8c880699044a7af52a314b33bff27c683c..1d42eaa2d756213a05a77ab2ae1716b1b0fcb711 100644
--- a/ppdet/optimizer/optimizer.py
+++ b/ppdet/optimizer/optimizer.py
@@ -176,6 +176,7 @@ class LinearWarmup(object):
         value = []
         warmup_steps = self.epochs * step_per_epoch \
             if self.epochs is not None else self.steps
+        warmup_steps = max(warmup_steps, 1)
         for i in range(warmup_steps + 1):
             if warmup_steps > 0:
                 alpha = i / warmup_steps
@@ -187,31 +188,6 @@ class LinearWarmup(object):
         return boundary, value
 
 
-@serializable
-class BurninWarmup(object):
-    """
-    Warm up learning rate in burnin mode
-    Args:
-        steps (int): warm up steps
-    """
-
-    def __init__(self, steps=1000):
-        super(BurninWarmup, self).__init__()
-        self.steps = steps
-
-    def __call__(self, base_lr, step_per_epoch):
-        boundary = []
-        value = []
-        burnin = min(self.steps, step_per_epoch)
-        for i in range(burnin + 1):
-            factor = (i * 1.0 / burnin)**4
-            lr = base_lr * factor
-            value.append(lr)
-            if i > 0:
-                boundary.append(i)
-        return boundary, value
-
-
 @serializable
 class ExpWarmup(object):
     """
@@ -220,19 +196,22 @@ class ExpWarmup(object):
         steps (int): warm up steps.
         epochs (int|None): use epochs as warm up steps, the priority of
             `epochs` is higher than `steps`. Default: None.
+        power (int): exponent of the warmup factor. Default: 2.
     """
 
-    def __init__(self, steps=5, epochs=None):
+    def __init__(self, steps=1000, epochs=None, power=2):
         super(ExpWarmup, self).__init__()
         self.steps = steps
         self.epochs = epochs
+        self.power = power
 
     def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        warmup_steps = self.epochs * step_per_epoch if self.epochs is not None else self.steps
+        warmup_steps = max(warmup_steps, 1)
        for i in range(warmup_steps + 1):
-            factor = (i / float(warmup_steps))**2
+            factor = (i / float(warmup_steps))**self.power
            value.append(base_lr * factor)
            if i > 0:
                boundary.append(i)
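Note on equivalence: `ExpWarmup` with `power: 4` reproduces the schedule of the removed `BurninWarmup`, whose factor was `(i / burnin) ** 4`. The one behavioral difference is that `BurninWarmup` clamped the warmup length to `min(steps, step_per_epoch)`, while `ExpWarmup` uses `steps` as-is. Below is a minimal standalone sketch (plain Python; the helper names are illustrative, not part of ppdet) checking that the two curves match whenever `step_per_epoch >= steps`:

```python
# Standalone sketch: compares the removed BurninWarmup schedule with
# ExpWarmup(power=4) introduced by this patch. Helper names are illustrative.

def exp_warmup(base_lr, steps=1000, power=2):
    """Mirrors ExpWarmup.__call__ for epochs=None."""
    warmup_steps = max(steps, 1)  # guard added in this patch
    boundary, value = [], []
    for i in range(warmup_steps + 1):
        factor = (i / float(warmup_steps))**power
        value.append(base_lr * factor)
        if i > 0:
            boundary.append(i)
    return boundary, value


def burnin_warmup(base_lr, steps=1000, step_per_epoch=2000):
    """Mirrors the removed BurninWarmup.__call__."""
    burnin = min(steps, step_per_epoch)  # clamp that ExpWarmup does not apply
    boundary, value = [], []
    for i in range(burnin + 1):
        factor = (i * 1.0 / burnin)**4
        value.append(base_lr * factor)
        if i > 0:
            boundary.append(i)
    return boundary, value


if __name__ == "__main__":
    # Identical boundaries and values whenever step_per_epoch >= steps.
    assert burnin_warmup(5e-4) == exp_warmup(5e-4, steps=1000, power=4)
```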