From 4247fac82ec39cd1c18731a689367108af4a526c Mon Sep 17 00:00:00 2001 From: Tingquan Gao Date: Fri, 4 Aug 2023 12:16:01 +0800 Subject: [PATCH] support Piecewise.learning_rate (#2899) --- ppcls/optimizer/learning_rate.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/ppcls/optimizer/learning_rate.py b/ppcls/optimizer/learning_rate.py index ecd54f14..f352fd00 100644 --- a/ppcls/optimizer/learning_rate.py +++ b/ppcls/optimizer/learning_rate.py @@ -403,10 +403,23 @@ class Piecewise(LRBase): warmup_start_lr=0.0, last_epoch=-1, by_epoch=False, + learning_rate=None, **kwargs): + if learning_rate: + decay_epochs = list(range(0, epochs, 30))[1:] + values = [ + learning_rate * (0.1**i) + for i in range(len(decay_epochs) + 1) + ] + logger.warning( + "When 'learning_rate' of Piecewise has been set, " + "the learning rate scheduler would be set by the rule that lr decay 10 times every 30 epochs. " + f"So, the 'decay_epochs' and 'values' have been set to {decay_epochs} and {values} respectively." + ) super(Piecewise, self).__init__(epochs, step_each_epoch, values[0], warmup_epoch, warmup_start_lr, last_epoch, by_epoch) + self.values = values self.boundaries_steps = [e * step_each_epoch for e in decay_epochs] if self.by_epoch is True: -- GitLab