未验证 提交 3d1dfea9 编写于 作者: S shangliang Xu 提交者: GitHub

[dev] add use_epoch in LinearWarmup (#5366)

上级 1acb1852
...@@ -132,19 +132,24 @@ class LinearWarmup(object): ...@@ -132,19 +132,24 @@ class LinearWarmup(object):
Args: Args:
steps (int): warm up steps steps (int): warm up steps
start_factor (float): initial learning rate factor start_factor (float): initial learning rate factor
epochs (int|None): use epochs as warm up steps, the priority
of `epochs` is higher than `steps`. Default: None.
""" """
def __init__(self, steps=500, start_factor=1. / 3, epochs=None):
    """Configure the linear warm-up schedule.

    Args:
        steps (int): number of warm-up steps. Default: 500.
        start_factor (float): initial learning-rate factor at step 0.
            Default: 1/3.
        epochs (int|None): if given, warm-up length is expressed in
            epochs and takes priority over `steps`. Default: None.
    """
    super(LinearWarmup, self).__init__()
    # Store the raw configuration; conversion from epochs to steps is
    # deferred until the schedule is built (step_per_epoch is known then).
    self.steps = steps
    self.start_factor = start_factor
    self.epochs = epochs
def __call__(self, base_lr, step_per_epoch): def __call__(self, base_lr, step_per_epoch):
boundary = [] boundary = []
value = [] value = []
for i in range(self.steps + 1): warmup_steps = self.epochs * step_per_epoch \
if self.steps > 0: if self.epochs is not None else self.steps
alpha = i / self.steps for i in range(warmup_steps + 1):
if warmup_steps > 0:
alpha = i / warmup_steps
factor = self.start_factor * (1 - alpha) + alpha factor = self.start_factor * (1 - alpha) + alpha
lr = base_lr * factor lr = base_lr * factor
value.append(lr) value.append(lr)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册