step_each_epoch (int): number of iterations within an epoch
learning_rate (float): learning rate
warmup_epoch (int): number of warmup epoch(s)
warmup_start_lr (float): start learning rate within warmup
last_epoch (int): last epoch
by_epoch (bool): learning rate decays by epoch when by_epoch is True, else by iter
verbose (bool): If True, prints a message to stdout for each update. Defaults to False
"""
def __init__(self,
epochs: int,
step_each_epoch: int,
learning_rate: float,
warmup_epoch: int,
warmup_start_lr: float,
last_epoch: int,
by_epoch: bool,
verbose: bool = False) -> None:
"""Initialize and record the necessary parameters
"""
super(LRBase, self).__init__()
if warmup_epoch >= epochs:
msg=f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}."
step_each_epoch (int): number of iterations within an epoch
learning_rate (float): learning rate
end_lr (float, optional): The minimum final learning rate. Defaults to 0.0.
power (float, optional): Power of polynomial. Defaults to 1.0.
warmup_epoch (int): number of warmup epoch(s)
warmup_start_lr (float): start learning rate within warmup
last_epoch (int): last epoch
by_epoch (bool): learning rate decays by epoch when by_epoch is True, else by iter
"""
def __init__(self,
epochs,
step_each_epoch,
learning_rate,
end_lr=0.0,
power=1.0,
cycle=False,
warmup_epoch=0,
warmup_start_lr=0.0,
last_epoch=-1,
by_epoch=False,
**kwargs):
super().__init__()
if warmup_epoch >= epochs:
msg=f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}."
epochs (int): total epoch(s)
step_each_epoch (int): number of iterations within an epoch
learning_rate (float): learning rate
eta_min (float, optional): Minimum learning rate. Defaults to 0.0.
warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0.
warmup_start_lr (float, optional): start learning rate within warmup. Defaults to 0.0.
last_epoch (int, optional): last epoch. Defaults to -1.
by_epoch (bool, optional): learning rate decays by epoch when by_epoch is True, else by iter. Defaults to False.
"""
def __init__(self,
epochs,
step_each_epoch,
learning_rate,
eta_min=0.0,
warmup_epoch=0,
warmup_start_lr=0.0,
last_epoch=-1,
by_epoch=False,
**kwargs):
super().__init__()
if warmup_epoch >= epochs:
msg=f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}."
epochs (int): total epoch(s)
step_each_epoch (int): number of iterations within an epoch
learning_rate (float): learning rate
step_size (int): the interval to update.
gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma``.
It should be less than 1.0. Default: 0.1.
warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0.
warmup_start_lr (float, optional): start learning rate within warmup. Defaults to 0.0.
last_epoch (int, optional): last epoch. Defaults to -1.
by_epoch (bool, optional): learning rate decays by epoch when by_epoch is True, else by iter. Defaults to False.
"""
def __init__(self,
epochs,
step_each_epoch,
learning_rate,
step_size,
gamma,
warmup_epoch=0,
warmup_start_lr=0.0,
last_epoch=-1,
by_epoch=False,
**kwargs):
super().__init__()
if warmup_epoch >= epochs:
msg=f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}."
epochs (int): total epoch(s)
step_each_epoch (int): number of iterations within an epoch
decay_epochs (List[int]): A list of steps numbers. The type of element in the list is python int.
values (List[float]): A list of learning rate values that will be picked during different epoch boundaries.
warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0.
warmup_start_lr (float, optional): start learning rate within warmup. Defaults to 0.0.
last_epoch (int, optional): last epoch. Defaults to -1.
by_epoch (bool, optional): learning rate decays by epoch when by_epoch is True, else by iter. Defaults to False.
"""
def __init__(self,
epochs,
step_each_epoch,
decay_epochs,
values,
warmup_epoch=0,
warmup_start_lr=0.0,
last_epoch=-1,
by_epoch=False,
**kwargs):
super().__init__()
if warmup_epoch >= epochs:
msg=f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}."
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
Args:
epochs (int): total epoch(s)
step_each_epoch (int): number of iterations within an epoch
learning_rate (float): learning rate
milestones (List[int]): List of epoch boundaries. Must be increasing.
gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma``. It should be less than 1.0. Defaults to 0.1.
warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0.
warmup_start_lr (float, optional): start learning rate within warmup. Defaults to 0.0.
last_epoch (int, optional): last epoch. Defaults to -1.
by_epoch (bool, optional): learning rate decays by epoch when by_epoch is True, else by iter. Defaults to False.
"""
def __init__(self,
epochs,
step_each_epoch,
learning_rate,
milestones,
gamma=0.1,
warmup_epoch=0,
warmup_start_lr=0.0,
last_epoch=-1,
verbose=False):
if not isinstance(milestones, (tuple, list)):
    raise TypeError(
        "The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s."
        % type(milestones))
if not all([
        milestones[i] < milestones[i + 1]
        for i in range(len(milestones) - 1)
]):
    raise ValueError('The elements of milestones must be incremented')
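# --- Illustrative sketch (not part of the class above) ---
# MultiStepDecay multiplies the LR by gamma each time an epoch in `milestones`
# is passed; the helper name is hypothetical.
import bisect

def _multistep_lr(cur_epoch, base_lr, milestones, gamma=0.1):
    # Count how many milestones have been reached and apply gamma that many times.
    return base_lr * gamma ** bisect.bisect_right(milestones, cur_epoch)

# e.g. _multistep_lr(40, 0.1, [30, 60, 90]) -> 0.1 * 0.1 = 0.01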