Commit a43aac32 authored by littletomatodonkey

fix optimizer

Parent b17fbac3
@@ -16,18 +16,124 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import paddle.fluid.optimizer as pfopt
-import paddle.fluid.regularizer as pfreg
+import sys
+import paddle.fluid as fluid
 
 __all__ = ['OptimizerBuilder']
 
 
+class L1Decay(object):
+    """
+    L1 Weight Decay Regularization, which encourages the weights to be sparse.
+
+    Args:
+        factor(float): regularization coeff. Default:0.0.
+    """
+
+    def __init__(self, factor=0.0):
+        super(L1Decay, self).__init__()
+        self.regularization_coeff = factor
+
+    def __call__(self):
+        reg = fluid.regularizer.L1Decay(
+            regularization_coeff=self.regularization_coeff)
+        return reg
+
+
+class L2Decay(object):
+    """
+    L2 Weight Decay Regularization, which encourages the weights to be small.
+
+    Args:
+        factor(float): regularization coeff. Default:0.0.
+    """
+
+    def __init__(self, factor=0.0):
+        super(L2Decay, self).__init__()
+        self.regularization_coeff = factor
+
+    def __call__(self):
+        reg = fluid.regularizer.L2Decay(
+            regularization_coeff=self.regularization_coeff)
+        return reg
+
+
+class Momentum(object):
+    """
+    Simple Momentum optimizer with velocity state.
+
+    Args:
+        learning_rate (float|Variable) - The learning rate used to update parameters.
+            Can be a float value or a Variable with one float value as data element.
+        momentum (float) - Momentum factor.
+        regularization (WeightDecayRegularizer, optional) - The strategy of regularization.
+    """
+
+    def __init__(self,
+                 learning_rate,
+                 momentum,
+                 parameter_list=None,
+                 regularization=None,
+                 **args):
+        super(Momentum, self).__init__()
+        self.learning_rate = learning_rate
+        self.momentum = momentum
+        self.parameter_list = parameter_list
+        self.regularization = regularization
+
+    def __call__(self):
+        opt = fluid.optimizer.Momentum(
+            learning_rate=self.learning_rate,
+            momentum=self.momentum,
+            parameter_list=self.parameter_list,
+            regularization=self.regularization)
+        return opt
+
+
+class RMSProp(object):
+    """
+    Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning rate method.
+
+    Args:
+        learning_rate (float|Variable) - The learning rate used to update parameters.
+            Can be a float value or a Variable with one float value as data element.
+        momentum (float) - Momentum factor.
+        rho (float) - rho value in equation.
+        epsilon (float) - avoid division by zero, default is 1e-6.
+        regularization (WeightDecayRegularizer, optional) - The strategy of regularization.
+    """
+
+    def __init__(self,
+                 learning_rate,
+                 momentum,
+                 rho=0.95,
+                 epsilon=1e-6,
+                 parameter_list=None,
+                 regularization=None,
+                 **args):
+        super(RMSProp, self).__init__()
+        self.learning_rate = learning_rate
+        self.momentum = momentum
+        self.rho = rho
+        self.epsilon = epsilon
+        self.parameter_list = parameter_list
+        self.regularization = regularization
+
+    def __call__(self):
+        opt = fluid.optimizer.RMSProp(
+            learning_rate=self.learning_rate,
+            momentum=self.momentum,
+            rho=self.rho,
+            epsilon=self.epsilon,
+            parameter_list=self.parameter_list,
+            regularization=self.regularization)
+        return opt
+
+
 class OptimizerBuilder(object):
     """
-    Build optimizer with fluid api in fluid.layers.optimizer,
-    such as fluid.layers.optimizer.Momentum()
-
-    https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn.html
-    https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/regularizer_cn.html
+    Build optimizer
 
     Args:
         function(str): optimizer name of learning rate
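Each of the classes added above is a thin, callable wrapper: __init__ only records configuration values, and the underlying fluid object is constructed when the instance is called. A minimal usage sketch of that deferred-construction pattern, assuming it runs inside this module so the wrappers and the Paddle 1.x fluid API are in scope (the concrete values are illustrative, not taken from the repo's configs):

# The wrapper stores the coefficient; calling it builds the actual regularizer.
reg = L2Decay(factor=1e-4)()
print(type(reg))  # an instance of fluid.regularizer.L2Decay

# Same pattern for optimizers: Momentum(...) holds the settings,
# and Momentum(...)() returns a ready-to-use fluid.optimizer.Momentum.
opt = Momentum(learning_rate=0.1, momentum=0.9, regularization=reg)()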
@@ -43,13 +149,15 @@ class OptimizerBuilder(object):
         self.params = params
         # create regularizer
         if regularizer is not None:
+            mod = sys.modules[__name__]
             reg_func = regularizer['function'] + 'Decay'
-            reg_factor = regularizer['factor']
-            reg = getattr(pfreg, reg_func)(reg_factor)
+            del regularizer['function']
+            reg = getattr(mod, reg_func)(**regularizer)()
             self.params['regularization'] = reg
 
     def __call__(self, learning_rate, parameter_list):
-        opt = getattr(pfopt, self.function)
+        mod = sys.modules[__name__]
+        opt = getattr(mod, self.function)
         return opt(learning_rate=learning_rate,
                    parameter_list=parameter_list,
-                   **self.params)
+                   **self.params)()
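With this change the builder no longer reaches into fluid.optimizer and fluid.regularizer directly; it resolves the wrapper classes defined in this module by name through sys.modules[__name__] and getattr, then calls the resulting wrapper to obtain the fluid object. A standalone sketch of that lookup, runnable without Paddle; the simplified L2Decay class and the build_regularizer helper are illustrative stand-ins, not part of the diff:

import sys

class L2Decay(object):
    # Stand-in for the wrapper above: store the factor, build on call.
    def __init__(self, factor=0.0):
        self.regularization_coeff = factor

    def __call__(self):
        return self

def build_regularizer(regularizer):
    # Mirrors the builder logic: 'L2' + 'Decay' names a class in this module,
    # the remaining config keys become keyword arguments, and the final ()
    # call turns the wrapper into the object stored in self.params.
    mod = sys.modules[__name__]
    reg_func = regularizer['function'] + 'Decay'
    del regularizer['function']
    return getattr(mod, reg_func)(**regularizer)()

reg = build_regularizer({'function': 'L2', 'factor': 1e-4})
print(reg.regularization_coeff)  # 0.0001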
@@ -276,7 +276,6 @@ def run(dataloader, config, net, optimizer=None, epoch=0, mode='train'):
         ("lr", AverageMeter(
             'lr', 'f', need_avg=False)),
         ("batch_time", AverageMeter('elapse', '.3f')),
-        ('reader_time', AverageMeter('reader', '.3f')),
     ]
     if not use_mix:
         topk_name = 'top{}'.format(config.topk)
@@ -287,7 +286,6 @@ def run(dataloader, config, net, optimizer=None, epoch=0, mode='train'):
     tic = time.time()
     for idx, batch in enumerate(dataloader()):
-        metric_list['reader_time'].update(time.time() - tic)
         batch_size = len(batch[0])
         feeds = create_feeds(batch, use_mix)
         fetchs = create_fetchs(feeds, net, config, mode)
...