Commit 2e40660e authored by W wanghaoshuang

Fix some issues.

Parent 19db989e
@@ -1155,7 +1155,7 @@ class Parameter(Variable):
         self.gradient_clip_attr = kwargs.get('gradient_clip_attr', None)
-        self.average = kwargs.get('average', True)
+        self.do_model_average = kwargs.get('do_model_average', None)
     def __str__(self):
         return self.to_string(True)
@@ -1177,7 +1177,7 @@ class Parameter(Variable):
         if with_details:
             res_str = Variable.to_string(self, throw_on_error, True)
             additional_attr = ("trainable", "optimize_attr", "regularizer",
-                               "gradient_clip_attr", "average")
+                               "gradient_clip_attr", "do_model_average")
             for attr_name in additional_attr:
                 res_str += "%s: %s\n" % (attr_name,
                                          str(getattr(self, attr_name)))
......
@@ -1489,8 +1489,7 @@ def batch_norm(input,
                name=None,
                moving_mean_name=None,
                moving_variance_name=None,
-               average_mean=True,
-               average_variance=True):
+               do_model_average_for_mean_and_var=False):
     """
     This function helps create an operator to implement
     the BatchNorm layer using the configurations from the input parameters.
@@ -1519,12 +1518,15 @@ def batch_norm(input,
     bias = helper.create_parameter(
         attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
+    if do_model_average_for_mean_and_var:
+        do_model_average_for_mean_and_var = None
     mean = helper.create_parameter(
         attr=ParamAttr(
             name=moving_mean_name,
             initializer=Constant(0.0),
             trainable=False,
-            average=average_variance),
+            do_model_average=do_model_average_for_mean_and_var),
         shape=param_shape,
         dtype=input.dtype)
     mean.stop_gradient = True
@@ -1534,7 +1536,7 @@ def batch_norm(input,
             name=moving_variance_name,
             initializer=Constant(1.0),
             trainable=False,
-            average=average_mean),
+            do_model_average=do_model_average_for_mean_and_var),
         shape=param_shape,
         dtype=input.dtype)
     variance.stop_gradient = True
......
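Editor's note: the two separate flags (average_mean, average_variance) are collapsed into a single do_model_average_for_mean_and_var argument that batch_norm forwards to the ParamAttr of both moving statistics; the new guard maps an explicit True to None before it reaches ParamAttr. Below is a minimal usage sketch, assuming the paddle.fluid layers API of this release; the data and conv layers and all variable names are illustrative only.

# Minimal usage sketch, assuming the paddle.fluid API of this release.
# The input pipeline below is illustrative only.
import paddle.fluid as fluid

image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
conv = fluid.layers.conv2d(input=image, num_filters=64, filter_size=3)

# After this change a single flag decides whether the moving mean and
# variance take part in model averaging; it defaults to False and is
# forwarded to both statistics' ParamAttr as do_model_average.
bn = fluid.layers.batch_norm(
    input=conv,
    act='relu',
    do_model_average_for_mean_and_var=False)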
@@ -840,7 +840,7 @@ class ModelAverage(Optimizer):
     """
     def __init__(self,
-                 average_window_rate=0.15,
+                 average_window_rate,
                  params_grads=None,
                  min_average_window=10000,
                  max_average_window=10000,
@@ -856,7 +856,7 @@ class ModelAverage(Optimizer):
                 params[param.name] = (param, grad)
         for param in framework.default_main_program().global_block(
         ).all_parameters():
-            if param.name not in params and param.average:
+            if param.name not in params and param.do_model_average != False:
                 grad = param.block.create_var(
                     name=unique_name.generate(".".join([param.name, 'tmp'])),
                     dtype=param.dtype,
......
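Editor's note: average_window_rate loses its default and becomes a required argument, and the inclusion test changes from param.average to param.do_model_average != False, so a parameter left at the default None is still averaged and only an explicit False opts out. A small self-contained sketch of that selection rule follows; it uses mock objects, not the library's Parameter class.

# Self-contained illustration of the new selection rule; MockParam is an
# illustrative stand-in, not the library's Parameter class.
class MockParam(object):
    def __init__(self, name, do_model_average=None):
        self.name = name
        self.do_model_average = do_model_average

params = [
    MockParam('fc_0.w_0'),                                   # None  -> averaged
    MockParam('batch_norm_0.w_1', do_model_average=False),   # False -> skipped
    MockParam('conv2d_0.w_0', do_model_average=True),        # True  -> averaged
]

# Mirrors the check in the diff: only an explicit False is excluded.
averaged = [p.name for p in params if p.do_model_average != False]
print(averaged)  # ['fc_0.w_0', 'conv2d_0.w_0']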
@@ -29,14 +29,14 @@ class ParamAttr(object):
                  regularizer=None,
                  trainable=True,
                  gradient_clip=None,
-                 average=True):
+                 do_model_average=None):
         self.name = name
         self.initializer = initializer
         self.learning_rate = learning_rate
         self.regularizer = regularizer
         self.trainable = trainable
         self.gradient_clip = gradient_clip
-        self.average = average
+        self.model_average = do_model_average
     def set_default_initializer(self, initializer):
         if initializer is None:
@@ -83,7 +83,7 @@ class ParamAttr(object):
             'regularizer': self.regularizer,
             'trainable': self.trainable,
             'gradient_clip_attr': self.gradient_clip,
-            'average': self.average
+            'model_average': self.model_average
         }
         if with_initializer:
             kwargs['initializer'] = self.initializer
......
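Editor's note: ParamAttr replaces average=True with do_model_average=None and returns it from to_kwargs under the 'model_average' key. A hedged usage sketch, assuming the paddle.fluid API of this release (the fc layer and all names are illustrative only), showing how a weight can be excluded from model averaging:

# Hedged usage sketch, assuming the paddle.fluid API of this release;
# the fc layer and the names below are illustrative only.
import paddle.fluid as fluid

# An explicit False opts this weight out of model averaging; leaving the
# argument at its default of None lets ModelAverage include it.
w_attr = fluid.ParamAttr(name='fc_0.w_0', do_model_average=False)

x = fluid.layers.data(name='x', shape=[128], dtype='float32')
y = fluid.layers.fc(input=x, size=10, param_attr=w_attr)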