diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 872464af732f63ae5525310eaf8335dedea4770a..2d073bfa4cf07596dd3a5ae79d545d8322417e5c 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -145,14 +145,14 @@ paddle.fluid.layers.unstack ArgSpec(args=['x', 'axis', 'num'], varargs=None, key
 paddle.fluid.layers.sequence_enumerate ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None))
 paddle.fluid.layers.expand ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_concat ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'out', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None, None))
-paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None))
+paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
 paddle.fluid.layers.uniform_random_batch_size_like ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0))
 paddle.fluid.layers.gaussian_random ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype', 'use_mkldnn'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32', False))
 paddle.fluid.layers.sampling_id ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32'))
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index a9696ac20060d1069a99a02a79a755a740e760f0..2fbf44977c60be799419b5c329939130f605e221 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -6630,14 +6630,12 @@ def _elementwise_op(helper):
     assert y is not None, 'y cannot be None in {}'.format(op_type)
     axis = helper.kwargs.get('axis', -1)
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
-    out = helper.kwargs.get('out', None)
-    if out is None:
-        name = helper.kwargs.get('name', None)
-        if name is None:
-            out = helper.create_tmp_variable(dtype=x.dtype)
-        else:
-            out = helper.create_variable(
-                name=name, dtype=x.dtype, persistable=False)
+    name = helper.kwargs.get('name', None)
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
 
     helper.append_op(
         type=op_type,
@@ -6650,13 +6648,7 @@
 
 
 @templatedoc()
-def scale(x,
-          scale=1.0,
-          bias=0.0,
-          bias_after_scale=True,
-          out=None,
-          act=None,
-          name=None):
+def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     """
     ${comment}
 
@@ -6665,7 +6657,6 @@ def scale(x,
         scale(${scale_type}): ${scale_comment}
         bias(${bias_type}): ${bias_comment}
         bias_after_scale(${bias_after_scale_type}): ${bias_after_scale_comment}
-        out(Tensor): Output tensor.
         act(basestring|None): Activation applied to the output.
         name(basestring|None): Name of the output.
 
@@ -6674,12 +6665,11 @@ def scale(x,
     """
 
     helper = LayerHelper('scale', **locals())
-    if out is None:
-        if name is None:
-            out = helper.create_tmp_variable(dtype=x.dtype)
-        else:
-            out = helper.create_variable(
-                name=name, dtype=x.dtype, persistable=False)
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
 
     helper.append_op(
         type='scale',
@@ -6693,73 +6683,31 @@
     return helper.append_activation(out)
 
 
-def elementwise_add(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_add(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_add', **locals()))
 
 
-def elementwise_div(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_div(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_div', **locals()))
 
 
-def elementwise_sub(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_sub(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_sub', **locals()))
 
 
-def elementwise_mul(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_mul(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
 
 
-def elementwise_max(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_max(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_max', **locals()))
 
 
-def elementwise_min(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_min(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_min', **locals()))
 
 
-def elementwise_pow(x,
-                    y,
-                    out=None,
-                    axis=-1,
-                    use_mkldnn=False,
-                    act=None,
-                    name=None):
+def elementwise_pow(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
 
 
@@ -6771,7 +6719,6 @@ for func in [
     func.__doc__ = _generate_doc_string_(
         op_proto,
         additional_args_lines=[
-            "out (Tensor): The output tensor of elementwise op.",
             "act (basestring|None): Activation applied to the output.",
             "name (basestring|None): Name of the output."
         ])
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 8a533035b0a3d175073cb0b0884aa507bcff782c..220d065f8f1cc02508dea2679820e1f7f490866d 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -56,6 +56,8 @@ for _OP in set(__all__):
 # e.g.: test_program_code.py, test_dist_train.py
 globals()['_scale'] = generate_layer_fn('scale')
 
+globals()['_elementwise_div'] = generate_layer_fn('elementwise_div')
+
 __all__ += __activations_noattr__
 
 for _OP in set(__activations_noattr__):
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index ad09005d866b10146e6fcd7cf108c51f34322607..1b9571f6d3a6a69d1ac35f6be74b80eaa2ce6251 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -26,6 +26,7 @@ from .layer_helper import LayerHelper
 from .regularizer import append_regularization_ops
 from .clip import append_gradient_clip_ops, error_clip_callback
 from contextlib import contextmanager
+from .layers import ops
 
 __all__ = [
     'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl',
@@ -1301,7 +1302,7 @@ class ModelAverage(Optimizer):
             x=tmp, dtype='float32' if self._dtype == None else self._dtype)
         sum = layers.cast(
             x=sum, dtype='float32' if self._dtype == None else self._dtype)
-        layers.elementwise_div(x=sum, y=tmp, out=param)
+        ops._elementwise_div(x=sum, y=tmp, out=param)
 
     def _add_average_restore_op(self, block, param_grad):
         param = block._clone_variable(param_grad[0])
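
For reference, a minimal sketch of how callers use the trimmed public API after
this patch: the layer now always creates its own output variable (optionally
named via `name`) instead of taking one through the removed `out` argument. The
data layers below are illustrative placeholders, not part of the patch:

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[13], dtype='float32')

    # The result variable is created inside the layer; `name` only controls
    # what it is called, and `act` applies an activation to it.
    z = fluid.layers.elementwise_add(x, y, act='relu', name='sum_xy')

    # scale() follows the same convention: no `out`, the output is returned.
    s = fluid.layers.scale(x, scale=2.0, bias=1.0, bias_after_scale=True)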
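The ops.py registration exists because ModelAverage still has to write the
division result into an existing parameter variable, which the public layer no
longer allows. Functions produced by generate_layer_fn() keep accepting a
caller-provided output variable (the `out=` keyword, as the optimizer.py hunk
above shows), so the optimizer switches to the generated `_elementwise_div`. A
rough sketch of that internal call path, with hypothetical stand-ins for the
optimizer's `sum`, `tmp`, and `param` variables:

    import paddle.fluid as fluid
    from paddle.fluid.layers import ops

    param = fluid.layers.create_global_var(
        shape=[1], value=0.0, dtype='float32', name='param')
    tmp = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.0)
    accum = fluid.layers.fill_constant(shape=[1], dtype='float32', value=6.0)

    # Unlike the trimmed layers.elementwise_div, the generated function still
    # takes `out=`, so the quotient is written directly into `param`.
    ops._elementwise_div(x=accum, y=tmp, out=param)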