未验证 提交 1cbaf71a 编写于 作者: Z Zeng Jinle 提交者: GitHub

Merge pull request #13620 from sneaxiy/fix_api_kwargs2

Hide out params in elementwise layers and scale layer
......@@ -145,14 +145,14 @@ paddle.fluid.layers.unstack ArgSpec(args=['x', 'axis', 'num'], varargs=None, key
paddle.fluid.layers.sequence_enumerate ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None))
paddle.fluid.layers.expand ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.sequence_concat ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'out', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None, None))
paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None))
paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
paddle.fluid.layers.uniform_random_batch_size_like ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0))
paddle.fluid.layers.gaussian_random ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype', 'use_mkldnn'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32', False))
paddle.fluid.layers.sampling_id ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32'))
......
......@@ -6632,14 +6632,12 @@ def _elementwise_op(helper):
assert y is not None, 'y cannot be None in {}'.format(op_type)
axis = helper.kwargs.get('axis', -1)
use_mkldnn = helper.kwargs.get('use_mkldnn', False)
out = helper.kwargs.get('out', None)
if out is None:
name = helper.kwargs.get('name', None)
if name is None:
out = helper.create_tmp_variable(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
name = helper.kwargs.get('name', None)
if name is None:
out = helper.create_tmp_variable(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type=op_type,
......@@ -6652,13 +6650,7 @@ def _elementwise_op(helper):
@templatedoc()
def scale(x,
scale=1.0,
bias=0.0,
bias_after_scale=True,
out=None,
act=None,
name=None):
def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
"""
${comment}
......@@ -6667,7 +6659,6 @@ def scale(x,
scale(${scale_type}): ${scale_comment}
bias(${bias_type}): ${bias_comment}
bias_after_scale(${bias_after_scale_type}): ${bias_after_scale_comment}
out(Tensor): Output tensor.
act(basestring|None): Activation applied to the output.
name(basestring|None): Name of the output.
......@@ -6676,12 +6667,11 @@ def scale(x,
"""
helper = LayerHelper('scale', **locals())
if out is None:
if name is None:
out = helper.create_tmp_variable(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
if name is None:
out = helper.create_tmp_variable(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type='scale',
......@@ -6695,73 +6685,31 @@ def scale(x,
return helper.append_activation(out)
def elementwise_add(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
    # Thin wrapper: forward every argument (via locals()) to the shared
    # element-wise dispatcher with op type 'elementwise_add'.
    # NOTE: locals() must be captured before any extra local is assigned,
    # so the helper sees exactly the caller-supplied arguments.
    helper = LayerHelper('elementwise_add', **locals())
    return _elementwise_op(helper)
def elementwise_div(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
    # Thin wrapper: forward every argument (via locals()) to the shared
    # element-wise dispatcher with op type 'elementwise_div'.
    # NOTE: locals() is evaluated before 'helper' is bound, so only the
    # caller-supplied arguments are passed through.
    helper = LayerHelper('elementwise_div', **locals())
    return _elementwise_op(helper)
def elementwise_sub(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
    # Thin wrapper: forward every argument (via locals()) to the shared
    # element-wise dispatcher with op type 'elementwise_sub'.
    helper = LayerHelper('elementwise_sub', **locals())
    return _elementwise_op(helper)
def elementwise_mul(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
    # Thin wrapper: forward every argument (via locals()) to the shared
    # element-wise dispatcher with op type 'elementwise_mul'.
    helper = LayerHelper('elementwise_mul', **locals())
    return _elementwise_op(helper)
def elementwise_max(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
    # Thin wrapper: forward every argument (via locals()) to the shared
    # element-wise dispatcher with op type 'elementwise_max'.
    helper = LayerHelper('elementwise_max', **locals())
    return _elementwise_op(helper)
def elementwise_min(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
    # Thin wrapper: forward every argument (via locals()) to the shared
    # element-wise dispatcher with op type 'elementwise_min'.
    helper = LayerHelper('elementwise_min', **locals())
    return _elementwise_op(helper)
def elementwise_pow(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
    # Thin wrapper: forward every argument (via locals()) to the shared
    # element-wise dispatcher with op type 'elementwise_pow'.
    helper = LayerHelper('elementwise_pow', **locals())
    return _elementwise_op(helper)
......@@ -6773,7 +6721,6 @@ for func in [
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=[
"out (Tensor): The output tensor of elementwise op.",
"act (basestring|None): Activation applied to the output.",
"name (basestring|None): Name of the output."
])
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册