Unverified commit 2cd558fb, authored by Zeng Jinle, committed by GitHub

Merge pull request #13561 from sneaxiy/fix_api_kwargs

[Urgent] Add out params to some apis
@@ -162,14 +162,14 @@ paddle.fluid.layers.unstack ArgSpec(args=['x', 'axis', 'num'], varargs=None, key
 paddle.fluid.layers.sequence_enumerate ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None))
 paddle.fluid.layers.expand ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_concat ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None))
-paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
-paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
-paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
-paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
-paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
-paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
-paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'out', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None, None))
+paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
 paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None)
...
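For context, the `ArgSpec(...)` lines above match the repr produced by Python's `inspect.getargspec`, which is presumably how the spec file is regenerated; a minimal sketch (the `scale` stub below is only a stand-in with the new signature, not the real layer):

```python
import inspect

# Stand-in with the post-PR signature; the real function lives in
# paddle.fluid.layers.
def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, out=None, act=None,
          name=None):
    pass

# repr(getargspec(...)) yields the same "ArgSpec(args=[...], varargs=None,
# keywords=None, defaults=(...))" form used in the spec lines above.
# (getargspec is deprecated on newer Pythons; getfullargspec is the modern
# equivalent.)
print('paddle.fluid.layers.scale ' + str(inspect.getargspec(scale)))
```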
@@ -6471,12 +6471,14 @@ def _elementwise_op(helper):
     assert y is not None, 'y cannot be None in {}'.format(op_type)
     axis = helper.kwargs.get('axis', -1)
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
-    name = helper.kwargs.get('name', None)
-    if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.kwargs.get('out', None)
+    if out is None:
+        name = helper.kwargs.get('name', None)
+        if name is None:
+            out = helper.create_tmp_variable(dtype=x.dtype)
+        else:
+            out = helper.create_variable(
+                name=name, dtype=x.dtype, persistable=False)
     helper.append_op(
         type=op_type,
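The patched branch gives an explicit `out` argument priority over `name`. A standalone sketch of that precedence (the helper names below are hypothetical; only the ordering mirrors the patched `_elementwise_op`):

```python
# Hypothetical illustration of the output-selection order introduced above:
# 1) reuse a caller-supplied `out`, 2) otherwise create a named variable if
# `name` is given, 3) otherwise fall back to an anonymous temporary.
def choose_output(out, name, create_tmp, create_named):
    if out is not None:
        return out
    if name is None:
        return create_tmp()
    return create_named(name)

# With both `out` and `name` given, `out` wins.
assert choose_output('existing', 'ignored',
                     lambda: 'tmp', lambda n: 'named:' + n) == 'existing'
```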
@@ -6489,7 +6491,13 @@ def _elementwise_op(helper):
 @templatedoc()
-def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
+def scale(x,
+          scale=1.0,
+          bias=0.0,
+          bias_after_scale=True,
+          out=None,
+          act=None,
+          name=None):
     """
     ${comment}
@@ -6498,6 +6506,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
         scale(${scale_type}): ${scale_comment}
         bias(${bias_type}): ${bias_comment}
         bias_after_scale(${bias_after_scale_type}): ${bias_after_scale_comment}
+        out(Tensor): Output tensor.
         act(basestring|None): Activation applied to the output.
         name(basestring|None): Name of the output.
@@ -6506,11 +6515,12 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     """
     helper = LayerHelper('scale', **locals())
-    if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    if out is None:
+        if name is None:
+            out = helper.create_tmp_variable(dtype=x.dtype)
+        else:
+            out = helper.create_variable(
+                name=name, dtype=x.dtype, persistable=False)
     helper.append_op(
         type='scale',
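A minimal usage sketch of the updated `scale`, assuming a `paddle.fluid` build containing this change (`create_tensor` is used here only to pre-declare the destination variable):

```python
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[2], dtype='float32')

# Pre-declared output variable; with the new `out` argument, scale writes
# y = 2*x + 1 into it instead of allocating a fresh temporary.
y = fluid.layers.create_tensor(dtype='float32')
fluid.layers.scale(x, scale=2.0, bias=1.0, out=y)
```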
@@ -6524,31 +6534,73 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     return helper.append_activation(out)


-def elementwise_add(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_add(x,
+                    y,
+                    out=None,
+                    axis=-1,
+                    use_mkldnn=False,
+                    act=None,
+                    name=None):
     return _elementwise_op(LayerHelper('elementwise_add', **locals()))


-def elementwise_div(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_div(x,
+                    y,
+                    out=None,
+                    axis=-1,
+                    use_mkldnn=False,
+                    act=None,
+                    name=None):
     return _elementwise_op(LayerHelper('elementwise_div', **locals()))


-def elementwise_sub(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_sub(x,
+                    y,
+                    out=None,
+                    axis=-1,
+                    use_mkldnn=False,
+                    act=None,
+                    name=None):
     return _elementwise_op(LayerHelper('elementwise_sub', **locals()))


-def elementwise_mul(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_mul(x,
+                    y,
+                    out=None,
+                    axis=-1,
+                    use_mkldnn=False,
+                    act=None,
+                    name=None):
     return _elementwise_op(LayerHelper('elementwise_mul', **locals()))


-def elementwise_max(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_max(x,
+                    y,
+                    out=None,
+                    axis=-1,
+                    use_mkldnn=False,
+                    act=None,
+                    name=None):
     return _elementwise_op(LayerHelper('elementwise_max', **locals()))


-def elementwise_min(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_min(x,
+                    y,
+                    out=None,
+                    axis=-1,
+                    use_mkldnn=False,
+                    act=None,
+                    name=None):
     return _elementwise_op(LayerHelper('elementwise_min', **locals()))


-def elementwise_pow(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_pow(x,
+                    y,
+                    out=None,
+                    axis=-1,
+                    use_mkldnn=False,
+                    act=None,
+                    name=None):
     return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
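A similar sketch for the elementwise ops, again assuming a build that includes this branch:

```python
import paddle.fluid as fluid

a = fluid.layers.data(name='a', shape=[3], dtype='float32')
b = fluid.layers.data(name='b', shape=[3], dtype='float32')

# The result is written into the caller-supplied variable via the new `out`
# parameter instead of an internally created temporary.
total = fluid.layers.create_tensor(dtype='float32')
fluid.layers.elementwise_add(a, b, out=total)
```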
@@ -6560,6 +6612,7 @@ for func in [
     func.__doc__ = _generate_doc_string_(
         op_proto,
         additional_args_lines=[
+            "out (Tensor): The output tensor of elementwise op.",
             "act (basestring|None): Activation applied to the output.",
             "name (basestring|None): Name of the output."
         ])
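Since the docstrings for these ops are auto-generated, the new argument should appear in the rendered help; a quick way to check on an installed build containing this change:

```python
import paddle.fluid as fluid

# The generated docstring should now list `out` alongside `act` and `name`
# for each elementwise layer.
print(fluid.layers.elementwise_add.__doc__)
```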