Unverified commit 2c4b8393, authored by Xin Pan, committed by GitHub

Merge pull request #13573 from velconia/fix_api

Fix kwargs in clip and logical API
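Previously, `clip`, `clip_by_norm`, and the `logical_*` layers were auto-generated in `ops.py`, so their public signatures were the opaque `*args, **kwargs` (see the second API.spec hunk below). This patch reimplements them in `nn.py` with explicit, documented parameters. A minimal usage sketch of the new signatures (illustrative only; the variable names here are invented):

```python
import paddle.fluid as fluid

# Boolean inputs for the logical ops, a float input for the clip ops.
a = fluid.layers.data(name='a', shape=[2], dtype='bool')
b = fluid.layers.data(name='b', shape=[2], dtype='bool')
v = fluid.layers.data(name='v', shape=[2], dtype='float32')

# Explicit keyword arguments now match the documented ArgSpecs:
both = fluid.layers.logical_and(x=a, y=b)
neg = fluid.layers.logical_not(x=a)
clipped = fluid.layers.clip(x=v, min=-1.0, max=1.0)
norm_clipped = fluid.layers.clip_by_norm(x=v, max_norm=1.0)
```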
@@ -160,6 +160,12 @@ paddle.fluid.layers.gaussian_random_batch_size_like ArgSpec(args=['input', 'shap
 paddle.fluid.layers.sum ArgSpec(args=['x', 'use_mkldnn'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.layers.slice ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.shape ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None)
+paddle.fluid.layers.logical_and ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_or ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_xor ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_not ArgSpec(args=['x', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.clip ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.clip_by_norm ArgSpec(args=['x', 'max_norm', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
 paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None)
@@ -225,12 +231,6 @@ paddle.fluid.layers.is_empty ArgSpec(args=['x', 'cond'], varargs=None, keywords=
 paddle.fluid.layers.mean ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.mul ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.clip ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.clip_by_norm ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_and ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_or ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_xor ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_not ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.maxout ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.logsigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
...
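Each line in this spec file pairs a public symbol with the repr of its argument spec, presumably captured via `inspect` so that CI can flag accidental signature changes like the one fixed here. A hedged sketch of how one such line can be reproduced (the actual generator script is not part of this diff):

```python
import inspect
import paddle.fluid as fluid

def spec_line(qualified_name, func):
    # inspect.getargspec is the Python 2-era API whose ArgSpec namedtuple
    # repr matches the format of the lines above.
    return "{} {}".format(qualified_name, inspect.getargspec(func))

print(spec_line("paddle.fluid.layers.clip", fluid.layers.clip))
# Expected, per the first hunk above:
# paddle.fluid.layers.clip ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,))
```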
@@ -21,7 +21,7 @@ from .. import core
 from ..framework import Program, Variable, Operator
 from ..layer_helper import LayerHelper, unique_name
 from ..initializer import force_init_on_cpu
-from .ops import logical_and, logical_not, logical_or
+from .nn import logical_and, logical_not, logical_or
 import numpy
 import warnings
 import six
...
@@ -51,7 +51,9 @@ __all__ = [
     'expand', 'sequence_concat', 'scale', 'elementwise_add', 'elementwise_div',
     'elementwise_sub', 'elementwise_mul', 'elementwise_max', 'elementwise_min',
     'elementwise_pow', 'uniform_random_batch_size_like', 'gaussian_random',
-    'sampling_id', 'gaussian_random_batch_size_like', 'sum', 'slice', 'shape'
+    'sampling_id', 'gaussian_random_batch_size_like', 'sum', 'slice', 'shape',
+    'logical_and', 'logical_or', 'logical_xor', 'logical_not', 'clip',
+    'clip_by_norm'
 ]
@@ -6775,3 +6777,165 @@ for func in [
         "act (basestring|None): Activation applied to the output.",
         "name (basestring|None): Name of the output."
     ])
+
+
+def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
+    helper = LayerHelper(op_name, **locals())
+
+    if binary_op:
+        assert x.dtype == y.dtype
+
+    if out is None:
+        if name is None:
+            out = helper.create_tmp_variable(dtype=x.dtype)
+        else:
+            out = helper.create_variable(
+                name=name, dtype=x.dtype, persistable=False)
+
+    if binary_op:
+        helper.append_op(
+            type=op_name, inputs={"X": x,
+                                  "Y": y}, outputs={"Out": out})
+    else:
+        helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
+
+    return out
+
+
+@templatedoc()
+def logical_and(x, y, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        y(${y_type}): ${y_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)
+
+
+@templatedoc()
+def logical_or(x, y, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        y(${y_type}): ${y_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True)
+
+
+@templatedoc()
+def logical_xor(x, y, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        y(${y_type}): ${y_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True)
+
+
+@templatedoc()
+def logical_not(x, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False)
+
+
+@templatedoc()
+def clip(x, min, max, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        min(${min_type}): ${min_comment}
+        max(${max_type}): ${max_comment}
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    helper = LayerHelper("clip", **locals())
+
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+
+    helper.append_op(
+        type="clip",
+        inputs={"X": x},
+        attrs={"min": min,
+               "max": max},
+        outputs={"Out": out})
+
+    return out
+
+
+@templatedoc()
+def clip_by_norm(x, max_norm, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        max_norm(${max_norm_type}): ${max_norm_comment}
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    helper = LayerHelper("clip_by_norm", **locals())
+
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+
+    helper.append_op(
+        type="clip_by_norm",
+        inputs={"X": x},
+        attrs={"max_norm": max_norm},
+        outputs={"Out": out})
+
+    return out
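Two details of `_logical_op` above are worth noting: passing an explicit `out` skips output-variable creation entirely, while `name` only controls the name of the variable the helper creates. A hedged end-to-end sketch of the new keyword-style API (standard fluid 1.x executor boilerplate assumed; all variable names are invented):

```python
import numpy as np
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_xor(x, y, name='xor_result')  # named output Variable

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
out, = exe.run(fluid.default_main_program(),
               feed={'x': np.array([[True, False]]),
                     'y': np.array([[True, True]])},
               fetch_list=[res])
print(out)  # expected: [[False  True]]
```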
@@ -39,12 +39,6 @@ __all__ = [
     'mean',
     'mul',
     'sigmoid_cross_entropy_with_logits',
-    'clip',
-    'clip_by_norm',
-    'logical_and',
-    'logical_or',
-    'logical_xor',
-    'logical_not',
     'maxout',
 ]
...