diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index cf0ac058f11c08b47ca396dbdac6a34a7514b16b..4ea315257da6b07580d1b35b72fd0c93b4b1e1e0 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -177,6 +177,12 @@ paddle.fluid.layers.gaussian_random_batch_size_like ArgSpec(args=['input', 'shap
 paddle.fluid.layers.sum ArgSpec(args=['x', 'use_mkldnn'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.layers.slice ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.shape ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None)
+paddle.fluid.layers.logical_and ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_or ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_xor ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_not ArgSpec(args=['x', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.clip ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.clip_by_norm ArgSpec(args=['x', 'max_norm', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
 paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None)
@@ -242,12 +248,6 @@ paddle.fluid.layers.is_empty ArgSpec(args=['x', 'cond'], varargs=None, keywords=
 paddle.fluid.layers.mean ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.mul ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.clip ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.clip_by_norm ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_and ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_or ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_xor ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_not ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.maxout ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.logsigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 0049773bbeb514d5dfef490e73b9988bd5371029..c6250ff6ce5df8d8b0c78d538d736b77801f98f8 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -21,7 +21,7 @@ from .. import core
 from ..framework import Program, Variable, Operator
 from ..layer_helper import LayerHelper, unique_name
 from ..initializer import force_init_on_cpu
-from .ops import logical_and, logical_not, logical_or
+from .nn import logical_and, logical_not, logical_or
 import numpy
 import warnings
 import six
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index a9696ac20060d1069a99a02a79a755a740e760f0..b8bb95e0de7f0bb866b50a02802bc8fcfdcbb413 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -51,7 +51,9 @@ __all__ = [
     'expand', 'sequence_concat', 'scale', 'elementwise_add', 'elementwise_div',
     'elementwise_sub', 'elementwise_mul', 'elementwise_max', 'elementwise_min',
     'elementwise_pow', 'uniform_random_batch_size_like', 'gaussian_random',
-    'sampling_id', 'gaussian_random_batch_size_like', 'sum', 'slice', 'shape'
+    'sampling_id', 'gaussian_random_batch_size_like', 'sum', 'slice', 'shape',
+    'logical_and', 'logical_or', 'logical_xor', 'logical_not', 'clip',
+    'clip_by_norm'
 ]
@@ -953,8 +955,8 @@ def cross_entropy(input, label, soft_label=False, ignore_index=-100):
         soft_label (bool): a flag indicating whether to
                            interpretate the given labels as soft labels.
                            Default: `False`.
-        ignore_index (int): Specifies a target value that is ignored and does
-            not contribute to the input gradient. Only valid
+        ignore_index (int): Specifies a target value that is ignored and does
+            not contribute to the input gradient. Only valid
             if soft_label is set to False. Default: -100

     Returns:
@@ -2714,20 +2716,20 @@ def sequence_pad(x, pad_value, maxlen=None):

     Args:
         x(Variable): Input variable which should contain lod information.
-        pad_value(Variable): The Variable that holds values that will be fill
-            into padded steps. It can be a scalar or a tensor whose shape
-            equals to time steps in sequences. If it's a scalar, it will be
+        pad_value(Variable): The Variable that holds values that will be fill
+            into padded steps. It can be a scalar or a tensor whose shape
+            equals to time steps in sequences. If it's a scalar, it will be
             automatically broadcasted to the shape of time step.
-        maxlen(int, default None): The length of padded sequences. It can be
-            None or any positive int. When it is None, all sequences will be
-            padded up to the length of the longest one among them; when it a
-            certain positive value, it must be greater than the length of the
+        maxlen(int, default None): The length of padded sequences. It can be
+            None or any positive int. When it is None, all sequences will be
+            padded up to the length of the longest one among them; when it a
+            certain positive value, it must be greater than the length of the
             longest original sequence."
-
+
     Returns:
-        Variable: The padded sequence batch and the original lengths before
+        Variable: The padded sequence batch and the original lengths before
             padding. All sequences has the same length.
-
+
     Examples:
         .. code-block:: python
@@ -4343,8 +4345,8 @@ def softmax_with_cross_entropy(logits,
                            soft_label is set to true, Label is a Tensor with
         soft_label (bool): A flag to indicate whether to interpretate the given
                            labels as soft labels. By default, `soft_label` is set to False.
-        ignore_index (int): Specifies a target value that is ignored and does
-            not contribute to the input gradient. Only valid
+        ignore_index (int): Specifies a target value that is ignored and does
+            not contribute to the input gradient. Only valid
             if soft_label is set to False. Default: -100

     Returns:
@@ -4601,14 +4603,14 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):

 def squeeze(input, axes, name=None):
     """
-    Remove single-dimensional entries from the shape of a tensor. Takes a
-    parameter axes with a list of axes to squeeze. If axes is not provided, all
-    the single dimensions will be removed from the shape. If an axis is
+    Remove single-dimensional entries from the shape of a tensor. Takes a
+    parameter axes with a list of axes to squeeze. If axes is not provided, all
+    the single dimensions will be removed from the shape. If an axis is
     selected with shape entry not equal to one, an error is raised.
-
+
     Examples:
         Case 1:
-            Given
+            Given
                 X.shape = (1, 3, 1, 5)
             and
                 axes = [0]
@@ -4617,11 +4619,11 @@ def squeeze(input, axes, name=None):
         Case 2:
             Given
                 X.shape = (1, 3, 1, 5)
-            and
+            and
                 axes = []
             we get:
                 Out.shape = (3, 5)
-
+
     Args:
         input (Variable): The input variable to be squeezed.
         axes (list): List of integers, indicating the dimensions to be squeezed.
@@ -4651,14 +4653,14 @@ def unsqueeze(input, axes, name=None):
     """
-    Insert single-dimensional entries to the shape of a tensor. Takes one
-    required argument axes, a list of dimensions that will be inserted.
-    Dimension indices in axes are as seen in the output tensor.
+    Insert single-dimensional entries to the shape of a tensor. Takes one
+    required argument axes, a list of dimensions that will be inserted.
+    Dimension indices in axes are as seen in the output tensor.

-    For example:
-    Given a tensor such that tensor with shape [3, 4, 5],
+    For example:
+    Given a tensor such that tensor with shape [3, 4, 5],
     then Unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1].
-
+
     Args:
         input (Variable): The input variable to be unsqueezed.
         axes (list): List of integers, indicating the dimensions to be inserted.
@@ -5757,39 +5759,39 @@ def pad2d(input,
     Example:
         Given that X is a channel of image from input:
-
+
         X = [[1, 2, 3],
              [4, 5, 6]]
-
+
         Case 0:
-
+
             paddings = [0, 1, 2, 3],
             mode = 'constant'
            pad_value = 0
-
+
            Out = [[0, 0, 1, 2, 3, 0, 0, 0]
                   [0, 0, 4, 5, 6, 0, 0, 0]
                   [0, 0, 0, 0, 0, 0, 0, 0]]
-
+
         Case 1:
-
+
             paddings = [0, 1, 2, 1],
             mode = 'reflect'
-
+
            Out = [[3, 2, 1, 2, 3, 2]
                   [6, 5, 4, 5, 6, 5]
                   [3, 2, 1, 2, 3, 2]]
-
+
         Case 2:
-
+
             paddings = [0, 1, 2, 1],
             mode = 'edge'
-
+
            Out = [[1, 1, 1, 2, 3, 3]
                   [4, 4, 4, 5, 6, 6]
                   [4, 4, 4, 5, 6, 6]]
-
-
+
+
     Args:
         input (Variable): The input image with [N, C, H, W] format or [N, H, W, C] format.
         paddings (tuple|list): The padding size. If padding is a tuple, it must
@@ -5988,7 +5990,7 @@ def prelu(x, mode, param_attr=None, name=None):
                     channel:elements in a channel share same weight
                     element:each element has a weight
         name(str|None): A name for this layer(optional). If set None, the layer
-            will be named automatically.
+            will be named automatically.

     Returns:
         Variable: The output tensor with the same shape as input.
@@ -6166,10 +6168,10 @@ def flatten(x, axis=1, name=None):
 def sequence_enumerate(input, win_size, pad_value=0, name=None):
     """
     Generate a new sequence for the input index sequence, which enumerates all the
-    sub-sequences with length `win_size` of the input.
+    sub-sequences with length `win_size` of the input.
     The enumerated sequence has the same 1st dimension with variable `input`, and
     the 2nd dimension is `win_size`, padded by `pad_value` if necessary in generation.
-
+
     Examples:
         Case 1:
             Input:
@@ -6296,20 +6298,20 @@ def unstack(x, axis=0, num=None):
     **UnStack Layer**

     This layer unstacks input :code:`x` into several tensors along axis.
-
+
     If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x)`.
     If :code:`num` is None, it would be inferred from :code:`x.shape[axis]`,
     and if :code:`x.shape[axis]` <= 0 or is unknown, :code:`ValueError` is
-    raised.
+    raised.

     Args:
-        x (Variable): Input variable.
+        x (Variable): Input variable.
         axis (int): The axis along which the input is unstacked.
         num (int|None): The number of output variables.
-
+
     Returns:
         list(Variable): The unstacked variables.
-
+
     """

     helper = LayerHelper('unstack', **locals())
@@ -6342,21 +6344,21 @@ def expand(x, expand_times, name=None):
     .. code-block:: text

         Input(X) is a 3-D tensor with shape [2, 3, 1]:
-
+
                 [
                    [[1], [2], [3]],
                    [[4], [5], [6]]
                 ]
-
+
         Attr(expand_times): [1, 2, 2]
-
+
         Output(Out) is a 3-D tensor with shape [2, 6, 2]:
-
+
                 [
                     [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
                     [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
                 ]
-
+
     Args:
         x (Variable): A tensor with rank in [1, 6].
         expand_times (list|tuple): Expand times number for each dimension.
@@ -6667,7 +6669,7 @@ def scale(x,
         bias_after_scale(${bias_after_scale_type}): ${bias_after_scale_comment}
         out(Tensor): Output tensor.
         act(basestring|None): Activation applied to the output.
-        name(basestring|None): Name of the output.
+        name(basestring|None): Name of the output.

     Returns:
         out(${out_type}): ${out_comment}
@@ -6775,3 +6777,165 @@ for func in [
         "act (basestring|None): Activation applied to the output.",
         "name (basestring|None): Name of the output."
     ])
+
+
+def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
+    helper = LayerHelper(op_name, **locals())
+
+    if binary_op:
+        assert x.dtype == y.dtype
+
+    if out is None:
+        if name is None:
+            out = helper.create_tmp_variable(dtype=x.dtype)
+        else:
+            out = helper.create_variable(
+                name=name, dtype=x.dtype, persistable=False)
+
+    if binary_op:
+        helper.append_op(
+            type=op_name, inputs={"X": x,
+                                  "Y": y}, outputs={"Out": out})
+    else:
+        helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
+
+    return out
+
+
+@templatedoc()
+def logical_and(x, y, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        y(${y_type}): ${y_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)
+
+
+@templatedoc()
+def logical_or(x, y, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        y(${y_type}): ${y_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True)
+
+
+@templatedoc()
+def logical_xor(x, y, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        y(${y_type}): ${y_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True)
+
+
+@templatedoc()
+def logical_not(x, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False)
+
+
+@templatedoc()
+def clip(x, min, max, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        min(${min_type}): ${min_comment}
+        max(${max_type}): ${max_comment}
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    helper = LayerHelper("clip", **locals())
+
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+
+    helper.append_op(
+        type="clip",
+        inputs={"X": x},
+        attrs={"min": min,
+               "max": max},
+        outputs={"Out": out})
+
+    return out
+
+
+@templatedoc()
+def clip_by_norm(x, max_norm, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        max_norm(${max_norm_type}): ${max_norm_comment}
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    helper = LayerHelper("clip_by_norm", **locals())
+
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+
+    helper.append_op(
+        type="clip_by_norm",
+        inputs={"X": x},
+        attrs={"max_norm": max_norm},
+        outputs={"Out": out})
+
+    return out
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 8a533035b0a3d175073cb0b0884aa507bcff782c..6605762d74baa150ad914959fd468a5afb38064b 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -39,12 +39,6 @@ __all__ = [
     'mean',
     'mul',
     'sigmoid_cross_entropy_with_logits',
-    'clip',
-    'clip_by_norm',
-    'logical_and',
-    'logical_or',
-    'logical_xor',
-    'logical_not',
     'maxout',
 ]
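
Usage note (not part of the patch): the hunks above move logical_and, logical_or, logical_xor, logical_not, clip and clip_by_norm out of the auto-generated wrappers in ops.py and give them explicit Python signatures in nn.py, which is why API.spec now records real argument lists instead of *args/**kwargs. The sketch below shows how the relocated layers could be called after this change; the variable names, the bool-typed data feed, and the executor setup are illustrative assumptions, not code from this diff.

import numpy as np
import paddle.fluid as fluid

# Boolean inputs for the logical layers (hypothetical example data).
x = fluid.layers.data(name='x', shape=[3], dtype='bool')
y = fluid.layers.data(name='y', shape=[3], dtype='bool')
both = fluid.layers.logical_and(x=x, y=y)    # elementwise x AND y
flipped = fluid.layers.logical_not(x=x)      # elementwise NOT x

# Float input for the clipping layers.
v = fluid.layers.data(name='v', shape=[3], dtype='float32')
clipped = fluid.layers.clip(x=v, min=-1.0, max=1.0)      # clamp each element into [-1, 1]
normed = fluid.layers.clip_by_norm(x=v, max_norm=1.0)    # rescale v if its L2 norm exceeds 1.0

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
outs = exe.run(
    feed={
        'x': np.array([[True, False, True]]),
        'y': np.array([[True, True, False]]),
        'v': np.array([[2.0, -3.0, 0.5]], dtype='float32'),
    },
    fetch_list=[both, flipped, clipped, normed])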