logic.py 48.7 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14
#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15 16
# TODO: define logic functions of a tensor

17
import paddle
18

19
from ..common_ops_import import Variable
20 21
from ..fluid.data_feeder import check_type, check_variable_and_dtype
from .layer_function_generator import templatedoc
22

W
wanghuancoder 已提交
23
Tensor = paddle.fluid.framework.core.eager.Tensor
24

25
from paddle import _C_ops
26
from paddle.tensor.creation import full
27 28
from paddle.tensor.math import broadcast_shape
from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
29

30
from ..framework import LayerHelper, in_dynamic_mode
31

32 33
__all__ = []

34

35
def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
    """Shared backend for the public ``logical_*`` APIs.

    In dynamic (eager) mode, dispatches directly to the C++ op named
    ``op_name`` on ``_C_ops``. Otherwise validates dtypes and appends the
    corresponding op to the static graph via ``LayerHelper``.

    Args:
        op_name (str): Op name, e.g. ``"logical_and"``; must match both an
            attribute on ``_C_ops`` and a registered static-graph op type.
        x (Tensor): First operand.
        y (Tensor|None): Second operand; ``None`` when ``binary_op`` is False.
        out (Tensor, optional): Pre-created output variable (static mode only).
        name (str, optional): Name for the operation.
        binary_op (bool): True for two-operand ops, False for unary ones.

    Returns:
        Tensor: The result of the logical op.
    """
    if in_dynamic_mode():
        op = getattr(_C_ops, op_name)
        if binary_op:
            return op(x, y)
        else:
            return op(x)
    else:
        # Static graph: validate operand dtypes before emitting the op.
        check_variable_and_dtype(
            x,
            "x",
            [
                "bool",
                "int8",
                "int16",
                "int32",
                "int64",
                "float16",
                "float32",
                "float64",
                "uint16",
                "complex64",
                "complex128",
            ],
            op_name,
        )
        if y is not None:
            check_variable_and_dtype(
                y,
                "y",
                [
                    "bool",
                    "int8",
                    "int16",
                    "int32",
                    "int64",
                    "float16",
                    "float32",
                    "float64",
                    "uint16",
                    "complex64",
                    "complex128",
                ],
                op_name,
            )
        if out is not None:
            check_type(out, "out", Variable, op_name)

        helper = LayerHelper(op_name, **locals())

        # Binary logical ops require both operands to share one dtype.
        if binary_op and x.dtype != y.dtype:
            raise ValueError(
                f"(InvalidArgument) The DataType of {op_name} Op's Variable must be consistent, but received {x.dtype} and {y.dtype}."
            )

        if out is None:
            out = helper.create_variable_for_type_inference(dtype=x.dtype)

        if binary_op:
            helper.append_op(
                type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
            )
        else:
            helper.append_op(
                type=op_name, inputs={"X": x}, outputs={"Out": out}
            )

        return out
103 104 105 106 107


def logical_and(x, y, out=None, name=None):
    r"""

    Compute element-wise logical AND on ``x`` and ``y``, and return ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = x \&\& y

    Note:
        ``paddle.logical_and`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        out(Tensor, optional): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with ``x``.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([True])
            >>> y = paddle.to_tensor([True, False, True, False])
            >>> res = paddle.logical_and(x, y)
            >>> print(res)
            Tensor(shape=[4], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True , False, True , False])

    """
    if in_dynamic_mode():
        return _C_ops.logical_and(x, y)

    return _logical_op(
        op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True
    )
148 149


150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166
@inplace_apis_in_dygraph_only
def logical_and_(x, y, name=None):
    r"""
    Inplace version of ``logical_and`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_logical_and`.
    """
    # An in-place op may not change x's shape, so broadcasting x with y
    # must yield x's shape again.
    target_shape = broadcast_shape(x.shape, y.shape)
    if target_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                target_shape, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.logical_and_(x, y)


167 168 169 170 171 172 173 174 175 176
def logical_or(x, y, out=None, name=None):
    """

    ``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = x || y

    Note:
        ``paddle.logical_or`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        out(Tensor, optional): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with ``x``.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([True, False], dtype="bool").reshape([2, 1])
            >>> y = paddle.to_tensor([True, False, True, False], dtype="bool").reshape([2, 2])
            >>> res = paddle.logical_or(x, y)
            >>> print(res)
            Tensor(shape=[2, 2], dtype=bool, place=Place(cpu), stop_gradient=True,
            [[True , True ],
             [True , False]])
    """
    if in_dynamic_mode():
        return _C_ops.logical_or(x, y)
    return _logical_op(
        op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True
    )
209 210


211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227
@inplace_apis_in_dygraph_only
def logical_or_(x, y, name=None):
    r"""
    Inplace version of ``logical_or`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_logical_or`.
    """
    # An in-place op may not change x's shape, so broadcasting x with y
    # must yield x's shape again.
    target_shape = broadcast_shape(x.shape, y.shape)
    if target_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                target_shape, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.logical_or_(x, y)


228 229 230 231 232 233 234 235 236 237
def logical_xor(x, y, out=None, name=None):
    r"""

    ``logical_xor`` operator computes element-wise logical XOR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = (x || y) \&\& !(x \&\& y)

    Note:
        ``paddle.logical_xor`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with ``x``.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([True, False], dtype="bool").reshape([2, 1])
            >>> y = paddle.to_tensor([True, False, True, False], dtype="bool").reshape([2, 2])
            >>> res = paddle.logical_xor(x, y)
            >>> print(res)
            Tensor(shape=[2, 2], dtype=bool, place=Place(cpu), stop_gradient=True,
            [[False, True ],
             [True , False]])
    """
    # Static graph builds the op through the shared helper; eager mode
    # calls straight into the C++ kernel.
    if not in_dynamic_mode():
        return _logical_op(
            op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True
        )
    return _C_ops.logical_xor(x, y)
271 272


273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289
@inplace_apis_in_dygraph_only
def logical_xor_(x, y, name=None):
    r"""
    Inplace version of ``logical_xor`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_logical_xor`.
    """
    # In-place ops cannot grow ``x``: the broadcast result must keep x's shape.
    out_shape = broadcast_shape(x.shape, y.shape)
    if out_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.logical_xor_(x, y)


290 291 292 293 294 295 296 297 298 299
def logical_not(x, out=None, name=None):
    """

    ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = !x

    Note:
        ``paddle.logical_not`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x(Tensor):  Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        out(Tensor, optional): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name(str, optional): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with ``x``.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([True, False, True, False])
            >>> res = paddle.logical_not(x)
            >>> print(res)
            Tensor(shape=[4], dtype=bool, place=Place(cpu), stop_gradient=True,
            [False, True , False, True ])
    """
    if in_dynamic_mode():
        return _C_ops.logical_not(x)
    return _logical_op(
        op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False
    )
330 331


332 333 334 335 336 337 338 339 340 341
@inplace_apis_in_dygraph_only
def logical_not_(x, name=None):
    r"""
    Inplace version of ``logical_not`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_logical_not`.
    """
    # The decorator restricts this API to dygraph; outside of it nothing runs.
    if not in_dynamic_mode():
        return None
    return _C_ops.logical_not_(x)


342 343 344 345 346 347 348
def is_empty(x, name=None):
    """

    Test whether a Tensor is empty.

    Args:
        x (Tensor): The Tensor to be tested.
        name (str, optional): The default value is ``None`` . Normally users don't have to set this parameter. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: A bool scalar Tensor. True if 'x' is an empty Tensor.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> input = paddle.rand(shape=[4, 32, 32], dtype='float32')
            >>> res = paddle.is_empty(x=input)
            >>> print(res)
            Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
            False)

    """
    if not in_dynamic_mode():
        # Static graph: validate inputs, then emit an ``is_empty`` op whose
        # boolean output carries no gradient.
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty'
        )
        check_type(name, "name", (str, type(None)), "is_empty")

        helper = LayerHelper("is_empty", **locals())
        result = helper.create_variable_for_type_inference(dtype='bool')
        result.stop_gradient = True
        helper.append_op(
            type='is_empty', inputs={'X': [x]}, outputs={'Out': [result]}
        )
        return result
    return _C_ops.is_empty(x)
381 382


W
wawltor 已提交
383
def equal_all(x, y, name=None):
    """
    Returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise.

    Note:
        The output has no gradient.

    Args:
        x(Tensor): Tensor, data type is bool, float32, float64, int32, int64.
        y(Tensor): Tensor, data type is bool, float32, float64, int32, int64.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: output Tensor, data type is bool, value is [False] or [True].

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1, 2, 3])
            >>> y = paddle.to_tensor([1, 2, 3])
            >>> z = paddle.to_tensor([1, 4, 3])
            >>> result1 = paddle.equal_all(x, y)
            >>> print(result1)
            Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
            True)
            >>> result2 = paddle.equal_all(x, z)
            >>> print(result2)
            Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
            False)
    """
    if not in_dynamic_mode():
        # Static graph: emit an ``equal_all`` op producing a scalar bool.
        helper = LayerHelper("equal_all", **locals())
        result = helper.create_variable_for_type_inference(dtype='bool')
        helper.append_op(
            type='equal_all',
            inputs={'X': [x], 'Y': [y]},
            outputs={'Out': [result]},
        )
        return result
    return _C_ops.equal_all(x, y)
Z
Zhen Wang 已提交
427 428 429


@templatedoc()
def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
    r"""
    Check if all :math:`x` and :math:`y` satisfy the condition:

    .. math::
        \left| x - y \right| \leq atol + rtol \times \left| y \right|

    elementwise, for all elements of :math:`x` and :math:`y`. This is analogous to :math:`numpy.allclose`, namely that it returns :math:`True` if
    two tensors are elementwise equal within a tolerance.

    Args:
        x (Tensor): The input tensor, it's data type should be float16, float32, float64.
        y (Tensor): The input tensor, it's data type should be float16, float32, float64.
        rtol (float, optional): The relative tolerance. Default: :math:`1e-5` .
        atol (float, optional): The absolute tolerance. Default: :math:`1e-8` .
        equal_nan (bool, optional): ${equal_nan_comment}. Default: False.
        name (str, optional): Name for the operation. For more information, please
            refer to :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: The output tensor, it's data type is bool.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([10000., 1e-07])
            >>> y = paddle.to_tensor([10000.1, 1e-08])
            >>> result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan")
            >>> print(result1)
            Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
            False)
            >>> result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=True, name="equal_nan")
            >>> print(result2)
            Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
            False)
            >>> x = paddle.to_tensor([1.0, float('nan')])
            >>> y = paddle.to_tensor([1.0, float('nan')])
            >>> result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan")
            >>> print(result1)
            Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
            False)
            >>> result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=True, name="equal_nan")
            >>> print(result2)
            Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
            True)
    """

    if in_dynamic_mode():
        return _C_ops.allclose(x, y, rtol, atol, equal_nan)
    else:
        # NOTE(review): both checks report the argument as "input", so an
        # error on ``y`` is indistinguishable from one on ``x`` — confirm
        # whether the second should say "other"/"y".
        check_variable_and_dtype(
            x, "input", ['float16', 'float32', 'float64'], 'allclose'
        )
        check_variable_and_dtype(
            y, "input", ['float16', 'float32', 'float64'], 'allclose'
        )
        check_type(rtol, 'rtol', float, 'allclose')
        check_type(atol, 'atol', float, 'allclose')
        check_type(equal_nan, 'equal_nan', bool, 'allclose')

        helper = LayerHelper("allclose", **locals())
        out = helper.create_variable_for_type_inference(dtype='bool')

        inputs = {'Input': x, 'Other': y}
        outputs = {'Out': out}
        # rtol/atol are passed as strings; the op attribute type expects that.
        attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan}
        helper.append_op(
            type='allclose', inputs=inputs, outputs=outputs, attrs=attrs
        )

        return out
503 504


W
wawltor 已提交
505 506
@templatedoc()
def equal(x, y, name=None):
    """

    This layer returns the truth value of :math:`x == y` elementwise.

    Note:
        The output has no gradient.

    Args:
        x (Tensor): Tensor, data type is bool, float16, float32, float64, int32, int64.
        y (Tensor): Tensor, data type is bool, float16, float32, float64, int32, int64.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: output Tensor, it's shape is the same as the input's Tensor,
        and the data type is bool. The result of this op is stop_gradient.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1, 2, 3])
            >>> y = paddle.to_tensor([1, 3, 2])
            >>> result1 = paddle.equal(x, y)
            >>> print(result1)
            Tensor(shape=[3], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True , False, False])
    """
    # A python scalar ``y`` is first promoted to a 0-D tensor of x's dtype.
    if not isinstance(y, (int, bool, float, Variable)):
        raise TypeError(
            "Type of input args must be float, bool, int or Tensor, but received type {}".format(
                type(y)
            )
        )
    if not isinstance(y, Variable):
        y = full(shape=[], dtype=x.dtype, fill_value=y)

    if in_dynamic_mode():
        return _C_ops.equal(x, y)
    else:
        check_variable_and_dtype(
            x,
            "x",
            ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
            "equal",
        )
        check_variable_and_dtype(
            y,
            "y",
            ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
            "equal",
        )
        helper = LayerHelper("equal", **locals())
        out = helper.create_variable_for_type_inference(dtype='bool')
        out.stop_gradient = True

        helper.append_op(
            type='equal',
            inputs={'X': [x], 'Y': [y]},
            outputs={'Out': [out]},
        )
        return out
586

W
wawltor 已提交
587

588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604
@inplace_apis_in_dygraph_only
def equal_(x, y, name=None):
    r"""
    Inplace version of ``equal`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_equal`.
    """
    # An in-place op may not change x's shape, so broadcasting x with y
    # must yield x's shape again.
    target_shape = broadcast_shape(x.shape, y.shape)
    if target_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                target_shape, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.equal_(x, y)


W
wawltor 已提交
605 606 607
@templatedoc()
def greater_equal(x, y, name=None):
    """
    Returns the truth value of :math:`x >= y` elementwise, which is equivalent function to the overloaded operator `>=`.

    Note:
        The output has no gradient.

    Args:
        x (Tensor): First input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        y (Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        name (str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: The output shape is same as input :attr:`x`. The output data type is bool.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1, 2, 3])
            >>> y = paddle.to_tensor([1, 3, 2])
            >>> result1 = paddle.greater_equal(x, y)
            >>> print(result1)
            Tensor(shape=[3], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True , False, True ])
    """
    if in_dynamic_mode():
        return _C_ops.greater_equal(x, y)

    # Static graph: validate dtypes, then emit a no-gradient bool output.
    check_variable_and_dtype(
        x,
        "x",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "greater_equal",
    )
    check_variable_and_dtype(
        y,
        "y",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "greater_equal",
    )
    helper = LayerHelper("greater_equal", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    out.stop_gradient = True

    helper.append_op(
        type='greater_equal',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [out]},
    )
    return out
W
wawltor 已提交
674 675


676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692
@inplace_apis_in_dygraph_only
def greater_equal_(x, y, name=None):
    r"""
    Inplace version of ``greater_equal`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_greater_equal`.
    """
    # An in-place op may not change x's shape, so broadcasting x with y
    # must yield x's shape again.
    target_shape = broadcast_shape(x.shape, y.shape)
    if target_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                target_shape, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.greater_equal_(x, y)


W
wawltor 已提交
693 694 695
@templatedoc()
def greater_than(x, y, name=None):
    """
    Returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`.

    Note:
        The output has no gradient.

    Args:
        x (Tensor): First input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        y (Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        name (str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: The output shape is same as input :attr:`x`. The output data type is bool.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1, 2, 3])
            >>> y = paddle.to_tensor([1, 3, 2])
            >>> result1 = paddle.greater_than(x, y)
            >>> print(result1)
            Tensor(shape=[3], dtype=bool, place=Place(cpu), stop_gradient=True,
            [False, False, True ])
    """
    if in_dynamic_mode():
        return _C_ops.greater_than(x, y)

    # Static graph: validate dtypes, then emit a no-gradient bool output.
    check_variable_and_dtype(
        x,
        "x",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "greater_than",
    )
    check_variable_and_dtype(
        y,
        "y",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "greater_than",
    )
    helper = LayerHelper("greater_than", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    out.stop_gradient = True

    helper.append_op(
        type='greater_than',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [out]},
    )
    return out
W
wawltor 已提交
762 763


764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780
@inplace_apis_in_dygraph_only
def greater_than_(x, y, name=None):
    r"""
    Inplace version of ``greater_than`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_greater_than`.
    """
    # An in-place op may not change x's shape, so broadcasting x with y
    # must yield x's shape again.
    target_shape = broadcast_shape(x.shape, y.shape)
    if target_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                target_shape, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.greater_than_(x, y)


W
wawltor 已提交
781 782 783
@templatedoc()
def less_equal(x, y, name=None):
    """
    Returns the truth value of :math:`x <= y` elementwise, which is equivalent function to the overloaded operator `<=`.

    Note:
        The output has no gradient.

    Args:
        x (Tensor): First input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        y (Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        name (str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output shape is same as input :attr:`x`. The output data type is bool.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1, 2, 3])
            >>> y = paddle.to_tensor([1, 3, 2])
            >>> result1 = paddle.less_equal(x, y)
            >>> print(result1)
            Tensor(shape=[3], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True , True , False])
    """
    # Dygraph fast path: dispatch straight to the C++ op.
    if in_dynamic_mode():
        return _C_ops.less_equal(x, y)

    # Static-graph path: validate dtypes, then append the op to the program.
    check_variable_and_dtype(
        x,
        "x",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "less_equal",
    )
    check_variable_and_dtype(
        y,
        "y",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "less_equal",
    )
    helper = LayerHelper("less_equal", **locals())
    cmp_out = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    cmp_out.stop_gradient = True
    helper.append_op(
        type='less_equal',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [cmp_out]},
    )
    return cmp_out
W
wawltor 已提交
851 852


853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869
@inplace_apis_in_dygraph_only
def less_equal_(x, y, name=None):
    r"""
    Inplace version of ``less_equal`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_less_equal`.
    """
    # Inplace ops cannot grow ``x``: the broadcast result must keep x's shape.
    target_shape = broadcast_shape(x.shape, y.shape)
    if target_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            target_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.less_equal_(x, y)


W
wawltor 已提交
870 871 872
@templatedoc()
def less_than(x, y, name=None):
    """
    Returns the truth value of :math:`x < y` elementwise, which is equivalent function to the overloaded operator `<`.

    Note:
        The output has no gradient.

    Args:
        x (Tensor): First input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        y (Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        name (str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output shape is same as input :attr:`x`. The output data type is bool.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1, 2, 3])
            >>> y = paddle.to_tensor([1, 3, 2])
            >>> result1 = paddle.less_than(x, y)
            >>> print(result1)
            Tensor(shape=[3], dtype=bool, place=Place(cpu), stop_gradient=True,
            [False, True , False])
    """
    # Dygraph fast path: dispatch straight to the C++ op.
    if in_dynamic_mode():
        return _C_ops.less_than(x, y)

    # Static-graph path: validate dtypes, then append the op to the program.
    check_variable_and_dtype(
        x,
        "x",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "less_than",
    )
    check_variable_and_dtype(
        y,
        "y",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "less_than",
    )
    helper = LayerHelper("less_than", **locals())
    cmp_out = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    cmp_out.stop_gradient = True
    helper.append_op(
        type='less_than',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [cmp_out]},
    )
    return cmp_out
W
wawltor 已提交
940 941


942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958
@inplace_apis_in_dygraph_only
def less_than_(x, y, name=None):
    r"""
    Inplace version of ``less_than`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_less_than`.
    """
    # Inplace ops cannot grow ``x``: the broadcast result must keep x's shape.
    target_shape = broadcast_shape(x.shape, y.shape)
    if target_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            target_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.less_than_(x, y)


W
wawltor 已提交
959 960 961
@templatedoc()
def not_equal(x, y, name=None):
    """
    Returns the truth value of :math:`x != y` elementwise, which is equivalent function to the overloaded operator `!=`.

    Note:
        The output has no gradient.

    Args:
        x (Tensor): First input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        y (Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        name (str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output shape is same as input :attr:`x`. The output data type is bool.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([1, 2, 3])
            >>> y = paddle.to_tensor([1, 3, 2])
            >>> result1 = paddle.not_equal(x, y)
            >>> print(result1)
            Tensor(shape=[3], dtype=bool, place=Place(cpu), stop_gradient=True,
            [False, True , True ])
    """
    # Dygraph fast path: dispatch straight to the C++ op.
    if in_dynamic_mode():
        return _C_ops.not_equal(x, y)
    else:
        # Static-graph path: validate dtypes, then append the op to the program.
        # float16 was missing from the docstring above although it has always
        # been accepted here; the docs now match this whitelist.
        check_variable_and_dtype(
            x,
            "x",
            [
                "bool",
                "float16",
                "float32",
                "float64",
                "int32",
                "int64",
                "uint16",
            ],
            "not_equal",
        )
        check_variable_and_dtype(
            y,
            "y",
            [
                "bool",
                "float16",
                "float32",
                "float64",
                "int32",
                "int64",
                "uint16",
            ],
            "not_equal",
        )
        helper = LayerHelper("not_equal", **locals())
        out = helper.create_variable_for_type_inference(dtype='bool')
        # Comparison results carry no gradient.
        out.stop_gradient = True

        helper.append_op(
            type='not_equal',
            inputs={'X': [x], 'Y': [y]},
            outputs={'Out': [out]},
        )
        return out
Z
zhulei 已提交
1029 1030


1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047
@inplace_apis_in_dygraph_only
def not_equal_(x, y, name=None):
    r"""
    Inplace version of ``not_equal`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_not_equal`.
    """
    # Inplace ops cannot grow ``x``: the broadcast result must keep x's shape.
    target_shape = broadcast_shape(x.shape, y.shape)
    if target_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            target_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.not_equal_(x, y)


Z
zhulei 已提交
1048 1049 1050
def is_tensor(x):
    """

    Tests whether input object is a paddle.Tensor.

    Args:
        x (object): Object to test.

    Returns:
        A boolean value. True if ``x`` is a paddle.Tensor, otherwise False.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> input1 = paddle.rand(shape=[2, 3, 5], dtype='float32')
            >>> check = paddle.is_tensor(input1)
            >>> print(check)
            True

            >>> input3 = [1, 4]
            >>> check = paddle.is_tensor(input3)
            >>> print(check)
            False

    """
    # In static graph mode tensors are represented by ``Variable`` instead.
    if not in_dynamic_mode():
        return isinstance(x, Variable)
    return isinstance(x, (Tensor, paddle.fluid.core.eager.Tensor))
1079 1080 1081


def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
    # Shared driver for the bitwise_* APIs: ``binary_op`` selects between a
    # two-operand op (and/or/xor) and a one-operand op (not).
    if in_dynamic_mode():
        dygraph_op = getattr(_C_ops, op_name)
        return dygraph_op(x, y) if binary_op else dygraph_op(x)

    # Static-graph path: validate inputs before emitting the op.
    check_variable_and_dtype(
        x,
        "x",
        ["bool", "uint8", "int8", "int16", "int32", "int64"],
        op_name,
    )
    if y is not None:
        check_variable_and_dtype(
            y,
            "y",
            ["bool", "uint8", "int8", "int16", "int32", "int64"],
            op_name,
        )
    if out is not None:
        check_type(out, "out", Variable, op_name)

    helper = LayerHelper(op_name, **locals())
    if binary_op:
        # Bitwise kernels require both operands to share a dtype.
        assert x.dtype == y.dtype

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

    op_inputs = {"X": x, "Y": y} if binary_op else {"X": x}
    helper.append_op(type=op_name, inputs=op_inputs, outputs={"Out": out})

    return out
1122 1123 1124


def bitwise_and(x, y, out=None, name=None):
    r"""

    Apply ``bitwise_and`` on Tensor ``X`` and ``Y`` .

    .. math::
        Out = X \& Y

    .. note::
        ``paddle.bitwise_and`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

    .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor.

    Args:
        x (Tensor): Input Tensor of ``bitwise_and`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        y (Tensor): Input Tensor of ``bitwise_and`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        out (Tensor, optional): Result of ``bitwise_and`` . It is a N-D Tensor with the same data type of input Tensor. Default: None.
        name (str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Result of ``bitwise_and`` . It is a N-D Tensor with the same data type of input Tensor.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([-5, -1, 1])
            >>> y = paddle.to_tensor([4,  2, -3])
            >>> res = paddle.bitwise_and(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [0, 2, 1])
    """
    # Dygraph fast path only applies when no pre-allocated ``out`` is given;
    # otherwise fall through so the static helper can honor it.
    if in_dynamic_mode() and out is None:
        return _C_ops.bitwise_and(x, y)
    return _bitwise_op(
        op_name="bitwise_and", x=x, y=y, name=name, out=out, binary_op=True
    )
1163 1164


1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181
@inplace_apis_in_dygraph_only
def bitwise_and_(x, y, name=None):
    r"""
    Inplace version of ``bitwise_and`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_bitwise_and`.
    """
    # Inplace ops cannot grow ``x``: the broadcast result must keep x's shape.
    target_shape = broadcast_shape(x.shape, y.shape)
    if target_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            target_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.bitwise_and_(x, y)


1182
def bitwise_or(x, y, out=None, name=None):
    r"""

    Apply ``bitwise_or`` on Tensor ``X`` and ``Y`` .

    .. math::
        Out = X | Y

    .. note::
        ``paddle.bitwise_or`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

    .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor.

    Args:
        x (Tensor): Input Tensor of ``bitwise_or`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        y (Tensor): Input Tensor of ``bitwise_or`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        out (Tensor, optional): Result of ``bitwise_or`` . It is a N-D Tensor with the same data type of input Tensor. Default: None.
        name (str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Result of ``bitwise_or`` . It is a N-D Tensor with the same data type of input Tensor.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([-5, -1, 1])
            >>> y = paddle.to_tensor([4,  2, -3])
            >>> res = paddle.bitwise_or(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [-1, -1, -3])
    """
    # Dygraph fast path only applies when no pre-allocated ``out`` is given;
    # otherwise fall through so the static helper can honor it.
    if in_dynamic_mode() and out is None:
        return _C_ops.bitwise_or(x, y)

    return _bitwise_op(
        op_name="bitwise_or", x=x, y=y, name=name, out=out, binary_op=True
    )
1222 1223


1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240
@inplace_apis_in_dygraph_only
def bitwise_or_(x, y, name=None):
    r"""
    Inplace version of ``bitwise_or`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_bitwise_or`.
    """
    # Inplace ops cannot grow ``x``: the broadcast result must keep x's shape.
    target_shape = broadcast_shape(x.shape, y.shape)
    if target_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            target_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.bitwise_or_(x, y)


1241
def bitwise_xor(x, y, out=None, name=None):
    r"""

    Apply ``bitwise_xor`` on Tensor ``X`` and ``Y`` .

    .. math::
        Out = X ^\wedge Y

    .. note::
        ``paddle.bitwise_xor`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

    .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor.

    Args:
        x (Tensor): Input Tensor of ``bitwise_xor`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        y (Tensor): Input Tensor of ``bitwise_xor`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        out (Tensor, optional): Result of ``bitwise_xor`` . It is a N-D Tensor with the same data type of input Tensor. Default: None.
        name (str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Result of ``bitwise_xor`` . It is a N-D Tensor with the same data type of input Tensor.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([-5, -1, 1])
            >>> y = paddle.to_tensor([4,  2, -3])
            >>> res = paddle.bitwise_xor(x, y)
            >>> print(res)
            Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [-1, -3, -4])
    """
    # Dygraph fast path only applies when no pre-allocated ``out`` is given;
    # otherwise fall through so the static helper can honor it.
    if in_dynamic_mode() and out is None:
        return _C_ops.bitwise_xor(x, y)
    return _bitwise_op(
        op_name="bitwise_xor", x=x, y=y, name=name, out=out, binary_op=True
    )
1280 1281


1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298
@inplace_apis_in_dygraph_only
def bitwise_xor_(x, y, name=None):
    r"""
    Inplace version of ``bitwise_xor`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_bitwise_xor`.
    """
    # Inplace ops cannot grow ``x``: the broadcast result must keep x's shape.
    target_shape = broadcast_shape(x.shape, y.shape)
    if target_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            target_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.bitwise_xor_(x, y)


1299
def bitwise_not(x, out=None, name=None):
    r"""

    Apply ``bitwise_not`` on Tensor ``X``.

    .. math::
        Out = \sim X

    .. note::
        ``paddle.bitwise_not`` supports broadcasting. If you want to know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor.

    Args:
        x (Tensor): Input Tensor of ``bitwise_not`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        out (Tensor, optional): Result of ``bitwise_not`` . It is a N-D Tensor with the same data type of input Tensor. Default: None.
        name (str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Result of ``bitwise_not`` . It is a N-D Tensor with the same data type of input Tensor.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([-5, -1, 1])
            >>> res = paddle.bitwise_not(x)
            >>> print(res)
            Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [ 4,  0, -2])
    """
    # Dygraph fast path only applies when no pre-allocated ``out`` is given;
    # otherwise fall through so the static helper can honor it.
    if in_dynamic_mode() and out is None:
        return _C_ops.bitwise_not(x)

    return _bitwise_op(
        op_name="bitwise_not", x=x, y=None, name=name, out=out, binary_op=False
    )
A
andyjpaddle 已提交
1337 1338


1339 1340 1341 1342 1343 1344 1345 1346 1347 1348
@inplace_apis_in_dygraph_only
def bitwise_not_(x, name=None):
    r"""
    Inplace version of ``bitwise_not`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_bitwise_not`.
    """
    # Unary op: no broadcast-shape check is needed (the result always has
    # x's shape); the decorator restricts this API to dynamic-graph mode.
    if in_dynamic_mode():
        return _C_ops.bitwise_not_(x)


A
andyjpaddle 已提交
1349 1350
@templatedoc()
def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
    r"""
    Check if all :math:`x` and :math:`y` satisfy the condition:

    .. math::

        \left| x - y \right| \leq atol + rtol \times \left| y \right|

    elementwise, for all elements of :math:`x` and :math:`y`. The behaviour of this
    operator is analogous to :math:`numpy.isclose`, namely that it returns :math:`True` if
    two tensors are elementwise equal within a tolerance.

    Args:
        x(Tensor): The input tensor, it's data type should be float16, float32, float64, complex64, complex128.
        y(Tensor): The input tensor, it's data type should be float16, float32, float64, complex64, complex128.
        rtol(rtoltype, optional): The relative tolerance. Default: :math:`1e-5` .
        atol(atoltype, optional): The absolute tolerance. Default: :math:`1e-8` .
        equal_nan(equalnantype, optional): If :math:`True` , then two :math:`NaNs` will be compared as equal. Default: :math:`False` .
        name (str, optional): Name for the operation. For more information, please
            refer to :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: The output tensor, it's data type is bool.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> x = paddle.to_tensor([10000., 1e-07])
            >>> y = paddle.to_tensor([10000.1, 1e-08])
            >>> result1 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
            ...                          equal_nan=False, name="ignore_nan")
            >>> print(result1)
            Tensor(shape=[2], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True , False])
            >>> result2 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
            ...                          equal_nan=True, name="equal_nan")
            >>> print(result2)
            Tensor(shape=[2], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True , False])
            >>> x = paddle.to_tensor([1.0, float('nan')])
            >>> y = paddle.to_tensor([1.0, float('nan')])
            >>> result1 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
            ...                          equal_nan=False, name="ignore_nan")
            >>> print(result1)
            Tensor(shape=[2], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True , False])
            >>> result2 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
            ...                          equal_nan=True, name="equal_nan")
            >>> print(result2)
            Tensor(shape=[2], dtype=bool, place=Place(cpu), stop_gradient=True,
            [True, True])

    """
    # Dygraph fast path: dispatch straight to the C++ op.
    if in_dynamic_mode():
        return _C_ops.isclose(x, y, rtol, atol, equal_nan)

    # Static-graph path: validate tensor dtypes and scalar attribute types.
    check_variable_and_dtype(
        x,
        "input",
        ['float16', 'float32', 'float64', 'complex64', 'complex128'],
        'isclose',
    )
    check_variable_and_dtype(
        y,
        "input",
        ['float16', 'float32', 'float64', 'complex64', 'complex128'],
        'isclose',
    )
    check_type(rtol, 'rtol', float, 'isclose')
    check_type(atol, 'atol', float, 'isclose')
    check_type(equal_nan, 'equal_nan', bool, 'isclose')

    helper = LayerHelper("isclose", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    # Tolerances are passed as strings; the kernel parses them back.
    helper.append_op(
        type='isclose',
        inputs={'Input': x, 'Other': y},
        outputs={'Out': out},
        attrs={'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan},
    )
    return out