logic.py 45.5 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14
#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15 16
# TODO: define logic functions of a tensor

17
import paddle
18

19
from ..common_ops_import import Variable
20 21
from ..fluid.data_feeder import check_type, check_variable_and_dtype
from .layer_function_generator import templatedoc
22

W
wanghuancoder 已提交
23
Tensor = paddle.fluid.framework.core.eager.Tensor
24

25
from paddle import _C_ops
26
from paddle.tensor.creation import full
27 28
from paddle.tensor.math import broadcast_shape
from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
29

30
from ..framework import LayerHelper, in_dynamic_mode
31

32 33
__all__ = []

34

35
def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
    # Shared implementation behind the public logical_* APIs: dispatch to the
    # C++ kernel in dynamic mode, otherwise build the static-graph operator.
    if in_dynamic_mode():
        dygraph_op = getattr(_C_ops, op_name)
        return dygraph_op(x, y) if binary_op else dygraph_op(x)

    check_variable_and_dtype(
        x,
        "x",
        [
            "bool",
            "int8",
            "int16",
            "int32",
            "int64",
            "float16",
            "float32",
            "float64",
            "uint16",
            "complex64",
            "complex128",
        ],
        op_name,
    )
    if y is not None:
        check_variable_and_dtype(
            y,
            "y",
            [
                "bool",
                "int8",
                "int16",
                "int32",
                "int64",
                "float16",
                "float32",
                "float64",
                "uint16",
                "complex64",
                "complex128",
            ],
            op_name,
        )
    if out is not None:
        check_type(out, "out", Variable, op_name)

    helper = LayerHelper(op_name, **locals())

    # Binary logical ops require both operands to share a single dtype.
    if binary_op and x.dtype != y.dtype:
        raise ValueError(
            f"(InvalidArgument) The DataType of {op_name} Op's Variable must be consistent, but received {x.dtype} and {y.dtype}."
        )

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

    op_inputs = {"X": x, "Y": y} if binary_op else {"X": x}
    helper.append_op(type=op_name, inputs=op_inputs, outputs={"Out": out})
    return out
103 104 105 106 107


def logical_and(x, y, out=None, name=None):
    r"""

    Compute element-wise logical AND on ``x`` and ``y``, and return ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = x \&\& y

    Note:
        ``paddle.logical_and`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        out(Tensor, optional): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([True])
            y = paddle.to_tensor([True, False, True, False])
            res = paddle.logical_and(x, y)
            print(res) # [True False True False]
    """
    # Fast path: call the kernel directly in dynamic mode.
    if in_dynamic_mode():
        return _C_ops.logical_and(x, y)

    return _logical_op(
        op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True
    )
145 146


147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163
@inplace_apis_in_dygraph_only
def logical_and_(x, y, name=None):
    r"""
    Inplace version of ``logical_and`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_logical_and`.
    """
    # Inplace ops may not broadcast x: the result must keep x's shape.
    result_shape = broadcast_shape(x.shape, y.shape)
    if result_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                result_shape, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.logical_and_(x, y)


164 165 166 167 168 169 170 171 172 173
def logical_or(x, y, out=None, name=None):
    """

    ``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = x || y

    Note:
        ``paddle.logical_or`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        out(Tensor, optional): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([True, False], dtype="bool").reshape([2, 1])
            y = paddle.to_tensor([True, False, True, False], dtype="bool").reshape([2, 2])
            res = paddle.logical_or(x, y)
            print(res)
            # Tensor(shape=[2, 2], dtype=bool, place=Place(cpu), stop_gradient=True,
            #        [[True , True ],
            #         [True , False]])
    """
    # Fast path: call the kernel directly in dynamic mode.
    if in_dynamic_mode():
        return _C_ops.logical_or(x, y)
    return _logical_op(
        op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True
    )
206 207


208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224
@inplace_apis_in_dygraph_only
def logical_or_(x, y, name=None):
    r"""
    Inplace version of ``logical_or`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_logical_or`.
    """
    # Broadcasting that would change x's shape is illegal for inplace ops.
    bcast_shape = broadcast_shape(x.shape, y.shape)
    if bcast_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                bcast_shape, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.logical_or_(x, y)


225 226 227 228 229 230 231 232 233 234
def logical_xor(x, y, out=None, name=None):
    r"""

    ``logical_xor`` operator computes element-wise logical XOR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = (x || y) \&\& !(x \&\& y)

    Note:
        ``paddle.logical_xor`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
        out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([True, False], dtype="bool").reshape([2, 1])
            y = paddle.to_tensor([True, False, True, False], dtype="bool").reshape([2, 2])
            res = paddle.logical_xor(x, y)
            print(res)
            # Tensor(shape=[2, 2], dtype=bool, place=Place(cpu), stop_gradient=True,
            #        [[False, True ],
            #         [True , False]])
    """
    # Dynamic mode calls the kernel directly; otherwise defer to the common
    # static-graph builder.
    if in_dynamic_mode():
        return _C_ops.logical_xor(x, y)
    return _logical_op(
        op_name="logical_xor", x=x, y=y, out=out, name=name, binary_op=True
    )
268 269


270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286
@inplace_apis_in_dygraph_only
def logical_xor_(x, y, name=None):
    r"""
    Inplace version of ``logical_xor`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_logical_xor`.
    """
    # Inplace ops may not broadcast x: the result must keep x's shape.
    out_shape = broadcast_shape(x.shape, y.shape)
    if out_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                out_shape, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.logical_xor_(x, y)


287 288 289 290 291 292 293 294 295 296
def logical_not(x, out=None, name=None):
    """

    ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = !x

    Note:
        ``paddle.logical_not`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Args:
        x(Tensor):  Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, int32, int64, float16, float32, or float64, complex64, complex128.
        out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. It's dimension equals with ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([True, False, True, False])
            res = paddle.logical_not(x)
            print(res) # [False  True False  True]
    """
    # Fast path: call the kernel directly in dynamic mode.
    if in_dynamic_mode():
        return _C_ops.logical_not(x)
    return _logical_op(
        op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False
    )
324 325


326 327 328 329 330 331 332 333 334 335
@inplace_apis_in_dygraph_only
def logical_not_(x, name=None):
    r"""
    Inplace version of ``logical_not`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_logical_not`.
    """
    # Unary op: no broadcast-shape check is needed before the inplace kernel.
    if in_dynamic_mode():
        return _C_ops.logical_not_(x)


336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356
def is_empty(x, name=None):
    """

    Test whether a Tensor is empty.

    Args:
        x (Tensor): The Tensor to be tested.
        name (str, optional): The default value is ``None`` . Normally users
                            don't have to set this parameter. For more information,
                            please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: A bool scalar Tensor. True if 'x' is an empty Tensor.

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.rand(shape=[4, 32, 32], dtype='float32')
            res = paddle.is_empty(x=input)
            # res: Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
            #        False)

    """
    if in_dynamic_mode():
        return _C_ops.is_empty(x)

    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty'
    )
    check_type(name, "name", (str, type(None)), "is_empty")

    helper = LayerHelper("is_empty", **locals())
    flag = helper.create_variable_for_type_inference(dtype='bool')
    # The emptiness flag is a pure predicate; it carries no gradient.
    flag.stop_gradient = True
    helper.append_op(
        type='is_empty', inputs={'X': [x]}, outputs={'Out': [flag]}
    )
    return flag
376 377


W
wawltor 已提交
378
def equal_all(x, y, name=None):
    """
    Returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise.

    Note:
        The output has no gradient.

    Args:
        x(Tensor): Tensor, data type is bool, float32, float64, int32, int64.
        y(Tensor): Tensor, data type is bool, float32, float64, int32, int64.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: output Tensor, data type is bool, value is [False] or [True].

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.to_tensor([1, 2, 3])
          y = paddle.to_tensor([1, 2, 3])
          z = paddle.to_tensor([1, 4, 3])
          result1 = paddle.equal_all(x, y)
          print(result1) # result1 = True
          result2 = paddle.equal_all(x, z)
          print(result2) # result2 = False
    """
    if in_dynamic_mode():
        return _C_ops.equal_all(x, y)

    helper = LayerHelper("equal_all", **locals())
    result = helper.create_variable_for_type_inference(dtype='bool')
    helper.append_op(
        type='equal_all',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [result]},
    )
    return result
Z
Zhen Wang 已提交
418 419 420


@templatedoc()
def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
    r"""
    Check if all :math:`x` and :math:`y` satisfy the condition:

    .. math::
        \left| x - y \right| \leq atol + rtol \times \left| y \right|

    elementwise, for all elements of :math:`x` and :math:`y`. This is analogous to :math:`numpy.allclose`, namely that it returns :math:`True` if
    two tensors are elementwise equal within a tolerance.

    Args:
        x(Tensor): The input tensor, it's data type should be float16, float32, float64.
        y(Tensor): The input tensor, it's data type should be float16, float32, float64.
        rtol(float, optional): The relative tolerance. Default: :math:`1e-5` .
        atol(float, optional): The absolute tolerance. Default: :math:`1e-8` .
        equal_nan(bool, optional): ${equal_nan_comment}.
        name (str, optional): Name for the operation. For more information, please
            refer to :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: The output tensor, it's data type is bool.

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.to_tensor([10000., 1e-07])
          y = paddle.to_tensor([10000.1, 1e-08])
          result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                  equal_nan=False, name="ignore_nan")
          # False

          result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                      equal_nan=True, name="equal_nan")
          # False

          x = paddle.to_tensor([1.0, float('nan')])
          y = paddle.to_tensor([1.0, float('nan')])
          result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                  equal_nan=False, name="ignore_nan")
          # False

          result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                      equal_nan=True, name="equal_nan")
          # True
    """

    if in_dynamic_mode():
        return _C_ops.allclose(x, y, rtol, atol, equal_nan)
    else:
        check_variable_and_dtype(
            x, "input", ['float16', 'float32', 'float64'], 'allclose'
        )
        check_variable_and_dtype(
            y, "input", ['float16', 'float32', 'float64'], 'allclose'
        )
        check_type(rtol, 'rtol', float, 'allclose')
        check_type(atol, 'atol', float, 'allclose')
        check_type(equal_nan, 'equal_nan', bool, 'allclose')

        helper = LayerHelper("allclose", **locals())
        out = helper.create_variable_for_type_inference(dtype='bool')

        inputs = {'Input': x, 'Other': y}
        outputs = {'Out': out}
        # The static-graph op expects the tolerances serialized as strings.
        attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan}
        helper.append_op(
            type='allclose', inputs=inputs, outputs=outputs, attrs=attrs
        )

        return out
493 494


W
wawltor 已提交
495 496
@templatedoc()
def equal(x, y, name=None):
    """

    This layer returns the truth value of :math:`x == y` elementwise.

    Note:
        The output has no gradient.

    Args:
        x(Tensor): Tensor, data type is bool, float16, float32, float64, int32, int64.
        y(Tensor): Tensor, data type is bool, float16, float32, float64, int32, int64.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: output Tensor, it's shape is the same as the input's Tensor,
        and the data type is bool. The result of this op is stop_gradient.

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.to_tensor([1, 2, 3])
          y = paddle.to_tensor([1, 3, 2])
          result1 = paddle.equal(x, y)
          print(result1)  # result1 = [True False False]
    """
    if not isinstance(y, (int, bool, float, Variable)):
        raise TypeError(
            "Type of input args must be float, bool, int or Tensor, but received type {}".format(
                type(y)
            )
        )
    # Promote a python scalar to a 0-D tensor that matches x's dtype.
    if not isinstance(y, Variable):
        y = full(shape=[], dtype=x.dtype, fill_value=y)

    if in_dynamic_mode():
        return _C_ops.equal(x, y)

    check_variable_and_dtype(
        x,
        "x",
        [
            "bool",
            "float16",
            "float32",
            "float64",
            "int32",
            "int64",
            "uint16",
        ],
        "equal",
    )
    check_variable_and_dtype(
        y,
        "y",
        [
            "bool",
            "float16",
            "float32",
            "float64",
            "int32",
            "int64",
            "uint16",
        ],
        "equal",
    )
    helper = LayerHelper("equal", **locals())
    result = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    result.stop_gradient = True
    helper.append_op(
        type='equal',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [result]},
    )
    return result
574

W
wawltor 已提交
575

576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592
@inplace_apis_in_dygraph_only
def equal_(x, y, name=None):
    r"""
    Inplace version of ``equal`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_equal`.
    """
    # Inplace ops may not broadcast x: the result must keep x's shape.
    bcast_shape = broadcast_shape(x.shape, y.shape)
    if bcast_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                bcast_shape, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.equal_(x, y)


W
wawltor 已提交
593 594 595
@templatedoc()
def greater_equal(x, y, name=None):
    """
    Returns the truth value of :math:`x >= y` elementwise, which is equivalent function to the overloaded operator `>=`.

    Note:
        The output has no gradient.

    Args:
        x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        y(Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: The output shape is same as input :attr:`x`. The output data type is bool.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1, 2, 3])
            y = paddle.to_tensor([1, 3, 2])
            result1 = paddle.greater_equal(x, y)
            print(result1)  # result1 = [True False True]
    """
    if in_dynamic_mode():
        return _C_ops.greater_equal(x, y)

    check_variable_and_dtype(
        x,
        "x",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "greater_equal",
    )
    check_variable_and_dtype(
        y,
        "y",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "greater_equal",
    )
    helper = LayerHelper("greater_equal", **locals())
    result = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    result.stop_gradient = True
    helper.append_op(
        type='greater_equal',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [result]},
    )
    return result
W
wawltor 已提交
660 661


662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678
@inplace_apis_in_dygraph_only
def greater_equal_(x, y, name=None):
    r"""
    Inplace version of ``greater_equal`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_greater_equal`.
    """
    # Inplace ops may not broadcast x: the result must keep x's shape.
    shape_after_bcast = broadcast_shape(x.shape, y.shape)
    if shape_after_bcast != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                shape_after_bcast, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.greater_equal_(x, y)


W
wawltor 已提交
679 680 681
@templatedoc()
def greater_than(x, y, name=None):
    """
    Returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`.

    Note:
        The output has no gradient.

    Args:
        x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        y(Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: The output shape is same as input :attr:`x`. The output data type is bool.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1, 2, 3])
            y = paddle.to_tensor([1, 3, 2])
            result1 = paddle.greater_than(x, y)
            print(result1)  # result1 = [False False True]
    """
    if in_dynamic_mode():
        return _C_ops.greater_than(x, y)

    check_variable_and_dtype(
        x,
        "x",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "greater_than",
    )
    check_variable_and_dtype(
        y,
        "y",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "greater_than",
    )
    helper = LayerHelper("greater_than", **locals())
    result = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    result.stop_gradient = True
    helper.append_op(
        type='greater_than',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [result]},
    )
    return result
W
wawltor 已提交
746 747


748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764
@inplace_apis_in_dygraph_only
def greater_than_(x, y, name=None):
    r"""
    Inplace version of ``greater_than`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_greater_than`.
    """
    # Inplace ops may not broadcast x: the result must keep x's shape.
    merged_shape = broadcast_shape(x.shape, y.shape)
    if merged_shape != x.shape:
        raise ValueError(
            "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
                merged_shape, x.shape
            )
        )
    if in_dynamic_mode():
        return _C_ops.greater_than_(x, y)


W
wawltor 已提交
765 766 767
@templatedoc()
def less_equal(x, y, name=None):
    """
    Returns the truth value of :math:`x <= y` elementwise, which is equivalent function to the overloaded operator `<=`.

    Note:
        The output has no gradient.

    Args:
        x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        y(Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output shape is same as input :attr:`x`. The output data type is bool.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1, 2, 3])
            y = paddle.to_tensor([1, 3, 2])
            result1 = paddle.less_equal(x, y)
            print(result1)  # result1 = [True True False]
    """
    if in_dynamic_mode():
        return _C_ops.less_equal(x, y)

    check_variable_and_dtype(
        x,
        "x",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "less_equal",
    )
    check_variable_and_dtype(
        y,
        "y",
        ["bool", "float16", "float32", "float64", "int32", "int64", "uint16"],
        "less_equal",
    )
    helper = LayerHelper("less_equal", **locals())
    result = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    result.stop_gradient = True
    helper.append_op(
        type='less_equal',
        inputs={'X': [x], 'Y': [y]},
        outputs={'Out': [result]},
    )
    return result
W
wawltor 已提交
833 834


835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851
@inplace_apis_in_dygraph_only
def less_equal_(x, y, name=None):
    r"""
    Inplace version of ``less_equal`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_less_equal`.
    """
    # An inplace result must fit exactly in ``x``, so broadcasting against
    # ``y`` may not change the shape of ``x``.
    broadcasted_shape = broadcast_shape(x.shape, y.shape)
    if broadcasted_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            broadcasted_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.less_equal_(x, y)


W
wawltor 已提交
852 853 854
@templatedoc()
def less_than(x, y, name=None):
    """

    Returns the truth value of :math:`x < y` elementwise, which is equivalent function to the overloaded operator `<`.

    Note:
        The output has no gradient.

    Args:
        x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64, uint16.
        y(Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64, uint16.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output shape is same as input :attr:`x`. The output data type is bool.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1, 2, 3])
            y = paddle.to_tensor([1, 3, 2])
            result1 = paddle.less_than(x, y)
            print(result1)  # result1 = [False True False]
    """
    if in_dynamic_mode():
        return _C_ops.less_than(x, y)
    else:
        # Static graph: validate supported dtypes, then emit a ``less_than``
        # op.  Comparison outputs are bool and carry no gradient.
        check_variable_and_dtype(
            x,
            "x",
            [
                "bool",
                "float16",
                "float32",
                "float64",
                "int32",
                "int64",
                "uint16",
            ],
            "less_than",
        )
        check_variable_and_dtype(
            y,
            "y",
            [
                "bool",
                "float16",
                "float32",
                "float64",
                "int32",
                "int64",
                "uint16",
            ],
            "less_than",
        )
        helper = LayerHelper("less_than", **locals())
        out = helper.create_variable_for_type_inference(dtype='bool')
        out.stop_gradient = True

        helper.append_op(
            type='less_than',
            inputs={'X': [x], 'Y': [y]},
            outputs={'Out': [out]},
        )
        return out
W
wawltor 已提交
920 921


922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938
@inplace_apis_in_dygraph_only
def less_than_(x, y, name=None):
    r"""
    Inplace version of ``less_than`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_less_than`.
    """
    # Reject any broadcast that would produce a shape other than ``x.shape``,
    # since the result is written back into ``x``.
    result_shape = broadcast_shape(x.shape, y.shape)
    if result_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            result_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.less_than_(x, y)


W
wawltor 已提交
939 940 941
@templatedoc()
def not_equal(x, y, name=None):
    """

    Returns the truth value of :math:`x != y` elementwise, which is equivalent function to the overloaded operator `!=`.

    Note:
        The output has no gradient.

    Args:
        x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64, uint16.
        y(Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float16, float32, float64, int32, int64, uint16.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output shape is same as input :attr:`x`. The output data type is bool.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1, 2, 3])
            y = paddle.to_tensor([1, 3, 2])
            result1 = paddle.not_equal(x, y)
            print(result1)  # result1 = [False True True]
    """
    if in_dynamic_mode():
        return _C_ops.not_equal(x, y)
    else:
        # Static graph: validate supported dtypes, then emit a ``not_equal``
        # op.  Comparison outputs are bool and carry no gradient.
        check_variable_and_dtype(
            x,
            "x",
            [
                "bool",
                "float16",
                "float32",
                "float64",
                "int32",
                "int64",
                "uint16",
            ],
            "not_equal",
        )
        check_variable_and_dtype(
            y,
            "y",
            [
                "bool",
                "float16",
                "float32",
                "float64",
                "int32",
                "int64",
                "uint16",
            ],
            "not_equal",
        )
        helper = LayerHelper("not_equal", **locals())
        out = helper.create_variable_for_type_inference(dtype='bool')
        out.stop_gradient = True

        helper.append_op(
            type='not_equal',
            inputs={'X': [x], 'Y': [y]},
            outputs={'Out': [out]},
        )
        return out
Z
zhulei 已提交
1007 1008


1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025
@inplace_apis_in_dygraph_only
def not_equal_(x, y, name=None):
    r"""
    Inplace version of ``not_equal`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_not_equal`.
    """
    # The inplace target is ``x``; broadcasting must not alter its shape.
    broadcasted_shape = broadcast_shape(x.shape, y.shape)
    if broadcasted_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            broadcasted_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.not_equal_(x, y)


Z
zhulei 已提交
1026 1027 1028
def is_tensor(x):
    """

    Tests whether input object is a paddle.Tensor.

    Args:
        x (object): Object to test.

    Returns:
        A boolean value. True if ``x`` is a paddle.Tensor, otherwise False.

    Examples:
        .. code-block:: python

            import paddle

            input1 = paddle.rand(shape=[2, 3, 5], dtype='float32')
            check = paddle.is_tensor(input1)
            print(check)  #True

            input3 = [1, 4]
            check = paddle.is_tensor(input3)
            print(check)  #False

    """
    # In dygraph, tensors are eager Tensor objects; in static graph mode a
    # "tensor" is a Variable in the program.
    if in_dynamic_mode():
        return isinstance(x, (Tensor, paddle.fluid.core.eager.Tensor))
    return isinstance(x, Variable)
1055 1056 1057


def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
    # Shared driver for the bitwise_* APIs: dispatches to the dygraph C++ op,
    # or builds the corresponding static-graph op after dtype validation.
    if in_dynamic_mode():
        c_op = getattr(_C_ops, op_name)
        if binary_op:
            return c_op(x, y)
        return c_op(x)

    check_variable_and_dtype(
        x,
        "x",
        ["bool", "uint8", "int8", "int16", "int32", "int64"],
        op_name,
    )
    if y is not None:
        check_variable_and_dtype(
            y,
            "y",
            ["bool", "uint8", "int8", "int16", "int32", "int64"],
            op_name,
        )
    if out is not None:
        check_type(out, "out", Variable, op_name)

    # NOTE: LayerHelper receives **locals(), so no extra locals are bound
    # before this call.
    helper = LayerHelper(op_name, **locals())
    if binary_op:
        assert x.dtype == y.dtype

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

    op_inputs = {"X": x, "Y": y} if binary_op else {"X": x}
    helper.append_op(type=op_name, inputs=op_inputs, outputs={"Out": out})

    return out
1098 1099 1100


def bitwise_and(x, y, out=None, name=None):
    r"""

    Apply ``bitwise_and`` on Tensor ``X`` and ``Y`` .

    .. math::
        Out = X \& Y

    .. note::
        ``paddle.bitwise_and`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

    .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor.

    Args:
        x (Tensor): Input Tensor of ``bitwise_and`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        y (Tensor): Input Tensor of ``bitwise_and`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        out (Tensor, optional): Result of ``bitwise_and`` . It is a N-D Tensor with the same data type of input Tensor. Default: None.

    Returns:
        Tensor: Result of ``bitwise_and`` . It is a N-D Tensor with the same data type of input Tensor.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.to_tensor([-5, -1, 1])
            y = paddle.to_tensor([4,  2, -3])
            res = paddle.bitwise_and(x, y)
            print(res)  # [0, 2, 1]
    """
    # Fast path: in dygraph with no preallocated output, call the C++ op
    # directly; otherwise fall back to the shared op builder.
    if in_dynamic_mode() and out is None:
        return _C_ops.bitwise_and(x, y)
    return _bitwise_op(
        op_name="bitwise_and", x=x, y=y, name=name, out=out, binary_op=True
    )
1135 1136


1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153
@inplace_apis_in_dygraph_only
def bitwise_and_(x, y, name=None):
    r"""
    Inplace version of ``bitwise_and`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_bitwise_and`.
    """
    # The result overwrites ``x``, so broadcasting must keep ``x``'s shape.
    result_shape = broadcast_shape(x.shape, y.shape)
    if result_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            result_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.bitwise_and_(x, y)


1154
def bitwise_or(x, y, out=None, name=None):
    r"""

    Apply ``bitwise_or`` on Tensor ``X`` and ``Y`` .

    .. math::
        Out = X | Y

    .. note::
        ``paddle.bitwise_or`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

    .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor.

    Args:
        x (Tensor): Input Tensor of ``bitwise_or`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        y (Tensor): Input Tensor of ``bitwise_or`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        out (Tensor, optional): Result of ``bitwise_or`` . It is a N-D Tensor with the same data type of input Tensor. Default: None.

    Returns:
        Tensor: Result of ``bitwise_or`` . It is a N-D Tensor with the same data type of input Tensor.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.to_tensor([-5, -1, 1])
            y = paddle.to_tensor([4,  2, -3])
            res = paddle.bitwise_or(x, y)
            print(res)  # [-1, -1, -3]
    """
    # Fast path: in dygraph with no preallocated output, call the C++ op
    # directly; otherwise fall back to the shared op builder.
    if in_dynamic_mode() and out is None:
        return _C_ops.bitwise_or(x, y)

    return _bitwise_op(
        op_name="bitwise_or", x=x, y=y, name=name, out=out, binary_op=True
    )
1190 1191


1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208
@inplace_apis_in_dygraph_only
def bitwise_or_(x, y, name=None):
    r"""
    Inplace version of ``bitwise_or`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_bitwise_or`.
    """
    # The result overwrites ``x``, so broadcasting must keep ``x``'s shape.
    result_shape = broadcast_shape(x.shape, y.shape)
    if result_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            result_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.bitwise_or_(x, y)


1209
def bitwise_xor(x, y, out=None, name=None):
    r"""

    Apply ``bitwise_xor`` on Tensor ``X`` and ``Y`` .

    .. math::
        Out = X ^\wedge Y

    .. note::
        ``paddle.bitwise_xor`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

    .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor.

    Args:
        x (Tensor): Input Tensor of ``bitwise_xor`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        y (Tensor): Input Tensor of ``bitwise_xor`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        out (Tensor, optional): Result of ``bitwise_xor`` . It is a N-D Tensor with the same data type of input Tensor. Default: None.

    Returns:
        Tensor: Result of ``bitwise_xor`` . It is a N-D Tensor with the same data type of input Tensor.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.to_tensor([-5, -1, 1])
            y = paddle.to_tensor([4,  2, -3])
            res = paddle.bitwise_xor(x, y)
            print(res) # [-1, -3, -4]
    """
    # Fast path: in dygraph with no preallocated output, call the C++ op
    # directly; otherwise fall back to the shared op builder.
    if in_dynamic_mode() and out is None:
        return _C_ops.bitwise_xor(x, y)
    return _bitwise_op(
        op_name="bitwise_xor", x=x, y=y, name=name, out=out, binary_op=True
    )
1244 1245


1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262
@inplace_apis_in_dygraph_only
def bitwise_xor_(x, y, name=None):
    r"""
    Inplace version of ``bitwise_xor`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_bitwise_xor`.
    """
    # The result overwrites ``x``, so broadcasting must keep ``x``'s shape.
    result_shape = broadcast_shape(x.shape, y.shape)
    if result_shape != x.shape:
        msg = "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
            result_shape, x.shape
        )
        raise ValueError(msg)
    if in_dynamic_mode():
        return _C_ops.bitwise_xor_(x, y)


1263
def bitwise_not(x, out=None, name=None):
    r"""

    Apply ``bitwise_not`` on Tensor ``X``.

    .. math::
        Out = \sim X

    .. note::
        ``paddle.bitwise_not`` is a unary elementwise operation; the result has the same shape and data type as ``x``.

    Args:
        x (Tensor): Input Tensor of ``bitwise_not`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64.
        out (Tensor, optional): Result of ``bitwise_not`` . It is a N-D Tensor with the same data type of input Tensor. Default: None.

    Returns:
        Tensor: Result of ``bitwise_not`` . It is a N-D Tensor with the same data type of input Tensor.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.to_tensor([-5, -1, 1])
            res = paddle.bitwise_not(x)
            print(res) # [4, 0, -2]
    """
    # Fast path: in dygraph with no preallocated output, call the C++ op
    # directly; otherwise fall back to the shared op builder (unary form).
    if in_dynamic_mode() and out is None:
        return _C_ops.bitwise_not(x)

    return _bitwise_op(
        op_name="bitwise_not", x=x, y=None, name=name, out=out, binary_op=False
    )
A
andyjpaddle 已提交
1297 1298


1299 1300 1301 1302 1303 1304 1305 1306 1307 1308
@inplace_apis_in_dygraph_only
def bitwise_not_(x, name=None):
    r"""
    Inplace version of ``bitwise_not`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_bitwise_not`.
    """
    # Unary op: no broadcast-shape check needed, the result always matches
    # ``x``'s shape.
    if in_dynamic_mode():
        return _C_ops.bitwise_not_(x)


A
andyjpaddle 已提交
1309 1310
@templatedoc()
def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
    r"""
    Check if all :math:`x` and :math:`y` satisfy the condition:

    .. math::

        \left| x - y \right| \leq atol + rtol \times \left| y \right|

    elementwise, for all elements of :math:`x` and :math:`y`. The behaviour of this
    operator is analogous to :math:`numpy.isclose`, namely that it returns :math:`True` if
    two tensors are elementwise equal within a tolerance.

    Args:
        x(Tensor): The input tensor, it's data type should be float16, float32, float64, complex64, complex128.
        y(Tensor): The input tensor, it's data type should be float16, float32, float64, complex64, complex128.
        rtol(rtoltype, optional): The relative tolerance. Default: :math:`1e-5` .
        atol(atoltype, optional): The absolute tolerance. Default: :math:`1e-8` .
        equal_nan(equalnantype, optional): If :math:`True` , then two :math:`NaNs` will be compared as equal. Default: :math:`False` .
        name (str, optional): Name for the operation. For more information, please
            refer to :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: The output tensor, it's data type is bool.

    Examples:
        .. code-block:: python

          import paddle

          x = paddle.to_tensor([10000., 1e-07])
          y = paddle.to_tensor([10000.1, 1e-08])
          result1 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                  equal_nan=False, name="ignore_nan")
          # [True, False]
          result2 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                      equal_nan=True, name="equal_nan")
          # [True, False]

          x = paddle.to_tensor([1.0, float('nan')])
          y = paddle.to_tensor([1.0, float('nan')])
          result1 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                  equal_nan=False, name="ignore_nan")
          # [True, False]
          result2 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                      equal_nan=True, name="equal_nan")
          # [True, True]
    """

    if in_dynamic_mode():
        return _C_ops.isclose(x, y, rtol, atol, equal_nan)

    # Static graph: validate inputs and attributes, then emit an ``isclose``
    # op.  Tolerances are passed as strings, matching the op definition.
    check_variable_and_dtype(
        x,
        "input",
        ['float16', 'float32', 'float64', 'complex64', 'complex128'],
        'isclose',
    )
    check_variable_and_dtype(
        y,
        "input",
        ['float16', 'float32', 'float64', 'complex64', 'complex128'],
        'isclose',
    )
    check_type(rtol, 'rtol', float, 'isclose')
    check_type(atol, 'atol', float, 'isclose')
    check_type(equal_nan, 'equal_nan', bool, 'isclose')

    # NOTE: LayerHelper receives **locals(), so no extra locals are bound
    # before this call.
    helper = LayerHelper("isclose", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')

    helper.append_op(
        type='isclose',
        inputs={'Input': x, 'Other': y},
        outputs={'Out': out},
        attrs={'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan},
    )
    return out