Unverified · Commit 57a5403e authored by 201716010711 and committed by GitHub

transfer logical_or api (#48343)

Parent a1bdc652
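In short, this commit removes the legacy `fluid.layers.logical_or` implementation and points every call site at the public 2.x API `paddle.logical_or`. A minimal before/after sketch (illustrative; assumes a Paddle 2.x install):

```python
import paddle

x = paddle.to_tensor([True, False])
y = paddle.to_tensor([False, False])

# before this commit (legacy 1.x-style API, now removed):
# out = paddle.fluid.layers.logical_or(x, y)

# after this commit:
out = paddle.logical_or(x, y)  # -> [True, False]
```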
@@ -37,7 +37,6 @@ from paddle.fluid.layers import (
     cast,
     control_flow,
     logical_and,
-    logical_or,
     nn,
 )
 from paddle.fluid.layers.control_flow import (
@@ -285,7 +284,7 @@ def convert_logical_or(x_func, y_func):
 def _run_paddle_logical_or(x, y):
     x = cast_bool_if_necessary(x)
     y = cast_bool_if_necessary(y)
-    return logical_or(x, y)
+    return paddle.logical_or(x, y)


 def _run_py_logical_or(x_func, y_func):
......
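For context, `_run_paddle_logical_or` above handles the Tensor case of the converted `or`: it casts non-bool inputs to bool and then calls the new API. A rough sketch of equivalent behavior (not the Paddle source; `cast_bool_if_necessary` is approximated by an explicit dtype check):

```python
import paddle

def logical_or_like(x, y):
    # Mirror cast_bool_if_necessary: promote non-bool Tensors to bool
    # before the elementwise OR.
    if x.dtype != paddle.bool:
        x = paddle.cast(x, 'bool')
    if y.dtype != paddle.bool:
        y = paddle.cast(y, 'bool')
    return paddle.logical_or(x, y)

print(logical_or_like(paddle.to_tensor([0, 1]), paddle.to_tensor([0, 0])))
# -> [False, True]
```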
@@ -27,7 +27,7 @@ from ..framework import (
     in_dygraph_mode,
 )
 from ..layer_helper import LayerHelper, unique_name
-from .nn import logical_and, logical_or
+from .nn import logical_and
 from .utils import (
     assert_same_structure,
     map_structure,
......
@@ -127,7 +127,6 @@ __all__ = [
     'shape',
     'size',
     'logical_and',
-    'logical_or',
     'clip',
     'clip_by_norm',
     'mean',
@@ -8575,48 +8574,6 @@ def logical_and(x, y, out=None, name=None):
     )


-def logical_or(x, y, out=None, name=None):
-    """
-    ``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
-    Each element of ``out`` is calculated by
-
-    .. math::
-
-        out = x || y
-
-    .. note::
-        ``paddle.logical_or`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
-
-    Args:
-        x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
-        y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
-        out (Tensor): The ``Variable`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
-        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        N-D Tensor. A location into which the result is stored. Its dimension equals that of ``x``.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import numpy as np
-
-            x_data = np.array([True, False], dtype=np.bool_).reshape(2, 1)
-            y_data = np.array([True, False, True, False], dtype=np.bool_).reshape(2, 2)
-            x = paddle.to_tensor(x_data)
-            y = paddle.to_tensor(y_data)
-            res = paddle.logical_or(x, y)
-            print(res)  # [[ True  True] [ True False]]
-    """
-    if in_dygraph_mode():
-        return _C_ops.logical_or(x, y)
-    return _logical_op(
-        op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True
-    )
-
-
 @templatedoc()
 def clip(x, min, max, name=None):
     """
......
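The example in the removed docstring targets `paddle.logical_or`, which survives this change; a numpy-free variant of it, for reference (illustrative only):

```python
import paddle

x = paddle.to_tensor([True, False]).reshape([2, 1])
y = paddle.to_tensor([True, False, True, False]).reshape([2, 2])

# x broadcasts along the last axis against y, as the docstring describes.
res = paddle.logical_or(x, y)
print(res)
# -> [[True, True ],
#     [True, False]]
```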
@@ -1335,7 +1335,7 @@ class BeamSearchDecoder(Decoder):
         next_lengths = next_lengths + tensor.cast(
             paddle.logical_not(next_finished), beam_state.lengths.dtype
         )
-        next_finished = control_flow.logical_or(
+        next_finished = paddle.logical_or(
             next_finished,
             control_flow.equal(token_indices, self.end_token_tensor),
         )
@@ -1499,7 +1499,7 @@ def _dynamic_decode_imperative(
             # beams would be reordered and the finished status of each
             # entry might change. Otherwise, perform logical OR which
             # would not change the already finished.
-            next_finished = control_flow.logical_or(next_finished, finished)
+            next_finished = paddle.logical_or(next_finished, finished)
             # To confirm states.finished/finished be consistent with
             # next_finished.
             tensor.assign(next_finished, finished)
@@ -1662,9 +1662,7 @@ def _dynamic_decode_declarative(
             # be reordered and the finished status of each entry might change.
             # Otherwise, perform logical OR which would not change the already
             # finished.
-            next_finished = control_flow.logical_or(
-                next_finished, global_finished
-            )
+            next_finished = paddle.logical_or(next_finished, global_finished)
             next_sequence_lengths = nn.elementwise_add(
                 sequence_lengths,
                 tensor.cast(
......
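The "perform logical OR which would not change the already finished" comments above are the reason `paddle.logical_or` is a safe drop-in here: OR-ing the step's result into the running `finished` mask is monotone, so a beam that has finished can never become unfinished. A tiny illustration (tensor values are made up):

```python
import paddle

finished      = paddle.to_tensor([True, False, False])
next_finished = paddle.to_tensor([False, False, True])

print(paddle.logical_or(next_finished, finished))
# -> [True, False, True]  (beam 0 stays finished, beam 2 just finished)
```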
@@ -488,7 +488,7 @@ class BaseModel(fluid.dygraph.Layer):
             ]
             next_finished = self._gather(beam_finished, beam_indices, batch_pos)
             next_finished = fluid.layers.cast(next_finished, "bool")
-            next_finished = fluid.layers.logical_or(
+            next_finished = paddle.logical_or(
                 next_finished,
                 fluid.layers.equal(token_indices, end_token_tensor),
             )
......
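The beam-search call sites above pair the OR with an equality test against the end token; a sketch of that pattern using the new API (names and values here are illustrative):

```python
import paddle

token_indices    = paddle.to_tensor([2, 7, 2], dtype='int64')
end_token_tensor = paddle.full([3], 2, dtype='int64')
next_finished    = paddle.to_tensor([False, True, False])

# A beam is finished if it already was, or if it just emitted the end token.
next_finished = paddle.logical_or(
    next_finished, paddle.equal(token_indices, end_token_tensor)
)
print(next_finished)  # -> [True, True, True]
```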
@@ -870,7 +870,7 @@ class Transformer(Layer):
             )
             log_probs = gather(log_probs, topk_indices, batch_pos)
             finished = gather(finished, beam_indices, batch_pos)
-            finished = layers.logical_or(
+            finished = paddle.logical_or(
                 finished, layers.equal(token_indices, end_token_tensor)
             )
             trg_word = paddle.reshape(token_indices, [-1, 1])
......
@@ -84,7 +84,7 @@ class TestLogicalAnd(IPUOpTest):
 class TestLogicalOr(TestLogicalAnd):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.logical_or
+        self.op = paddle.logical_or


 if __name__ == "__main__":
......
@@ -61,7 +61,7 @@ class TestWhileOp(unittest.TestCase):
         array_len2 = layers.fill_constant(shape=[1], dtype='int32', value=3)
         array_len2 = layers.cast(array_len2, 'int64')
         array_len2.stop_gradient = True
-        cond2 = layers.logical_or(x=j, y=array_len2)
+        cond2 = paddle.logical_or(x=j, y=array_len2)
         cond2 = layers.ones(shape=[1], dtype='int32')
         cond2 = layers.cast(cond2, 'bool')
         while_op = layers.While(cond=cond)
......
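One behavioral note the last test relies on: per the removed docstring, `paddle.logical_or` also accepts non-bool dtypes (int8/16/32/64, float32/64), treating non-zero values as True and returning a bool Tensor, so passing the int64 `j` and `array_len2` directly is fine. A small check (illustrative):

```python
import paddle

j = paddle.full([1], 1, dtype='int64')
n = paddle.full([1], 3, dtype='int64')

out = paddle.logical_or(j, n)
print(out)        # -> [True], since both inputs are non-zero
print(out.dtype)  # -> bool
```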