Unverified commit 1a92098a authored by 201716010711, committed by GitHub

clean fluid task: transfer logical_and api (#48341)

Parent 20c3224d
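The change is mechanical: every call site moves from the deprecated `fluid.layers` namespace to the public `paddle` namespace. A minimal before/after sketch (tensor values are illustrative):

```python
import paddle

x = paddle.to_tensor([True, False, True, False])
y = paddle.to_tensor([True, True, False, False])

# Before: the fluid-era API (removed by this commit)
# res = paddle.fluid.layers.logical_and(x, y)

# After: the public tensor API
res = paddle.logical_and(x, y)
print(res)  # [True, False, False, False]
```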
@@ -36,7 +36,6 @@ from paddle.fluid.layers import (
 from paddle.fluid.layers import (
     cast,
     control_flow,
-    logical_and,
     nn,
 )
 from paddle.fluid.layers.control_flow import (
@@ -233,7 +232,7 @@ def convert_logical_and(x_func, y_func):
 def _run_paddle_logical_and(x, y):
     x = cast_bool_if_necessary(x)
     y = cast_bool_if_necessary(y)
-    return logical_and(x, y)
+    return paddle.logical_and(x, y)


 def _run_py_logical_and(x_func, y_func):
...
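At runtime this helper coerces both operands to bool before delegating to the public API. A rough standalone sketch of that behavior — `cast_bool_if_necessary` is internal, so the explicit `paddle.cast` below is an assumption about what it does:

```python
import paddle

def run_paddle_logical_and(x, y):
    # Mirror _run_paddle_logical_and: coerce non-bool operands to bool,
    # then apply the element-wise AND from the public API.
    if x.dtype != paddle.bool:
        x = paddle.cast(x, 'bool')
    if y.dtype != paddle.bool:
        y = paddle.cast(y, 'bool')
    return paddle.logical_and(x, y)

a = paddle.to_tensor([0, 1, 2])        # ints  -> [False, True, True]
b = paddle.to_tensor([1.0, 0.0, 3.0])  # floats -> [True, False, True]
print(run_paddle_logical_and(a, b))    # [False, False, True]
```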
@@ -27,7 +27,6 @@ from ..framework import (
     in_dygraph_mode,
 )
 from ..layer_helper import LayerHelper, unique_name
-from .nn import logical_and
 from .utils import (
     assert_same_structure,
     map_structure,
@@ -3278,12 +3277,12 @@ class Switch:
         else:
             pre_cond_num = len(self.pre_not_conditions)
             pre_not_cond = self.pre_not_conditions[pre_cond_num - 1]
-            new_not_cond = logical_and(
+            new_not_cond = paddle.logical_and(
                 x=pre_not_cond, y=paddle.logical_not(x=condition)
             )
             self.pre_not_conditions.append(new_not_cond)
             cond_block = ConditionalBlock(
-                [logical_and(x=pre_not_cond, y=condition)],
+                [paddle.logical_and(x=pre_not_cond, y=condition)],
                 is_scalar_condition=True,
             )
...
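The `Switch` bookkeeping above is ordinary boolean algebra: case *i* fires only when its own condition holds and every earlier condition failed, exactly like an if/elif chain. A hypothetical pure-Python sketch of the accumulation:

```python
def switch_conditions(conditions):
    """For each case i, build: (not c_0) and ... and (not c_{i-1}) and c_i."""
    effective = []
    pre_not = True  # conjunction of the negations of all earlier conditions
    for cond in conditions:
        effective.append(pre_not and cond)
        pre_not = pre_not and (not cond)
    return effective

# Only the first true condition "wins", as in an if/elif chain.
print(switch_conditions([False, True, True]))  # [False, True, False]
```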
@@ -124,7 +124,6 @@ __all__ = [
     'strided_slice',
     'shape',
     'size',
-    'logical_and',
     'clip',
     'clip_by_norm',
     'mean',
@@ -8435,46 +8434,6 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
     return out


-def logical_and(x, y, out=None, name=None):
-    r"""
-    ``logical_and`` operator computes element-wise logical AND on ``x`` and ``y``, and returns ``out``. ``out`` is an N-dim boolean ``Tensor``.
-    Each element of ``out`` is calculated by
-
-    .. math::
-
-        out = x \&\& y
-
-    .. note::
-        ``paddle.logical_and`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
-
-    Args:
-        x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
-        y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
-        out (Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
-        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        N-D Tensor. A location into which the result is stored. Its dimension equals that of ``x``.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-
-            x = paddle.to_tensor([True])
-            y = paddle.to_tensor([True, False, True, False])
-            res = paddle.logical_and(x, y)
-            print(res)  # [True False True False]
-    """
-    if in_dygraph_mode():
-        return _C_ops.logical_and(x, y)
-    return _logical_op(
-        op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True
-    )
-
-
 @templatedoc()
 def clip(x, min, max, name=None):
     """
...
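The removed function's behavior survives unchanged as `paddle.logical_and`, including the broadcasting its docstring describes. A quick check mirroring the docstring example:

```python
import paddle

x = paddle.to_tensor([True])                      # shape [1]
y = paddle.to_tensor([True, False, True, False])  # shape [4]

# x broadcasts against y, exactly as the removed docstring describes.
res = paddle.logical_and(x, y)
print(res)  # [True, False, True, False]
```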
@@ -1720,7 +1720,7 @@ def _dynamic_decode_declarative(
                 states_arrays,
             )
         if max_step_num is not None:
-            control_flow.logical_and(
+            paddle.logical_and(
                 paddle.logical_not(nn.reduce_all(global_finished)),
                 control_flow.less_equal(step_idx, max_step_num),
                 cond,
...
@@ -166,7 +166,7 @@ def decoder_decode(context, is_sparse):
         # source sentences have ended.
         length_cond = pd.less_than(x=counter, y=array_len)
         finish_cond = paddle.logical_not(pd.is_empty(x=selected_ids))
-        pd.logical_and(x=length_cond, y=finish_cond, out=cond)
+        paddle.logical_and(x=length_cond, y=finish_cond, out=cond)
         translation_ids, translation_scores = pd.beam_search_decode(
             ids=ids_array, scores=scores_array, beam_size=beam_size, end_id=10
...
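Note the three-argument form in the decoder hunks: with `out=cond`, the result is written into the existing loop-condition variable that the surrounding `While` block reads, rather than into a fresh tensor. A minimal static-graph sketch of that in-place behavior (assuming Paddle 2.x static APIs; the dygraph fast path ignores `out`, so static mode is required):

```python
import paddle

paddle.enable_static()

main = paddle.static.Program()
with paddle.static.program_guard(main):
    length_cond = paddle.full([1], True, dtype='bool')
    finish_cond = paddle.full([1], False, dtype='bool')
    cond = paddle.full([1], True, dtype='bool')

    # Writes the AND result into the existing `cond` variable, so a While
    # block already reading `cond` sees the updated value each iteration.
    paddle.logical_and(x=length_cond, y=finish_cond, out=cond)

exe = paddle.static.Executor()
(result,) = exe.run(main, fetch_list=[cond])
print(result)  # [False]
```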
@@ -1863,7 +1863,7 @@ def fast_decode(
             layers.assign(pre_caches[i]["v"], caches[i]["v"])
         length_cond = layers.less_than(x=step_idx, y=max_len)
         finish_cond = paddle.logical_not(layers.is_empty(x=selected_ids))
-        layers.logical_and(x=length_cond, y=finish_cond, out=cond)
+        paddle.logical_and(x=length_cond, y=finish_cond, out=cond)
         finished_ids, finished_scores = layers.beam_search_decode(
             ids, scores, beam_size=beam_size, end_id=eos_idx
...
@@ -351,9 +351,9 @@ def bmn_loss_func(
         gt_iou_map = fluid.layers.elementwise_mul(gt_iou_map, mask)

         u_hmask = fluid.layers.cast(x=gt_iou_map > 0.7, dtype=DATATYPE)
-        u_mmask = fluid.layers.logical_and(gt_iou_map <= 0.7, gt_iou_map > 0.3)
+        u_mmask = paddle.logical_and(gt_iou_map <= 0.7, gt_iou_map > 0.3)
         u_mmask = fluid.layers.cast(x=u_mmask, dtype=DATATYPE)
-        u_lmask = fluid.layers.logical_and(gt_iou_map <= 0.3, gt_iou_map >= 0.0)
+        u_lmask = paddle.logical_and(gt_iou_map <= 0.3, gt_iou_map >= 0.0)
         u_lmask = fluid.layers.cast(x=u_lmask, dtype=DATATYPE)
         u_lmask = fluid.layers.elementwise_mul(u_lmask, mask)
...
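The BMN change intersects elementwise comparisons into interval masks: each comparison yields a bool tensor, `logical_and` intersects the two bounds, and the cast turns the mask into floats for later arithmetic. A standalone sketch of the same pattern with made-up IoU values:

```python
import paddle

gt_iou_map = paddle.to_tensor([0.1, 0.35, 0.5, 0.75, 0.9])

# Comparisons yield bool tensors; logical_and intersects the intervals.
u_hmask = paddle.cast(gt_iou_map > 0.7, 'float32')                 # high IoU
u_mmask = paddle.logical_and(gt_iou_map <= 0.7, gt_iou_map > 0.3)  # mid IoU
u_mmask = paddle.cast(u_mmask, 'float32')
u_lmask = paddle.logical_and(gt_iou_map <= 0.3, gt_iou_map >= 0.0) # low IoU
u_lmask = paddle.cast(u_lmask, 'float32')

print(u_hmask.numpy())  # [0. 0. 0. 1. 1.]
print(u_mmask.numpy())  # [0. 1. 1. 0. 0.]
print(u_lmask.numpy())  # [1. 0. 0. 0. 0.]
```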
@@ -32,7 +32,7 @@ class TestLogicalAnd(IPUOpTest):
         return False

     def set_test_op(self):
-        self.op = paddle.fluid.layers.logical_and
+        self.op = paddle.logical_and

     def set_op_attrs(self):
         self.attrs = {}
...