Unverified commit 1e8346fe, authored by 201716010711, committed by GitHub

delete logical_not api (#48078)

Parent e0dd4ee9
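Call sites in this commit switch from the fluid layer (fluid.layers.logical_not / nn.logical_not / control_flow.logical_not) to the public 2.x API paddle.logical_not. A minimal usage sketch, mirroring the example from the removed docstring (tensor values are illustrative only):

    import paddle

    x = paddle.to_tensor([True, False, True, False])
    # element-wise logical NOT; returns a new boolean tensor
    res = paddle.logical_not(x)
    print(res)  # [False, True, False, True]

paddle.logical_not keeps the (x, out=None, name=None) signature, so the static-graph call sites below (for example ConditionalBlock([paddle.logical_not(pred)], ...)) are drop-in replacements.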
......@@ -37,7 +37,6 @@ from paddle.fluid.layers import (
     cast,
     control_flow,
     logical_and,
-    logical_not,
     logical_or,
     nn,
 )
......@@ -318,7 +317,7 @@ def convert_logical_not(x):
 def _run_paddle_logical_not(x):
     x = cast_bool_if_necessary(x)
-    return logical_not(x)
+    return paddle.logical_not(x)
 def _run_py_logical_not(x):
......
......@@ -27,7 +27,7 @@ from ..framework import (
     in_dygraph_mode,
 )
 from ..layer_helper import LayerHelper, unique_name
-from .nn import logical_and, logical_not, logical_or
+from .nn import logical_and, logical_or
 from .utils import (
     assert_same_structure,
     map_structure,
......@@ -49,6 +49,7 @@ from ..data_feeder import (
     check_dtype,
 )
 from ..backward import _infer_var_data_type_shape_
+import paddle
 from paddle import _C_ops, _legacy_C_ops
 __all__ = [
......@@ -2807,7 +2808,7 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
                 )
             )
         false_cond_block = ConditionalBlock(
-            [logical_not(pred)], is_scalar_condition=True
+            [paddle.logical_not(pred)], is_scalar_condition=True
         )
         with false_cond_block.block():
             origin_false_output = false_fn()
......@@ -3260,13 +3261,13 @@ class Switch:
         if len(self.pre_not_conditions) == 0:
             cond_block = ConditionalBlock([condition], is_scalar_condition=True)
-            not_cond = logical_not(x=condition)
+            not_cond = paddle.logical_not(x=condition)
             self.pre_not_conditions.append(not_cond)
         else:
             pre_cond_num = len(self.pre_not_conditions)
             pre_not_cond = self.pre_not_conditions[pre_cond_num - 1]
             new_not_cond = logical_and(
-                x=pre_not_cond, y=logical_not(x=condition)
+                x=pre_not_cond, y=paddle.logical_not(x=condition)
             )
             self.pre_not_conditions.append(new_not_cond)
         cond_block = ConditionalBlock(
......
......@@ -151,7 +151,6 @@ __all__ = [
     'size',
     'logical_and',
     'logical_or',
-    'logical_not',
     'clip',
     'clip_by_norm',
     'mean',
......@@ -11549,41 +11548,6 @@ def logical_or(x, y, out=None, name=None):
     )
-@templatedoc()
-def logical_not(x, out=None, name=None):
-    """
-    ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``out`` is N-dim boolean ``Variable``.
-    Each element of ``out`` is calculated by
-    .. math::
-        out = !x
-    Args:
-        x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, in32, in64, float32, or float64.
-        out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor` will be created to save the output.
-        name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
-    Returns:
-        Tensor: ${out_comment}
-    Examples:
-        .. code-block:: python
-            import paddle
-            x = paddle.to_tensor([True, False, True, False])
-            res = paddle.logical_not(x)
-            print(res) # [False True False True]
-    """
-    if in_dygraph_mode():
-        return _C_ops.logical_not(x)
-    return _logical_op(
-        op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False
-    )
 @templatedoc()
 def clip(x, min, max, name=None):
     """
......
......@@ -1332,7 +1332,7 @@ class BeamSearchDecoder(Decoder):
             beam_state.lengths, beam_indices, self.batch_size
         )
         next_lengths = next_lengths + tensor.cast(
-            nn.logical_not(next_finished), beam_state.lengths.dtype
+            paddle.logical_not(next_finished), beam_state.lengths.dtype
         )
         next_finished = control_flow.logical_or(
             next_finished,
......@@ -1481,7 +1481,7 @@ def _dynamic_decode_imperative(
         initial_states,
         initial_finished,
     )
-    cond = control_flow.logical_not((nn.reduce_all(initial_finished)))
+    cond = paddle.logical_not((nn.reduce_all(initial_finished)))
     sequence_lengths = tensor.cast(tensor.zeros_like(initial_finished), "int64")
     outputs = None
......@@ -1505,7 +1505,7 @@ def _dynamic_decode_imperative(
             next_sequence_lengths = nn.elementwise_add(
                 sequence_lengths,
                 tensor.cast(
-                    control_flow.logical_not(finished), sequence_lengths.dtype
+                    paddle.logical_not(finished), sequence_lengths.dtype
                 ),
             )
             if impute_finished:  # rectify the states for the finished.
......@@ -1539,7 +1539,7 @@ def _dynamic_decode_imperative(
         control_flow.increment(x=step_idx_tensor, value=1.0, in_place=True)
         step_idx += 1
-        cond = control_flow.logical_not(nn.reduce_all(finished))
+        cond = paddle.logical_not(nn.reduce_all(finished))
         if max_step_num is not None and step_idx > max_step_num:
             break
......@@ -1587,7 +1587,7 @@ def _dynamic_decode_declarative(
     global_finished.stop_gradient = True
     step_idx = tensor.fill_constant(shape=[1], dtype="int64", value=0)
-    cond = control_flow.logical_not((nn.reduce_all(initial_finished)))
+    cond = paddle.logical_not((nn.reduce_all(initial_finished)))
     if max_step_num is not None:
         max_step_num = tensor.fill_constant(
             shape=[1], dtype="int64", value=max_step_num
......@@ -1665,7 +1665,7 @@ def _dynamic_decode_declarative(
             next_sequence_lengths = nn.elementwise_add(
                 sequence_lengths,
                 tensor.cast(
-                    control_flow.logical_not(global_finished),
+                    paddle.logical_not(global_finished),
                     sequence_lengths.dtype,
                 ),
             )
......@@ -1720,12 +1720,12 @@ def _dynamic_decode_declarative(
         )
         if max_step_num is not None:
             control_flow.logical_and(
-                control_flow.logical_not(nn.reduce_all(global_finished)),
+                paddle.logical_not(nn.reduce_all(global_finished)),
                 control_flow.less_equal(step_idx, max_step_num),
                 cond,
             )
         else:
-            control_flow.logical_not(nn.reduce_all(global_finished), cond)
+            paddle.logical_not(nn.reduce_all(global_finished), cond)
     final_outputs = map_structure(
         lambda array: tensor.tensor_array_to_tensor(
......
......@@ -162,7 +162,7 @@ def decoder_decode(context, is_sparse):
         # update the break condition: up to the max length or all candidates of
         # source sentences have ended.
         length_cond = pd.less_than(x=counter, y=array_len)
-        finish_cond = pd.logical_not(pd.is_empty(x=selected_ids))
+        finish_cond = paddle.logical_not(pd.is_empty(x=selected_ids))
         pd.logical_and(x=length_cond, y=finish_cond, out=cond)
         translation_ids, translation_scores = pd.beam_search_decode(
......
......@@ -26,6 +26,7 @@ import tarfile
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from test_dist_base import TestDistRunnerBase, runtime_main, RUN_STEP
+import paddle
 const_para_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(0.001))
 const_bias_attr = const_para_attr
......@@ -1860,7 +1861,7 @@ def fast_decode(
layers.assign(pre_caches[i]["k"], caches[i]["k"])
layers.assign(pre_caches[i]["v"], caches[i]["v"])
length_cond = layers.less_than(x=step_idx, y=max_len)
finish_cond = layers.logical_not(layers.is_empty(x=selected_ids))
finish_cond = paddle.logical_not(layers.is_empty(x=selected_ids))
layers.logical_and(x=length_cond, y=finish_cond, out=cond)
finished_ids, finished_scores = layers.beam_search_decode(
......
......@@ -42,7 +42,7 @@ class TestBase(IPUOpTest):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype="bool"
         )
-        out = paddle.fluid.layers.logical_not(x)
+        out = paddle.logical_not(x)
         self.fetch_list = [out.name]
     def run_model(self, exec_mode):
......