Unverified commit 0754e09d, authored by H heyanru, committed by GitHub

[Fluid Clean] remove paddle.fluid.layers.nn.reduce_all,reduce_any (#48269)

Parent a0b91c7b
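For downstream code, the migration is mechanical; a minimal sketch (assuming Paddle 2.x eager mode, with `axis`/`keepdim` replacing the removed `dim`/`keep_dim` arguments):

```python
import paddle

x = paddle.to_tensor([[True, False], [True, True]])

# Before (removed): fluid.layers.reduce_all(x, dim=0, keep_dim=False)
out = paddle.all(x, axis=0)                # [True, False]
out = paddle.all(x, axis=1, keepdim=True)  # [[False], [True]]

# Before (removed): fluid.layers.reduce_any(x)
out = paddle.any(x)                        # True
```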
@@ -460,7 +460,7 @@ class OptimizerWithMixedPrecision:
         if self._is_distributed or self._use_pure_fp16:
             with self._train_program._optimized_guard([]):
                 all_infs = layers.concat(found_infs)
-                found_inf = layers.reduce_any(all_infs)
+                found_inf = paddle.any(all_infs)
         return found_inf
......
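The hunk above gates the optimizer on whether any worker saw an inf/nan during mixed-precision scaling. A minimal sketch of the same pattern (the flag tensors here are hypothetical stand-ins for the per-op `found_inf` outputs):

```python
import paddle

# Hypothetical per-op overflow flags, one bool tensor per checked op.
found_infs = [paddle.to_tensor([False]), paddle.to_tensor([True])]
all_infs = paddle.concat(found_infs)  # bool tensor: [False, True]
found_inf = paddle.any(all_infs)      # True: at least one op overflowed
```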
@@ -71,8 +71,6 @@ __all__ = [
     'softmax',
     'pool2d',
     'batch_norm',
-    'reduce_all',
-    'reduce_any',
     'dropout',
     'split',
     'ctc_greedy_decoder',
@@ -2504,137 +2502,6 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
     return out
-
-
-def reduce_all(input, dim=None, keep_dim=False, name=None):
-    """
-    This OP computes the ``logical and`` of tensor elements over the given dimension, and output the result.
-
-    Args:
-        input (Tensor): the input tensor, it's data type should be `bool`.
-        dim (list|int|optional): The dimension along which the logical and is computed.
-            If :attr:`None`, compute the logical and over all elements of
-            :attr:`input` and return a Tensor variable with a single element,
-            otherwise must be in the range :math:`[-rank(input), rank(input))`.
-            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
-        keep_dim (bool): Whether to reserve the reduced dimension in the
-            output Tensor. The result tensor will have one fewer dimension
-            than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
-        name(str|None): A name for this layer(optional). If set None, the layer
-            will be named automatically. The default value is None.
-
-    Returns:
-        Tensor, the output data type is bool. : The reduced tensor variable with ``logical and`` in given dims.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-            import paddle.fluid.layers as layers
-            import numpy as np
-
-            # x is a bool Tensor variable with following elements:
-            #    [[True, False]
-            #     [True, True]]
-            x = fluid.layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
-            x = fluid.layers.cast(x, 'bool')
-
-            out = fluid.layers.reduce_all(x)  # False
-            out = fluid.layers.reduce_all(x, dim=0)  # [True, False]
-            out = fluid.layers.reduce_all(x, dim=-1)  # [False, True]
-            # keep_dim=False, x.shape=(2,2), out.shape=(2,)
-            out = fluid.layers.reduce_all(x, dim=1, keep_dim=True)  # [[False], [True]]
-            # keep_dim=True, x.shape=(2,2), out.shape=(2,1)
-    """
-    if dim is not None and not isinstance(dim, list):
-        dim = [dim]
-
-    if in_dygraph_mode():
-        return _C_ops.all(input, dim if dim is not None else [], keep_dim)
-
-    check_variable_and_dtype(input, 'input', ('bool'), 'reduce_all')
-    helper = LayerHelper('reduce_all', **locals())
-    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
-    helper.append_op(
-        type='reduce_all',
-        inputs={'X': input},
-        outputs={'Out': out},
-        attrs={
-            'dim': dim if dim is not None and dim != [] else [0],
-            'keep_dim': keep_dim,
-            'reduce_all': True
-            if dim is None or dim == [] or len(dim) == len(input.shape)
-            else False,
-        },
-    )
-    return out
-
-
-def reduce_any(input, dim=None, keep_dim=False, name=None):
-    """
-    This OP computes the ``logical or`` of tensor elements over the given dimension, and output the result.
-
-    Args:
-        input (Tensor): the input tensor, it's data type should be `bool`.
-        dim (list|int|optional): The dimension along which the logical and is computed.
-            If :attr:`None`, compute the logical and over all elements of
-            :attr:`input` and return a Tensor variable with a single element,
-            otherwise must be in the range :math:`[-rank(input), rank(input))`.
-            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
-        keep_dim (bool): Whether to reserve the reduced dimension in the
-            output Tensor. The result tensor will have one fewer dimension
-            than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
-        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        Tensor, the output data type is bool. : The reduced tensor variable with ``logical or`` in given dims.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-            import paddle.fluid.layers as layers
-            import numpy as np
-
-            # x is a bool Tensor variable with following elements:
-            #    [[True, False]
-            #     [False, False]]
-            x = fluid.layers.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
-            x = fluid.layers.cast(x, 'bool')
-
-            out = fluid.layers.reduce_any(x)  # True
-            out = fluid.layers.reduce_any(x, dim=0)  # [True, False]
-            out = fluid.layers.reduce_any(x, dim=-1)  # [True, False]
-            # keep_dim=False, x.shape=(2,2), out.shape=(2,)
-            out = fluid.layers.reduce_any(x, dim=1,
-                                          keep_dim=True)  # [[True], [False]]
-            # keep_dim=True, x.shape=(2,2), out.shape=(2,1)
-    """
-    check_variable_and_dtype(input, 'input', ('bool'), 'reduce_any')
-    helper = LayerHelper('reduce_any', **locals())
-    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
-    if dim is not None and not isinstance(dim, list):
-        dim = [dim]
-    helper.append_op(
-        type='reduce_any',
-        inputs={'X': input},
-        outputs={'Out': out},
-        attrs={
-            'dim': dim if dim is not None and dim != [] else [0],
-            'keep_dim': keep_dim,
-            'reduce_all': True
-            if dim is None or dim == [] or len(dim) == len(input.shape)
-            else False,
-        },
-    )
-    return out
-
-
 def split(input, num_or_sections, dim=-1, name=None):
     """
     Split the input tensor into multiple sub-Tensors.
......
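The docstring examples of the removed functions translate directly to the public API. A sketch of the equivalents (same values as the removed examples, assuming eager mode):

```python
import paddle

x = paddle.to_tensor([[True, False], [True, True]])
paddle.all(x)                        # False
paddle.all(x, axis=0)                # [True, False]
paddle.all(x, axis=-1)               # [False, True]
paddle.all(x, axis=1, keepdim=True)  # [[False], [True]]

y = paddle.to_tensor([[True, False], [False, False]])
paddle.any(y)                        # True
paddle.any(y, axis=0)                # [True, False]
paddle.any(y, axis=1, keepdim=True)  # [[True], [False]]
```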
@@ -1481,7 +1481,7 @@ def _dynamic_decode_imperative(
         initial_states,
         initial_finished,
     )
-    cond = paddle.logical_not((nn.reduce_all(initial_finished)))
+    cond = paddle.logical_not((paddle.all(initial_finished)))
     sequence_lengths = tensor.cast(paddle.zeros_like(initial_finished), "int64")
     outputs = None
@@ -1539,7 +1539,7 @@ def _dynamic_decode_imperative(
         control_flow.increment(x=step_idx_tensor, value=1.0, in_place=True)
         step_idx += 1
-        cond = paddle.logical_not(nn.reduce_all(finished))
+        cond = paddle.logical_not(paddle.all(finished))
         if max_step_num is not None and step_idx > max_step_num:
             break
@@ -1589,7 +1589,7 @@ def _dynamic_decode_declarative(
     global_finished.stop_gradient = True
     step_idx = tensor.fill_constant(shape=[1], dtype="int64", value=0)
-    cond = paddle.logical_not((nn.reduce_all(initial_finished)))
+    cond = paddle.logical_not((paddle.all(initial_finished)))
     if max_step_num is not None:
         max_step_num = tensor.fill_constant(
             shape=[1], dtype="int64", value=max_step_num
@@ -1720,12 +1720,12 @@ def _dynamic_decode_declarative(
         )
         if max_step_num is not None:
             paddle.logical_and(
-                paddle.logical_not(nn.reduce_all(global_finished)),
+                paddle.logical_not(paddle.all(global_finished)),
                 paddle.less_equal(step_idx, max_step_num),
                 cond,
             )
         else:
-            paddle.logical_not(nn.reduce_all(global_finished), cond)
+            paddle.logical_not(paddle.all(global_finished), cond)
     final_outputs = map_structure(
         lambda array: tensor.tensor_array_to_tensor(
......
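In the decoder hunks above, `paddle.all` over the per-sequence `finished` flags drives the loop condition. A minimal sketch of the idiom:

```python
import paddle

finished = paddle.to_tensor([False, False, True])
# Keep stepping while at least one sequence is still decoding.
cond = paddle.logical_not(paddle.all(finished))  # True
```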
@@ -873,7 +873,7 @@ class Transformer(Layer):
             predict_ids.append(token_indices)
             parent_ids.append(beam_indices)
-            if layers.reduce_all(finished).numpy():
+            if paddle.all(finished).numpy():
                 break
         predict_ids = paddle.stack(predict_ids, axis=0)
......
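In dygraph, the reduced flag can be pulled to the host to break a plain Python loop, as in the hunk above; a sketch:

```python
import paddle

finished = paddle.to_tensor([True, True])
if paddle.all(finished).numpy():  # host-side bool, usable in Python control flow
    print("all beams finished")
```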
@@ -180,12 +180,12 @@ class TestAll(TestMean):
         self.fetch_list = [out.name]

     def set_test_op(self):
-        self.op = paddle.fluid.layers.reduce_all
+        self.op = paddle.all


 class TestAny(TestAll):
     def set_test_op(self):
-        self.op = paddle.fluid.layers.reduce_any
+        self.op = paddle.any


 if __name__ == "__main__":
......
@@ -505,12 +505,12 @@ class TestAllOpError(unittest.TestCase):
         with program_guard(Program(), Program()):
             # The input type of reduce_all_op must be Variable.
             input1 = 12
-            self.assertRaises(TypeError, fluid.layers.reduce_all, input1)
+            self.assertRaises(TypeError, paddle.all, input1)
             # The input dtype of reduce_all_op must be bool.
             input2 = fluid.layers.data(
                 name='input2', shape=[12, 10], dtype="int32"
             )
-            self.assertRaises(TypeError, fluid.layers.reduce_all, input2)
+            self.assertRaises(TypeError, paddle.all, input2)


 class TestAnyOp(OpTest):
@@ -622,12 +622,12 @@ class TestAnyOpError(unittest.TestCase):
         with program_guard(Program(), Program()):
             # The input type of reduce_any_op must be Variable.
             input1 = 12
-            self.assertRaises(TypeError, fluid.layers.reduce_any, input1)
+            self.assertRaises(TypeError, paddle.any, input1)
             # The input dtype of reduce_any_op must be bool.
             input2 = fluid.layers.data(
                 name='input2', shape=[12, 10], dtype="int32"
             )
-            self.assertRaises(TypeError, fluid.layers.reduce_any, input2)
+            self.assertRaises(TypeError, paddle.any, input2)


 class Test1DReduce(OpTest):
......
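Under static graph, `paddle.all`/`paddle.any` validate their input just as the removed ops did, which is exactly what the updated tests assert. A sketch (assuming static mode):

```python
import paddle

paddle.enable_static()
try:
    paddle.all(12)  # not a Tensor: the static-graph type check raises TypeError
except TypeError:
    print("TypeError raised for non-Tensor input")
```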
@@ -27,8 +27,6 @@ from paddle.fluid.layers import (
 from paddle.fluid.layers import (
     assign,
     fill_constant,
-    reduce_all,
-    reduce_any,
 )
 from paddle.fluid.layers import (
     cast,
@@ -651,7 +649,7 @@ def convert_shape_compare(left, *args):
     def reduce_compare(x, op_str, y):
         element_wise_result = eval("x " + op_str + " y")
         if op_str == "!=":
-            return reduce_any(element_wise_result)
+            return paddle.any(element_wise_result)
         elif (
             op_str == "is"
             or op_str == "is not"
@@ -660,7 +658,7 @@ def convert_shape_compare(left, *args):
         ):
             return element_wise_result
         else:
-            return reduce_all(element_wise_result)
+            return paddle.all(element_wise_result)

     final_result = reduce_compare(left, args[0], args[1])
     for i in range(1, num_cmp):
......
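The reduction choice in `convert_shape_compare` mirrors Python chained-comparison semantics: `!=` holds if any element differs, while the other comparisons must hold for every element. A sketch:

```python
import paddle

x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 2, 4])

paddle.all(x == y)  # False: "==" must hold for every element
paddle.any(x != y)  # True: "!=" holds if any element differs
```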