diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 951514710b29d1d6fb3165acd92b65d997f43772..6a479b734823215bdd471dcf82508d46a7832965 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -118,6 +118,8 @@ paddle.fluid.layers.reduce_mean (ArgSpec(args=['input', 'dim', 'keep_dim', 'name
 paddle.fluid.layers.reduce_max (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '66a622db727551761ce4eb73eaa7f6a4'))
 paddle.fluid.layers.reduce_min (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'd50ac552b5d131468ed466d08bb2d38c'))
 paddle.fluid.layers.reduce_prod (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'fcd8301a0ce15f219c7a4bcd0c1e8eca'))
+paddle.fluid.layers.reduce_all (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '646ca4d4a2cc16084f59de44b6927eca'))
+paddle.fluid.layers.reduce_any (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'f36661060aeeaf6c6b1331e41b3726fa'))
 paddle.fluid.layers.sequence_first_step (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '2b290d3d77882bfe9bb8d331cac8cdd3'))
 paddle.fluid.layers.sequence_last_step (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'c16a892f44f7fe71bfa5afc32d3f34ce'))
 paddle.fluid.layers.sequence_slice (ArgSpec(args=['input', 'offset', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'fdcea0e8b5bc7d8d4b1b072c521014e6'))
diff --git a/paddle/fluid/operators/reduce_ops/reduce_all_op.cc b/paddle/fluid/operators/reduce_ops/reduce_all_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..b087fbbb94c7ba2f7449f6bda56010dee1c38ea6
--- /dev/null
+++ b/paddle/fluid/operators/reduce_ops/reduce_all_op.cc
@@ -0,0 +1,20 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/reduce_ops/reduce_all_op.h"
+
+REGISTER_REDUCE_OP(reduce_all);
+REGISTER_OP_CPU_KERNEL(reduce_all,
+                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
+                                         bool, ops::AllFunctor>);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_all_op.cu b/paddle/fluid/operators/reduce_ops/reduce_all_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..bd94ba263d957d0d65506ecd802bf43add6e2fb4
--- /dev/null
+++ b/paddle/fluid/operators/reduce_ops/reduce_all_op.cu
@@ -0,0 +1,19 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/reduce_ops/reduce_all_op.h"
+
+REGISTER_OP_CUDA_KERNEL(reduce_all,
+                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
+                                          bool, ops::AllFunctor>);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_all_op.h b/paddle/fluid/operators/reduce_ops/reduce_all_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba159dd703c8904784546eda262bf7be77967d48
--- /dev/null
+++ b/paddle/fluid/operators/reduce_ops/reduce_all_op.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once

+#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
+
+namespace paddle {
+namespace operators {
+
+struct AllFunctor {
+  template <typename DeviceContext, typename X, typename Y, typename Dim>
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->all(dim);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/reduce_ops/reduce_any_op.cc b/paddle/fluid/operators/reduce_ops/reduce_any_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..d865dcb3c935b76b8da25d723a5f780fb4de255b
--- /dev/null
+++ b/paddle/fluid/operators/reduce_ops/reduce_any_op.cc
@@ -0,0 +1,20 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/reduce_ops/reduce_any_op.h"
+
+REGISTER_REDUCE_OP(reduce_any);
+REGISTER_OP_CPU_KERNEL(reduce_any,
+                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
+                                         bool, ops::AnyFunctor>);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_any_op.cu b/paddle/fluid/operators/reduce_ops/reduce_any_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..66f0c9997ea1e27cf172a6839a68d2eb23395c4d
--- /dev/null
+++ b/paddle/fluid/operators/reduce_ops/reduce_any_op.cu
@@ -0,0 +1,19 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/reduce_ops/reduce_any_op.h"
+
+REGISTER_OP_CUDA_KERNEL(reduce_any,
+                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
+                                          bool, ops::AnyFunctor>);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_any_op.h b/paddle/fluid/operators/reduce_ops/reduce_any_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..b36bad9cada259932d2bd77c2426fbb46790de76
--- /dev/null
+++ b/paddle/fluid/operators/reduce_ops/reduce_any_op.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once

+#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
+
+namespace paddle {
+namespace operators {
+
+struct AnyFunctor {
+  template <typename DeviceContext, typename X, typename Y, typename Dim>
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->any(dim);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index eaa07477d85be0674dd654097849cf6d3f0ac442..0c3cbfa0582eb5ce1b8ec871dbcb0ff8bb7907b4 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -73,6 +73,8 @@ __all__ = [
     'reduce_max',
     'reduce_min',
     'reduce_prod',
+    'reduce_all',
+    'reduce_any',
     'sequence_first_step',
     'sequence_last_step',
     'sequence_slice',
@@ -4738,6 +4740,106 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
     return out
 
 
+def reduce_all(input, dim=None, keep_dim=False, name=None):
+    """
+    Computes the ``logical and`` of tensor elements over the given dimension.
+
+    Args:
+        input (Variable): The input variable, which is a Tensor or LoDTensor.
+        dim (list|int|None): The dimension along which the logical and is computed.
+            If :attr:`None`, compute the logical and over all elements of
+            :attr:`input` and return a Tensor variable with a single element,
+            otherwise it must be in the range :math:`[-rank(input), rank(input))`.
+            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
+        keep_dim (bool): Whether to reserve the reduced dimension in the
+            output Tensor. The result tensor will have one fewer dimension
+            than the :attr:`input` unless :attr:`keep_dim` is true.
+        name (str|None): A name for this layer (optional). If set to None, the
+            layer will be named automatically.
+
+    Returns:
+        Variable: The reduced Tensor variable.
+
+    Examples:
+        .. code-block:: python
+
+            # x is a bool Tensor variable with the following elements:
+            #    [[True, False]
+            #     [True, True]]
+            # Each example below is followed by the corresponding output tensor.
+            fluid.layers.reduce_all(x)  # False
+            fluid.layers.reduce_all(x, dim=0)  # [True, False]
+            fluid.layers.reduce_all(x, dim=-1)  # [False, True]
+            fluid.layers.reduce_all(x, dim=1,
+                                    keep_dim=True)  # [[False], [True]]
+
+    """
+    helper = LayerHelper('reduce_all', **locals())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
+    if dim is not None and not isinstance(dim, list):
+        dim = [dim]
+    helper.append_op(
+        type='reduce_all',
+        inputs={'X': input},
+        outputs={'Out': out},
+        attrs={
+            'dim': dim if dim is not None else [0],
+            'keep_dim': keep_dim,
+            'reduce_all': dim is None
+        })
+    return out
+
+
+def reduce_any(input, dim=None, keep_dim=False, name=None):
+    """
+    Computes the ``logical or`` of tensor elements over the given dimension.
+
+    Args:
+        input (Variable): The input variable, which is a Tensor or LoDTensor.
+        dim (list|int|None): The dimension along which the logical or is computed.
+            If :attr:`None`, compute the logical or over all elements of
+            :attr:`input` and return a Tensor variable with a single element,
+            otherwise it must be in the range :math:`[-rank(input), rank(input))`.
+            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
+        keep_dim (bool): Whether to reserve the reduced dimension in the
+            output Tensor. The result tensor will have one fewer dimension
+            than the :attr:`input` unless :attr:`keep_dim` is true.
+        name (str|None): A name for this layer (optional). If set to None, the
+            layer will be named automatically.
+
+    Returns:
+        Variable: The reduced Tensor variable.
+
+    Examples:
+        .. code-block:: python
+
+            # x is a bool Tensor variable with the following elements:
+            #    [[True, False]
+            #     [False, False]]
+            # Each example below is followed by the corresponding output tensor.
+            fluid.layers.reduce_any(x)  # True
+            fluid.layers.reduce_any(x, dim=0)  # [True, False]
+            fluid.layers.reduce_any(x, dim=-1)  # [True, False]
+            fluid.layers.reduce_any(x, dim=1,
+                                    keep_dim=True)  # [[True], [False]]
+
+    """
+    helper = LayerHelper('reduce_any', **locals())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
+    if dim is not None and not isinstance(dim, list):
+        dim = [dim]
+    helper.append_op(
+        type='reduce_any',
+        inputs={'X': input},
+        outputs={'Out': out},
+        attrs={
+            'dim': dim if dim is not None else [0],
+            'keep_dim': keep_dim,
+            'reduce_all': dim is None
+        })
+    return out
+
+
 def split(input, num_or_sections, dim=-1, name=None):
     """
     Split the input tensor into multiple sub-tensors.
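For trying the two new layers end to end outside the unit tests, here is a minimal standalone sketch (not part of the diff). It assumes the standard Fluid 1.x Program/Executor workflow and that `fluid.layers.data` accepts dtype='bool' so a boolean ndarray can be fed directly; all variable names are illustrative.

    import numpy as np
    import paddle.fluid as fluid

    # Boolean input matching the reduce_all docstring example.
    x_np = np.array([[True, False], [True, True]])

    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        # shape is the full [2, 2] matrix; no implicit batch dimension.
        x = fluid.layers.data(
            name='x', shape=[2, 2], dtype='bool', append_batch_size=False)
        out_all = fluid.layers.reduce_all(x, dim=-1)  # expect [False, True]
        out_any = fluid.layers.reduce_any(x, dim=0)   # expect [True, True]

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)
    all_v, any_v = exe.run(main_prog,
                           feed={'x': x_np},
                           fetch_list=[out_all, out_any])
    print(all_v, any_v)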
diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py
index 8fc8125a773543eea768783155ad152c475535b5..65fc1453d8db13ad9c85746c3bf148f898e8f788 100644
--- a/python/paddle/fluid/tests/unittests/test_reduce_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py
@@ -91,6 +91,78 @@ class TestProdOp(OpTest):
         self.check_grad(['X'], 'Out')
 
 
+class TestAllOp(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_all"
+        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
+        self.outputs = {'Out': self.inputs['X'].all()}
+        self.attrs = {'reduce_all': True}
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestAllOpWithDim(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_all"
+        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
+        self.attrs = {'dim': [1]}
+        self.outputs = {'Out': self.inputs['X'].all(axis=1)}
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestAllOpWithKeepDim(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_all"
+        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
+        self.attrs = {'dim': [1], 'keep_dim': True}
+        self.outputs = {
+            'Out': np.expand_dims(
+                self.inputs['X'].all(axis=1), axis=1)
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestAnyOp(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_any"
+        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
+        self.outputs = {'Out': self.inputs['X'].any()}
+        self.attrs = {'reduce_all': True}
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestAnyOpWithDim(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_any"
+        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
+        self.attrs = {'dim': [1]}
+        self.outputs = {'Out': self.inputs['X'].any(axis=1)}
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestAnyOpWithKeepDim(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_any"
+        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
+        self.attrs = {'dim': [1], 'keep_dim': True}
+        self.outputs = {
+            'Out': np.expand_dims(
+                self.inputs['X'].any(axis=1), axis=1)
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+
 class Test1DReduce(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
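A quick cross-check of the keep_dim expectations above: the np.expand_dims construction used in TestAllOpWithKeepDim and TestAnyOpWithKeepDim is equivalent to NumPy's own keepdims flag, which a few lines of pure NumPy (nothing Paddle-specific assumed) confirm:

    import numpy as np

    x = np.random.randint(0, 2, (5, 6, 10)).astype("bool")

    # expand_dims over the reduced axis == keepdims=True on the reduction.
    assert np.array_equal(
        np.expand_dims(x.all(axis=1), axis=1), x.all(axis=1, keepdims=True))
    assert np.array_equal(
        np.expand_dims(x.any(axis=1), axis=1), x.any(axis=1, keepdims=True))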