From 2c3e4c18881b3c3103735415e5304f2fec5c9f20 Mon Sep 17 00:00:00 2001 From: zhoukunsheng Date: Wed, 20 Mar 2019 15:01:30 +0800 Subject: [PATCH] test=develop add reduce_all, reduce_any op --- paddle/fluid/API.spec | 2 + .../operators/reduce_ops/reduce_all_any_op.h | 36 +++++++ .../operators/reduce_ops/reduce_all_op.cc | 20 ++++ .../operators/reduce_ops/reduce_all_op.cu | 19 ++++ .../operators/reduce_ops/reduce_any_op.cc | 20 ++++ .../operators/reduce_ops/reduce_any_op.cu | 19 ++++ python/paddle/fluid/layers/nn.py | 100 ++++++++++++++++++ 7 files changed, 216 insertions(+) create mode 100644 paddle/fluid/operators/reduce_ops/reduce_all_any_op.h create mode 100644 paddle/fluid/operators/reduce_ops/reduce_all_op.cc create mode 100644 paddle/fluid/operators/reduce_ops/reduce_all_op.cu create mode 100644 paddle/fluid/operators/reduce_ops/reduce_any_op.cc create mode 100644 paddle/fluid/operators/reduce_ops/reduce_any_op.cu diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 1504085804..4385464ce0 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -106,6 +106,8 @@ paddle.fluid.layers.reduce_mean (ArgSpec(args=['input', 'dim', 'keep_dim', 'name paddle.fluid.layers.reduce_max (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '66a622db727551761ce4eb73eaa7f6a4')) paddle.fluid.layers.reduce_min (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'd50ac552b5d131468ed466d08bb2d38c')) paddle.fluid.layers.reduce_prod (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'fcd8301a0ce15f219c7a4bcd0c1e8eca')) +paddle.fluid.layers.reduce_all (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '5b416682bbba15421521a0fbb9b6a38c')) +paddle.fluid.layers.reduce_any (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'd9ecb1bc85d12153f46ba027c10cb405')) paddle.fluid.layers.sequence_first_step (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '2b290d3d77882bfe9bb8d331cac8cdd3')) paddle.fluid.layers.sequence_last_step (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'c16a892f44f7fe71bfa5afc32d3f34ce')) paddle.fluid.layers.sequence_slice (ArgSpec(args=['input', 'offset', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'fdcea0e8b5bc7d8d4b1b072c521014e6')) diff --git a/paddle/fluid/operators/reduce_ops/reduce_all_any_op.h b/paddle/fluid/operators/reduce_ops/reduce_all_any_op.h new file mode 100644 index 0000000000..9667c1a970 --- /dev/null +++ b/paddle/fluid/operators/reduce_ops/reduce_all_any_op.h @@ -0,0 +1,36 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#pragma once
+
+#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
+
+namespace paddle {
+namespace operators {
+
+struct AllFunctor {
+  template <typename DeviceContext, typename X, typename Y, typename Dim>
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->all(dim);
+  }
+};
+
+struct AnyFunctor {
+  template <typename DeviceContext, typename X, typename Y, typename Dim>
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->any(dim);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/reduce_ops/reduce_all_op.cc b/paddle/fluid/operators/reduce_ops/reduce_all_op.cc
new file mode 100644
index 0000000000..af4de3eaab
--- /dev/null
+++ b/paddle/fluid/operators/reduce_ops/reduce_all_op.cc
@@ -0,0 +1,20 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/reduce_ops/reduce_all_any_op.h"
+
+REGISTER_REDUCE_OP(reduce_all);
+REGISTER_OP_CPU_KERNEL(reduce_all,
+                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
+                                         bool, ops::AllFunctor>);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_all_op.cu b/paddle/fluid/operators/reduce_ops/reduce_all_op.cu
new file mode 100644
index 0000000000..7150221f43
--- /dev/null
+++ b/paddle/fluid/operators/reduce_ops/reduce_all_op.cu
@@ -0,0 +1,19 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/reduce_ops/reduce_all_any_op.h"
+
+REGISTER_OP_CUDA_KERNEL(reduce_all,
+                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
+                                          bool, ops::AllFunctor>);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_any_op.cc b/paddle/fluid/operators/reduce_ops/reduce_any_op.cc
new file mode 100644
index 0000000000..f65b2ddc6f
--- /dev/null
+++ b/paddle/fluid/operators/reduce_ops/reduce_any_op.cc
@@ -0,0 +1,20 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/reduce_ops/reduce_all_any_op.h"
+
+REGISTER_REDUCE_OP(reduce_any);
+REGISTER_OP_CPU_KERNEL(reduce_any,
+                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
+                                         bool, ops::AnyFunctor>);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_any_op.cu b/paddle/fluid/operators/reduce_ops/reduce_any_op.cu
new file mode 100644
index 0000000000..389166fec6
--- /dev/null
+++ b/paddle/fluid/operators/reduce_ops/reduce_any_op.cu
@@ -0,0 +1,19 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/reduce_ops/reduce_all_any_op.h"
+
+REGISTER_OP_CUDA_KERNEL(reduce_any,
+                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
+                                          bool, ops::AnyFunctor>);
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index dbe495b75c..837bb92f82 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -73,6 +73,8 @@ __all__ = [
     'reduce_max',
     'reduce_min',
     'reduce_prod',
+    'reduce_all',
+    'reduce_any',
     'sequence_first_step',
     'sequence_last_step',
     'sequence_slice',
@@ -4699,6 +4701,104 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
     return out
 
 
+def reduce_all(input, dim=None, keep_dim=False, name=None):
+    """
+    Computes the logical and of tensor elements over the given dimension.
+
+    Args:
+        input (Variable): The input variable which is a Tensor or LoDTensor.
+        dim (list|int|None): The dimension along which the logical and is computed.
+            If :attr:`None`, compute the logical and over all elements of
+            :attr:`input` and return a Tensor variable with a single element,
+            otherwise must be in the range :math:`[-rank(input), rank(input))`.
+            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
+        keep_dim (bool): Whether to reserve the reduced dimension in the
+            output Tensor. The result tensor will have one fewer dimension
+            than the :attr:`input` unless :attr:`keep_dim` is true.
+        name(str|None): A name for this layer(optional). If set None, the layer
+            will be named automatically.
+
+    Returns:
+        Variable: The reduced Tensor variable.
+
+    Examples:
+        .. code-block:: python
+
+            # x is a bool Tensor variable with following elements:
+            #    [[True, False]
+            #     [True, True]]
+            # Each example is followed by the corresponding output tensor.
+            fluid.layers.reduce_all(x)  # False
+            fluid.layers.reduce_all(x, dim=0)  # [True, False]
+            fluid.layers.reduce_all(x, dim=-1)  # [False, True]
+            fluid.layers.reduce_all(x, dim=1,
+                                    keep_dim=True)  # [[False], [True]]
+
+    """
+    helper = LayerHelper('reduce_all', **locals())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
+    if dim is not None and not isinstance(dim, list):
+        dim = [dim]
+    helper.append_op(
+        type='reduce_all',
+        inputs={'X': input},
+        outputs={'Out': out},
+        attrs={
+            'dim': dim if dim is not None else [0],
+            'keep_dim': keep_dim,
+            'reduce_all': True if dim is None else False
+        })
+    return out
+
+
+def reduce_any(input, dim=None, keep_dim=False, name=None):
+    """
+    Computes the logical or of tensor elements over the given dimension.
+
+    Args:
+        input (Variable): The input variable which is a Tensor or LoDTensor.
+        dim (list|int|None): The dimension along which the logical or is computed.
+            If :attr:`None`, compute the logical or over all elements of
+            :attr:`input` and return a Tensor variable with a single element,
+            otherwise must be in the range :math:`[-rank(input), rank(input))`.
+            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
+        keep_dim (bool): Whether to reserve the reduced dimension in the
+            output Tensor. The result tensor will have one fewer dimension
+            than the :attr:`input` unless :attr:`keep_dim` is true.
+        name(str|None): A name for this layer(optional). If set None, the layer
+            will be named automatically.
+
+    Returns:
+        Variable: The reduced Tensor variable.
+
+    Examples:
+        .. code-block:: python
+
+            # x is a bool Tensor variable with following elements:
+            #    [[True, False]
+            #     [False, False]]
+            # Each example is followed by the corresponding output tensor.
+            fluid.layers.reduce_any(x)  # True
+            fluid.layers.reduce_any(x, dim=0)  # [True, False]
+            fluid.layers.reduce_any(x, dim=-1)  # [True, False]
+            fluid.layers.reduce_any(x, dim=1,
+                                    keep_dim=True)  # [[True], [False]]
+
+    """
+    helper = LayerHelper('reduce_any', **locals())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
+    if dim is not None and not isinstance(dim, list):
+        dim = [dim]
+    helper.append_op(
+        type='reduce_any',
+        inputs={'X': input},
+        outputs={'Out': out},
+        attrs={
+            'dim': dim if dim is not None else [0],
+            'keep_dim': keep_dim,
+            'reduce_all': True if dim is None else False
+        })
+    return out
+
+
 def split(input, num_or_sections, dim=-1, name=None):
     """
     Split the input tensor into multiple sub-tensors.
-- 
GitLab
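Usage sketch for reviewers: a minimal program exercising the two new layers end to end. This is not part of the patch; the variable name 'x', its shape, and the feed data are illustrative assumptions, and the data layer is cast to bool since the new kernels are registered for bool inputs.

    import numpy as np
    import paddle.fluid as fluid

    # Declare a fixed-shape float input and cast it to bool for the reductions.
    x = fluid.layers.data(
        name='x', shape=[2, 2], dtype='float32', append_batch_size=False)
    cond = fluid.layers.cast(x, 'bool')
    out_all = fluid.layers.reduce_all(cond, dim=-1)  # logical and over the last dimension
    out_any = fluid.layers.reduce_any(cond)          # logical or over all elements

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    data = np.array([[1, 0], [1, 1]]).astype('float32')
    res_all, res_any = exe.run(feed={'x': data}, fetch_list=[out_all, out_any])
    # For this input, res_all should hold [False, True] and res_any a single True.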