From 70a887af63ed54111e41f3a67f69bf38cc8de208 Mon Sep 17 00:00:00 2001
From: pawelpiotrowicz <48519735+pawelpiotrowicz@users.noreply.github.com>
Date: Wed, 29 May 2019 09:52:44 +0200
Subject: [PATCH] [NGraph] Add reduce_sum operator for Ngraph (#17450)

test=develop
---
 .../operators/ngraph/ops/reduce_sum_op.h      | 161 ++++++++++++++++++
 .../unittests/ngraph/test_reduce_ngraph_op.py |  37 ++++
 2 files changed, 198 insertions(+)
 create mode 100644 paddle/fluid/operators/ngraph/ops/reduce_sum_op.h
 create mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_reduce_ngraph_op.py

diff --git a/paddle/fluid/operators/ngraph/ops/reduce_sum_op.h b/paddle/fluid/operators/ngraph/ops/reduce_sum_op.h
new file mode 100644
index 0000000000..ad89052880
--- /dev/null
+++ b/paddle/fluid/operators/ngraph/ops/reduce_sum_op.h
@@ -0,0 +1,161 @@
+/*Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <unordered_map>
+#include <vector>
+#include "ngraph/ngraph.hpp"
+#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
+#include "paddle/fluid/platform/ngraph_helper.h"
+
+namespace paddle {
+namespace operators {
+namespace ngraphs {
+
+void BuildReduceSumNode(
+    const std::shared_ptr<paddle::framework::OperatorBase> &op,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map);
+  auto op_attrs = paddle::framework::AttrReader(op->Attrs());
+  bool reduce_all = op_attrs.Get<bool>("reduce_all");
+  bool keep_dim = op_attrs.Get<bool>("keep_dim");
+  std::vector<int> dim = op_attrs.Get<std::vector<int>>("dim");
+  auto input_shape = input->get_shape();
+  ngraph::AxisSet axes;
+  if (reduce_all == true) {
+    for (size_t i = 0; i < input_shape.size(); ++i) {
+      axes.insert(i);
+    }
+  } else {
+    for (auto &i : dim) {
+      if (i < 0) {
+        axes.insert(input_shape.size() + i);
+      } else {
+        axes.insert(i);
+      }
+    }
+  }
+  std::shared_ptr<ngraph::Node> reduce_sum =
+      std::make_shared<ngraph::op::Sum>(input, axes);
+
+  if (keep_dim == true) {
+    std::vector<size_t> dim_shape;
+    std::copy(input_shape.begin(), input_shape.end(),
+              std::back_inserter(dim_shape));
+    for (auto &i : dim) {
+      if (i < 0) {
+        i = input_shape.size() + i;
+      }
+      dim_shape[i] = 1;
+    }
+
+    std::vector<size_t> axis_vector(input_shape.size() - dim.size());
+    std::iota(axis_vector.begin(), axis_vector.end(), 0);
+
+    auto reduce_sum_dim = std::make_shared<ngraph::op::Reshape>(
+        reduce_sum, ngraph::AxisVector(axis_vector), ngraph::Shape(dim_shape));
+
+    paddle::platform::SetOutputNode(op, "Out", reduce_sum_dim, ngb_node_map);
+  } else {
+    if (reduce_sum->get_shape() == ngraph::Shape{}) {
+      reduce_sum = paddle::platform::NgReshaper(reduce_sum, ngraph::Shape{1});
+    }
+    paddle::platform::SetOutputNode(op, "Out", reduce_sum, ngb_node_map);
+  }
+}
+
+void BuildReduceSumGradNode(
+    const std::shared_ptr<paddle::framework::OperatorBase> &op,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
+  auto og = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
+  auto op_attrs = paddle::framework::AttrReader(op->Attrs());
+  std::vector<int> dim = op_attrs.Get<std::vector<int>>("dim");
+  bool reduce_all = op_attrs.Get<bool>("reduce_all");
+  bool keep_dim = op_attrs.Get<bool>("keep_dim");
+
+  auto og_shape = og->get_shape();
+  auto x_shape = x->get_shape();
+  float x_size = std::accumulate(std::begin(x_shape), std::end(x_shape), 1,
+                                 std::multiplies<float>());
+  float og_size = std::accumulate(std::begin(og_shape), std::end(og_shape), 1,
+                                  std::multiplies<float>());
+  ngraph::AxisSet axes;
+
+  if (reduce_all == true) {
+    for (size_t i = 0; i < x_shape.size(); i++) {
+      axes.insert(i);
+    }
+  } else {
+    for (auto &i : dim) {
+      if (i < 0) {
+        axes.insert(x_shape.size() + i);
+      } else {
+        axes.insert(i);
+      }
+    }
+  }
+  std::vector<size_t> axis_vector(og_shape.size());
+  std::iota(axis_vector.begin(), axis_vector.end(), 0);
+  std::vector<size_t> dim_shape;
+
+  for (size_t i = 0; i < x_shape.size(); i++) {
+    if (std::find(dim.begin(), dim.end(), i) == dim.end() &&
+        std::find(dim.begin(), dim.end(), i - x_shape.size()) == dim.end()) {
+      dim_shape.push_back(x_shape[i]);
+    }
+  }
+
+  if (keep_dim == true) {
+    // reshape
+    if (x_size == og_size) {
+      paddle::platform::SetOutputNode(op, "X@GRAD", og, ngb_node_map);
+      return;
+    }
+    auto og_dim = std::make_shared<ngraph::op::Reshape>(
+        og, ngraph::AxisVector(axis_vector), ngraph::Shape(dim_shape));
+    auto result =
+        std::make_shared<ngraph::op::Broadcast>(og_dim, x_shape, axes);
+    paddle::platform::SetOutputNode(op, "X@GRAD", result, ngb_node_map);
+
+  } else {
+    if (x_size == og_size) {
+      auto og_dim = std::make_shared<ngraph::op::Reshape>(
+          og, ngraph::AxisVector(axis_vector), x_shape);
+      paddle::platform::SetOutputNode(op, "X@GRAD", og_dim, ngb_node_map);
+    } else {
+      if (og->get_shape().size() == 1 && og->get_shape()[0] == 1) {
+        og = std::make_shared<ngraph::op::Reshape>(og, ngraph::AxisVector{0},
+                                                   ngraph::Shape{});
+      }
+      auto result = std::make_shared<ngraph::op::Broadcast>(og, x_shape, axes);
+      paddle::platform::SetOutputNode(op, "X@GRAD", result, ngb_node_map);
+    }
+  }
+}
+}  // namespace ngraphs
+}  // namespace operators
+}  // namespace paddle
+
+REGISTER_NG_OP(reduce_sum, BuildReduceSumNode);
+REGISTER_NG_OP(reduce_sum_grad, BuildReduceSumGradNode);
diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_reduce_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_reduce_ngraph_op.py
new file mode 100644
index 0000000000..458f65338d
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ngraph/test_reduce_ngraph_op.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest, sys
+sys.path.append("../")
+import numpy as np
+from test_reduce_op import TestSumOp, Test1DReduce, \
+    Test2DReduce0, Test2DReduce1, Test3DReduce0, Test3DReduce1, Test3DReduce2, \
+    Test3DReduce3, TestKeepDimReduce, TestKeepDimReduceSumMultiAxises, \
+    TestReduceSumWithDimOne, TestReduceSumWithNumelOne


+class Test3DReduce21(Test1DReduce):
+    def setUp(self):
+        self.op_type = "reduce_sum"
+        self.attrs = {'dim': [1, 2]}
+        self.inputs = {'X': np.random.random((20, 1, 5)).astype("float64")}
+        self.outputs = {
+            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
+        }


+if __name__ == '__main__':
+    unittest.main()
-- 
GitLab
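
Usage note: the unit test above reuses the CPU reduce_sum test cases and runs
them through the nGraph test harness. For a quick end-to-end check of the new
bridge outside the harness, a minimal sketch follows; it assumes the Paddle
1.x fluid API and the FLAGS_use_ngraph flag that gated the nGraph engine in
builds of this era, neither of which is defined by this patch.

    # Minimal sketch, not part of the patch: run reduce_sum on the nGraph
    # engine. FLAGS_use_ngraph and the fluid 1.x API are assumptions here.
    import os
    os.environ['FLAGS_use_ngraph'] = 'true'  # set before importing paddle

    import numpy as np
    import paddle.fluid as fluid

    # Same shape and reduction axes as Test3DReduce21 above.
    x = fluid.layers.data(
        name='x', shape=[20, 1, 5], dtype='float64', append_batch_size=False)
    out = fluid.layers.reduce_sum(x, dim=[1, 2], keep_dim=False)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    x_np = np.random.random((20, 1, 5)).astype('float64')
    res, = exe.run(feed={'x': x_np}, fetch_list=[out])
    np.testing.assert_allclose(res, x_np.sum(axis=(1, 2)))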