From 9035bb81feb5eaec493ae1f7f388b12830266a53 Mon Sep 17 00:00:00 2001
From: mozga-intel
Date: Wed, 19 Dec 2018 05:12:41 +0100
Subject: [PATCH] Enable mul operator for the nGraph engine (#14801)

* Enable mul operator for nGraph
test=develop

* Enable activation ops test
test=develop

* Remove unused line
test=develop
---
 paddle/fluid/framework/ngraph_bridge.cc              |  91 ++----------
 paddle/fluid/operators/ngraph/ngraph_ops.h           |  25 ++++
 paddle/fluid/operators/ngraph/ops/binary_unnary_op.h |  52 +++++++
 paddle/fluid/operators/ngraph/ops/mul_op.h           | 134 ++++++++++++++++++
 paddle/fluid/platform/ngraph_helper.h                | 105 ++++++++++++++
 .../ngraph/test_activation_ngraph_op.py              |  58 ++++++++
 .../unittests/ngraph/test_mul_ngraph_op.py           |  42 ++++++
 7 files changed, 424 insertions(+), 83 deletions(-)
 create mode 100644 paddle/fluid/operators/ngraph/ngraph_ops.h
 create mode 100644 paddle/fluid/operators/ngraph/ops/binary_unnary_op.h
 create mode 100644 paddle/fluid/operators/ngraph/ops/mul_op.h
 create mode 100644 paddle/fluid/platform/ngraph_helper.h
 create mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py
 create mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py

diff --git a/paddle/fluid/framework/ngraph_bridge.cc b/paddle/fluid/framework/ngraph_bridge.cc
index a5acfd70449..5fcb17b9f3a 100644
--- a/paddle/fluid/framework/ngraph_bridge.cc
+++ b/paddle/fluid/framework/ngraph_bridge.cc
@@ -16,100 +16,25 @@ limitations under the License. */

 #include <functional>
 #include <map>

+#include "ngraph/ngraph.hpp"
 #include "paddle/fluid/framework/ngraph_bridge.h"
 #include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/operators/ngraph/ngraph_ops.h"
 #include "paddle/fluid/platform/enforce.h"
-
-#include "ngraph/ngraph.hpp"
+#include "paddle/fluid/platform/ngraph_helper.h"

 namespace paddle {
 namespace framework {

-static std::shared_ptr<ngraph::Node> GetNode(
-    const std::shared_ptr<OperatorBase>& op, const std::string name,
-    const VariableNameMap& var_map,
-    std::shared_ptr<
-        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-        ngb_node_map) {
-  auto& var_names = var_map.at(name);
-  PADDLE_ENFORCE_EQ(var_names.size(), 1,
-                    "op %s name %s expects one associated var", op->Type(),
-                    name);
-  if (ngb_node_map->find(var_names[0]) != ngb_node_map->end()) {
-    return (*ngb_node_map)[var_names[0]];
-  } else {
-    return nullptr;
-  }
-}
-
-static std::shared_ptr<ngraph::Node> GetInputNode(
-    const std::shared_ptr<OperatorBase>& op, const std::string name,
-    std::shared_ptr<
-        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-        ngb_node_map) {
-  return GetNode(op, name, op->Inputs(), ngb_node_map);
-}
-
-static std::shared_ptr<ngraph::Node> GetOutputNode(
-    const std::shared_ptr<OperatorBase>& op, const std::string name,
-    std::shared_ptr<
-        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-        ngb_node_map) {
-  return GetNode(op, name, op->Outputs(), ngb_node_map);
-}
-
-static void SetOutputNode(
-    const std::shared_ptr<OperatorBase>& op, const std::string name,
-    std::shared_ptr<ngraph::Node> node,
-    std::shared_ptr<
-        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-        ngb_node_map) {
-  auto& var_names = op->Outputs().at(name);
-  if (var_names.size() == 1) {
-    (*ngb_node_map)[var_names[0]] = node;
-  } else if (var_names.size() == 0) {
-    (*ngb_node_map)[""] = node;
-  } else {
-    PADDLE_THROW("name %s has more than 1 var_names.", name);
-  }
-}
-
-static bool HasOutput(const std::shared_ptr<OperatorBase>& op,
-                      const std::string name) {
-  auto& outputs = op->Outputs();
-  if (outputs.find(name) == outputs.end()) return false;
-  return outputs.at(name).size() > 0;
-}
-
-template <typename T>
-static void BuildBinaryNode(
-    const std::shared_ptr<OperatorBase>& op,
-    std::shared_ptr<
-        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-        ngb_node_map) {
-  auto x = GetInputNode(op, "X", ngb_node_map);
-  auto y = GetInputNode(op, "Y", ngb_node_map);
-  auto out = std::make_shared<T>(x, y);
-  SetOutputNode(op, "Out", out, ngb_node_map);
-}
-
-template <typename T>
-static void BuildUnaryNode(
-    const std::shared_ptr<OperatorBase>& op,
-    std::shared_ptr<
-        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
-        ngb_node_map) {
-  auto input = GetInputNode(op, "X", ngb_node_map);
-  auto out = std::make_shared<T>(input);
-  SetOutputNode(op, "Out", out, ngb_node_map);
-}
-
 std::map<std::string,
          std::function<void(const std::shared_ptr<OperatorBase>&,
                             std::shared_ptr<std::unordered_map<
                                 std::string, std::shared_ptr<ngraph::Node>>>)>>
-    NgraphBridge::NG_NODE_MAP = {{"relu", BuildUnaryNode<ngraph::op::Relu>},
-                                 {"tanh", BuildUnaryNode<ngraph::op::Tanh>}};
+    NgraphBridge::NG_NODE_MAP = {
+        {"mul", paddle::operators::ngraphs::BuildMulNode},
+        {"mul_grad", paddle::operators::ngraphs::BuildMulGradNode},
+        {"relu", paddle::operators::ngraphs::BuildUnaryNode<ngraph::op::Relu>},
+        {"tanh", paddle::operators::ngraphs::BuildUnaryNode<ngraph::op::Tanh>}};

 void NgraphBridge::BuildNgNode(const std::shared_ptr<OperatorBase>& op) {
   auto& op_type = op->Type();
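[Note, not part of the patch] NG_NODE_MAP is a plain string-keyed dispatch
table: each Paddle op type maps to a builder callback that appends the
matching nGraph node to a shared name-to-node map. A minimal Python sketch of
the same pattern; every name below is illustrative, none of it is Paddle or
nGraph API:

    # String-keyed dispatch from op type to a builder callback, mirroring
    # NG_NODE_MAP / BuildNgNode in ngraph_bridge.cc.
    def build_relu(op, node_map):
        node_map[op["outputs"]["Out"]] = ("Relu", node_map[op["inputs"]["X"]])

    def build_mul(op, node_map):
        node_map[op["outputs"]["Out"]] = (
            "Dot", node_map[op["inputs"]["X"]], node_map[op["inputs"]["Y"]])

    NG_NODE_MAP = {"relu": build_relu, "mul": build_mul}

    def build_ng_node(op, node_map):
        try:
            builder = NG_NODE_MAP[op["type"]]
        except KeyError:
            raise KeyError("no nGraph builder registered for op '%s'" % op["type"])
        builder(op, node_map)

    # Usage: translate one op, then read the new node off the shared map.
    nodes = {"x": "param:x", "y": "param:y"}
    build_ng_node({"type": "mul",
                   "inputs": {"X": "x", "Y": "y"},
                   "outputs": {"Out": "out"}}, nodes)
    assert nodes["out"] == ("Dot", "param:x", "param:y")

The design choice in the hunk above is to move the builders out of the bridge
into per-op headers, so registering a new op only touches the map entry.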
diff --git a/paddle/fluid/operators/ngraph/ngraph_ops.h b/paddle/fluid/operators/ngraph/ngraph_ops.h
new file mode 100644
index 00000000000..0ed77ff5577
--- /dev/null
+++ b/paddle/fluid/operators/ngraph/ngraph_ops.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+/*
+ * This file contains the list of the ngraph operators for Paddle.
+ *
+ * ATTENTION: It requires some C++11 features; for lower C++ versions or for
+ * C, we might release another API.
+ */
+
+#pragma once
+
+#include "ops/binary_unnary_op.h"
+#include "ops/mul_op.h"
diff --git a/paddle/fluid/operators/ngraph/ops/binary_unnary_op.h b/paddle/fluid/operators/ngraph/ops/binary_unnary_op.h
new file mode 100644
index 00000000000..4e2f5e231c1
--- /dev/null
+++ b/paddle/fluid/operators/ngraph/ops/binary_unnary_op.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef PADDLE_WITH_NGRAPH
+#pragma once
+
+#include <string>
+#include "ngraph/ngraph.hpp"
+#include "paddle/fluid/platform/ngraph_helper.h"
+
+namespace paddle {
+namespace operators {
+namespace ngraphs {
+
+template <typename T>
+static void BuildBinaryNode(
+    const std::shared_ptr<paddle::framework::OperatorBase>& op,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
+  auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
+  auto out = std::make_shared<T>(x, y);
+  paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
+}
+
+template <typename T>
+static void BuildUnaryNode(
+    const std::shared_ptr<paddle::framework::OperatorBase>& op,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map);
+  auto out = std::make_shared<T>(input);
+  paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
+}
+
+}  // namespace ngraphs
+}  // namespace operators
+}  // namespace paddle
+#endif
diff --git a/paddle/fluid/operators/ngraph/ops/mul_op.h b/paddle/fluid/operators/ngraph/ops/mul_op.h
new file mode 100644
index 00000000000..9e12e5d7c3d
--- /dev/null
+++ b/paddle/fluid/operators/ngraph/ops/mul_op.h
@@ -0,0 +1,134 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef PADDLE_WITH_NGRAPH
+#pragma once
+
+#include <algorithm>
+#include <string>
+#include "ngraph/ngraph.hpp"
+#include "paddle/fluid/platform/ngraph_helper.h"
+
+namespace paddle {
+namespace operators {
+namespace ngraphs {
+
+static void BuildMulNode(
+    const std::shared_ptr<paddle::framework::OperatorBase>& op,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  auto op_attrs = paddle::framework::AttrReader(op->Attrs());
+  int x_num_col_dims = op_attrs.Get<int>("x_num_col_dims");
+  int y_num_col_dims = op_attrs.Get<int>("y_num_col_dims");
+  auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
+  auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
+
+  auto x_reshape = x;
+  auto y_reshape = y;
+
+  if (x->get_shape().size() > 2) {
+    auto x_2d = paddle::platform::FlattenTo2d(x->get_shape(), x_num_col_dims);
+    x_reshape = paddle::platform::NgReshaper(x, x_2d);
+  }
+
+  if (y->get_shape().size() > 2) {
+    auto y_2d = paddle::platform::FlattenTo2d(y->get_shape(), y_num_col_dims);
+    y_reshape = paddle::platform::NgReshaper(y, y_2d);
+  }
+
+  std::shared_ptr<ngraph::Node> out =
+      std::make_shared<ngraph::op::Dot>(x_reshape, y_reshape);
+
+  auto dummy_out = paddle::platform::GetOutputNode(op, "Out", ngb_node_map);
+  if (dummy_out && dummy_out->get_shape() != out->get_shape()) {
+    out = paddle::platform::NgReshaper(out, dummy_out->get_shape());
+  }
+  paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
+}
+
+static void BuildMulGradNode(
+    const std::shared_ptr<paddle::framework::OperatorBase>& op,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  auto op_attrs = paddle::framework::AttrReader(op->Attrs());
+  int x_num_col_dims = op_attrs.Get<int>("x_num_col_dims");
+  int y_num_col_dims = op_attrs.Get<int>("y_num_col_dims");
+  auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
+  auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
+  auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
+
+  bool is_dx = paddle::platform::HasOutput(op, "X@GRAD");
+  bool is_dy = paddle::platform::HasOutput(op, "Y@GRAD");
+
+  auto x_shape = x->get_shape();
+  auto y_shape = y->get_shape();
+
+  auto x_reshape = x;
+  auto y_reshape = y;
+
+  if (x_shape.size() > 2) {
+    auto x_2d_shape = paddle::platform::FlattenTo2d(x_shape, x_num_col_dims);
+    x_reshape = paddle::platform::NgReshaper(x, x_2d_shape);
+  }
+
+  if (y_shape.size() > 2) {
+    auto y_2d_shape = paddle::platform::FlattenTo2d(y_shape, y_num_col_dims);
+    y_reshape = paddle::platform::NgReshaper(y, y_2d_shape);
+  }
+
+  auto x_reshape_shape = x_reshape->get_shape();
+  std::reverse(x_reshape_shape.begin(), x_reshape_shape.end());
+  auto x_transpose = std::make_shared<ngraph::op::Reshape>(
+      x_reshape, ngraph::AxisVector{1, 0}, x_reshape_shape);
+
+  auto y_reshape_shape = y_reshape->get_shape();
+  std::reverse(y_reshape_shape.begin(), y_reshape_shape.end());
+  auto y_transpose = std::make_shared<ngraph::op::Reshape>(
+      y_reshape, ngraph::AxisVector{1, 0}, y_reshape_shape);
+
+  if (is_dx) {
+    if (dout->get_shape().size() > 2) {
+      auto dout_2d_shape = paddle::platform::FlattenTo2d(dout->get_shape(), 2);
+      dout = paddle::platform::NgReshaper(dout, dout_2d_shape);
+    }
+    auto dx = std::make_shared<ngraph::op::Dot>(dout, y_transpose);
+
+    if (dx->get_shape() == x_shape) {
+      paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
+    } else {
+      auto dx_reshape = paddle::platform::NgReshaper(dx, x_shape);
+      paddle::platform::SetOutputNode(op, "X@GRAD", dx_reshape, ngb_node_map);
+    }
+  }
+
+  if (is_dy) {
+    if (dout->get_shape().size() > 2) {
+      auto dout_2d_shape = paddle::platform::FlattenTo2d(dout->get_shape(), 2);
+      dout = paddle::platform::NgReshaper(dout, dout_2d_shape);
+    }
+    auto dy = std::make_shared<ngraph::op::Dot>(x_transpose, dout);
+
+    if (dy->get_shape() == y_shape) {
+      paddle::platform::SetOutputNode(op, "Y@GRAD", dy, ngb_node_map);
+    } else {
+      auto dy_reshape = paddle::platform::NgReshaper(dy, y_shape);
+      paddle::platform::SetOutputNode(op, "Y@GRAD", dy_reshape, ngb_node_map);
+    }
+  }
+}
+
+}  // namespace ngraphs
+}  // namespace operators
+}  // namespace paddle
+#endif
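[Note, not part of the patch] BuildMulGradNode encodes the standard matrix
product gradients: with the inputs flattened so that Out = X.Y for X of shape
(M, K) and Y of shape (K, N), the chain rule gives dX = dOut.Y^T and
dY = X^T.dOut; the Reshape with AxisVector{1, 0} above is exactly the 2-D
transpose. A NumPy sketch that checks this against a finite difference:

    import numpy as np

    # Out = x @ y with x:(M,K), y:(K,N); dout is the upstream gradient.
    M, K, N = 2, 3, 4
    x = np.random.rand(M, K)
    y = np.random.rand(K, N)
    dout = np.random.rand(M, N)

    dx = dout @ y.T               # mirrors Dot(dout, y_transpose) above
    dy = x.T @ dout               # mirrors Dot(x_transpose, dout) above

    # Finite-difference check on one element of x.
    eps = 1e-6
    x_pert = x.copy()
    x_pert[0, 0] += eps
    numeric = ((x_pert @ y - x @ y) * dout).sum() / eps
    assert np.isclose(numeric, dx[0, 0], atol=1e-4)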
diff --git a/paddle/fluid/platform/ngraph_helper.h b/paddle/fluid/platform/ngraph_helper.h
new file mode 100644
index 00000000000..889fb55c87d
--- /dev/null
+++ b/paddle/fluid/platform/ngraph_helper.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef PADDLE_WITH_NGRAPH
+#pragma once
+
+#include <functional>
+#include <numeric>
+#include <string>
+#include "ngraph/ngraph.hpp"
+
+namespace paddle {
+namespace platform {
+
+static ngraph::Shape FlattenTo2d(ngraph::Shape sh, int num) {
+  auto x1 = std::accumulate(std::begin(sh), std::begin(sh) + num, 1,
+                            std::multiplies<size_t>());
+  auto x2 = std::accumulate(std::begin(sh) + num, std::end(sh), 1,
+                            std::multiplies<size_t>());
+  size_t x1_l = static_cast<size_t>(x1);
+  size_t x2_l = static_cast<size_t>(x2);
+  return ngraph::Shape{x1_l, x2_l};
+}
+
+static std::shared_ptr<ngraph::Node> NgReshaper(
+    std::shared_ptr<ngraph::Node> input, ngraph::Shape shape) {
+  std::vector<size_t> input_order(input->get_shape().size());
+  std::iota(std::begin(input_order), std::end(input_order), 0);
+  return std::make_shared<ngraph::op::Reshape>(
+      input, ngraph::AxisVector(input_order), shape);
+}
+
+static std::shared_ptr<ngraph::Node> GetNode(
+    const std::shared_ptr<paddle::framework::OperatorBase>& op,
+    const std::string prm, const paddle::framework::VariableNameMap& var_map,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  auto& var_names = var_map.at(prm);
+  PADDLE_ENFORCE_EQ(var_names.size(), 1,
+                    "op %s prm %s expects one associated var", op->Type(),
+                    prm);
+  if (ngb_node_map->find(var_names[0]) != ngb_node_map->end()) {
+    return (*ngb_node_map)[var_names[0]];
+  } else {
+    return nullptr;
+  }
+}
+
+static std::shared_ptr<ngraph::Node> GetInputNode(
+    const std::shared_ptr<paddle::framework::OperatorBase>& op,
+    const std::string prm,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  return GetNode(op, prm, op->Inputs(), ngb_node_map);
+}
+
+static std::shared_ptr<ngraph::Node> GetOutputNode(
+    const std::shared_ptr<paddle::framework::OperatorBase>& op,
+    const std::string prm,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  return GetNode(op, prm, op->Outputs(), ngb_node_map);
+}
+
+static void SetOutputNode(
+    const std::shared_ptr<paddle::framework::OperatorBase>& op,
+    const std::string prm, std::shared_ptr<ngraph::Node> node,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  auto& var_names = op->Outputs().at(prm);
+  if (var_names.size() == 1) {
+    (*ngb_node_map)[var_names[0]] = node;
+  } else if (var_names.size() == 0) {
+    (*ngb_node_map)[""] = node;
+  } else {
+    PADDLE_THROW("prm %s has more than 1 var_names.", prm);
+  }
+}
+
+static bool HasOutput(
+    const std::shared_ptr<paddle::framework::OperatorBase>& op,
+    const std::string prm) {
+  auto& outputs = op->Outputs();
+  if (outputs.find(prm) == outputs.end()) return false;
+  return outputs.at(prm).size() > 0;
+}
+
+}  // namespace platform
+}  // namespace paddle
+
+#endif
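[Note, not part of the patch] FlattenTo2d folds the leading num axes of a
shape into a row count and the remaining axes into a column count, which is
how the x_num_col_dims / y_num_col_dims attributes of mul are realized;
NgReshaper then performs the reshape with the axis order unchanged. A small
Python sketch of the same semantics:

    import numpy as np

    def flatten_to_2d(shape, num_col_dims):
        # Mirror of FlattenTo2d: product of the leading num_col_dims axes
        # becomes the row count, product of the rest the column count.
        rows = int(np.prod(shape[:num_col_dims], dtype=np.int64))
        cols = int(np.prod(shape[num_col_dims:], dtype=np.int64))
        return (rows, cols)

    assert flatten_to_2d((2, 3, 4, 5), 2) == (6, 20)
    assert flatten_to_2d((2, 3, 4, 5), 1) == (2, 60)

    # The reshape itself keeps elements in order, as NgReshaper does:
    x = np.arange(120).reshape(2, 3, 4, 5)
    assert x.reshape(flatten_to_2d(x.shape, 2)).shape == (6, 20)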
diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py
new file mode 100644
index 00000000000..2bd9bf84303
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ngraph/test_activation_ngraph_op.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+import paddle.fluid.core as core
+from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.test_activation_op import TestRelu, TestTanh
+
+
+class TestNGRAPHReluDim2(TestRelu):
+    def setUp(self):
+        super(TestNGRAPHReluDim2, self).setUp()
+
+
+class TestNGRAPHTanhDim2(TestTanh):
+    def setUp(self):
+        super(TestNGRAPHTanhDim2, self).setUp()
+
+
+class TestNGRAPHReluDim4(TestRelu):
+    def setUp(self):
+        super(TestNGRAPHReluDim4, self).setUp()
+
+        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
+        # Same rationale as in TestAbs: keep inputs away from zero, where
+        # relu is not differentiable and gradient checks get noisy.
+        x[np.abs(x) < 0.005] = 0.02
+        out = np.maximum(x, 0)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+
+class TestNGRAPHTanhDim4(TestTanh):
+    def setUp(self):
+        super(TestNGRAPHTanhDim4, self).setUp()
+
+        self.inputs = {
+            'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
+        }
+        self.outputs = {'Out': np.tanh(self.inputs['X'])}
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py
new file mode 100644
index 00000000000..6aba62f7c08
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+from paddle.fluid.tests.unittests.test_mul_op import TestMulOp, TestMulOp2, TestFP16MulOp1, TestFP16MulOp2
+
+
+class TestNGRAPHMulOp(TestMulOp):
+    def init_dtype_type(self):
+        pass
+
+
+class TestNGRAPHMulOp2(TestMulOp2):
+    def init_dtype_type(self):
+        pass
+
+
+class TestNGRAPHFP16MulOp1(TestFP16MulOp1):
+    def init_dtype_type(self):
+        pass
+
+
+class TestNGRAPHFP16MulOp2(TestFP16MulOp2):
+    def init_dtype_type(self):
+        pass
+
+
+if __name__ == "__main__":
+    unittest.main()
--
GitLab
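[Note, not part of the patch] For intuition, the forward path BuildMulNode
emits for a 4-D X with x_num_col_dims=2 is, in NumPy terms (illustrative
shapes only):

    import numpy as np

    # X is viewed as a (2*3, 4*5) matrix, Dot-multiplied against a 2-D Y,
    # and the 2-D result is reshaped back to the output's expected shape
    # by the trailing NgReshaper call.
    x = np.random.rand(2, 3, 4, 5)
    y = np.random.rand(20, 7)
    out2d = x.reshape(6, 20) @ y
    out = out2d.reshape(2, 3, 7)      # the reshape-back step
    assert out.shape == (2, 3, 7)

Assuming a build configured with nGraph support (-DWITH_NGRAPH=ON), tests of
this era were typically run with the FLAGS_use_ngraph=true environment
variable set; verify the exact flag name against your checkout.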