From cba729404d55489502eeaaf97e4593930b07726e Mon Sep 17 00:00:00 2001
From: mozga-intel
Date: Wed, 9 Jan 2019 07:08:53 +0100
Subject: [PATCH] Enable softmax operator for the ngraph engine

test=develop
---
 paddle/fluid/framework/ngraph_bridge.cc            |  2 +
 paddle/fluid/operators/ngraph/ngraph_ops.h         |  1 +
 .../fluid/operators/ngraph/ops/softmax_op.h        | 74 +++++++++++++++++++
 .../ngraph/test_softmax_ngraph_op.py               | 26 +++++++
 4 files changed, 103 insertions(+)
 create mode 100644 paddle/fluid/operators/ngraph/ops/softmax_op.h
 create mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py

diff --git a/paddle/fluid/framework/ngraph_bridge.cc b/paddle/fluid/framework/ngraph_bridge.cc
index b083493ba4f..d5ca47b1919 100644
--- a/paddle/fluid/framework/ngraph_bridge.cc
+++ b/paddle/fluid/framework/ngraph_bridge.cc
@@ -36,6 +36,8 @@ std::map<std::string,
          std::function<void(const std::shared_ptr<OperatorBase>&,
                             std::shared_ptr<std::unordered_map<
                                 std::string, std::shared_ptr<ngraph::Node>>>)>>
     NgraphBridge::NG_NODE_MAP = {
+        {"softmax", paddle::operators::ngraphs::BuildSoftmaxNode},
+        {"softmax_grad", paddle::operators::ngraphs::BuildSoftmaxGradNode},
         {"relu", paddle::operators::ngraphs::BuildUnaryNode<ngraph::op::Relu>},
         {"tanh", paddle::operators::ngraphs::BuildUnaryNode<ngraph::op::Tanh>},
diff --git a/paddle/fluid/operators/ngraph/ngraph_ops.h b/paddle/fluid/operators/ngraph/ngraph_ops.h
index 2a479081f1e..3e6277e6baa 100644
--- a/paddle/fluid/operators/ngraph/ngraph_ops.h
+++ b/paddle/fluid/operators/ngraph/ngraph_ops.h
@@ -26,4 +26,5 @@ limitations under the License. */
 #include "ops/mean_op.h"
 #include "ops/mul_op.h"
 #include "ops/scale_op.h"
+#include "ops/softmax_op.h"
 #include "ops/top_k_op.h"
diff --git a/paddle/fluid/operators/ngraph/ops/softmax_op.h b/paddle/fluid/operators/ngraph/ops/softmax_op.h
new file mode 100644
index 00000000000..fc6395c08bc
--- /dev/null
+++ b/paddle/fluid/operators/ngraph/ops/softmax_op.h
@@ -0,0 +1,74 @@
+/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include "ngraph/ngraph.hpp"
+#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h"
+#include "paddle/fluid/platform/ngraph_helper.h"
+
+namespace paddle {
+namespace operators {
+namespace ngraphs {
+
+void BuildSoftmaxNode(
+    const std::shared_ptr<paddle::framework::OperatorBase>& op,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
+  auto x_shape = x->get_shape();
+  int rank = x_shape.size();
+  auto x_2d_shape = paddle::platform::FlattenTo2d(x_shape, rank - 1);
+  x = paddle::platform::NgReshaper(x, x_2d_shape);
+
+  // Numerically stable softmax: subtract the per-row max, clip the
+  // shifted values at -64 so exp() cannot underflow, then normalize.
+  auto x_max = std::make_shared<ngraph::op::Max>(x, ngraph::AxisSet{1});
+  auto x_max_bcast = std::make_shared<ngraph::op::Broadcast>(
+      x_max, x_2d_shape, ngraph::AxisSet{1});
+  auto x_shifted = x - x_max_bcast;
+  auto x_clipped =
+      paddle::operators::ngraphs::ElementwiseScalar<ngraph::op::Maximum>(
+          -64., x_shifted);
+  auto softmax =
+      std::make_shared<ngraph::op::Softmax>(x_clipped, ngraph::AxisSet{1});
+  paddle::platform::SetOutputNode(op, "Out", softmax, ngb_node_map);
+}
+
+void BuildSoftmaxGradNode(
+    const std::shared_ptr<paddle::framework::OperatorBase>& op,
+    std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
+        ngb_node_map) {
+  auto out = paddle::platform::GetInputNode(op, "Out", ngb_node_map);
+  auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
+  auto out_shape = out->get_shape();
+  int rank = out_shape.size();
+  auto out_2d_shape = paddle::platform::FlattenTo2d(out_shape, rank - 1);
+  auto dout_2d_shape =
+      paddle::platform::FlattenTo2d(dout->get_shape(), rank - 1);
+  out = paddle::platform::NgReshaper(out, out_2d_shape);
+  dout = paddle::platform::NgReshaper(dout, dout_2d_shape);
+
+  // dX = (dOut - sum(Out * dOut, axis=1)) * Out
+  auto node_sum =
+      std::make_shared<ngraph::op::Sum>(out * dout, ngraph::AxisSet{1});
+  auto node_bcast = std::make_shared<ngraph::op::Broadcast>(
+      node_sum, out_2d_shape, ngraph::AxisSet{1});
+  auto dx = (dout - node_bcast) * out;
+  paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
+}
+}  // namespace ngraphs
+}  // namespace operators
+}  // namespace paddle
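The two builders above encode the standard numerically stable softmax and its analytic gradient. For reference, here is a minimal NumPy sketch of the same computation; the function names are illustrative and not part of the patch, but the 2-D flattening, the -64 clip, and the gradient formula mirror the C++ above:

    import numpy as np

    def softmax_reference(x):
        # Flatten to 2-D over the last axis, as FlattenTo2d/NgReshaper do.
        x2d = x.reshape(-1, x.shape[-1])
        # Subtract the per-row max, then clip at -64 so exp() cannot underflow.
        shifted = np.maximum(x2d - x2d.max(axis=1, keepdims=True), -64.0)
        e = np.exp(shifted)
        return (e / e.sum(axis=1, keepdims=True)).reshape(x.shape)

    def softmax_grad_reference(out, dout):
        # dX = (dOut - sum(Out * dOut, axis=1)) * Out, as in BuildSoftmaxGradNode.
        out2d = out.reshape(-1, out.shape[-1])
        dout2d = dout.reshape(-1, dout.shape[-1])
        s = (out2d * dout2d).sum(axis=1, keepdims=True)
        return ((dout2d - s) * out2d).reshape(out.shape)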
diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py
new file mode 100644
index 00000000000..81894c6e387
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import unittest
+from paddle.fluid.tests.unittests.test_softmax_op import TestSoftmaxOp
+
+
+class TestSoftmaxNGRAPHOp(TestSoftmaxOp):
+    def setUp(self):
+        super(TestSoftmaxNGRAPHOp, self).setUp()
+
+
+if __name__ == "__main__":
+    unittest.main()
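The new test deliberately adds no logic of its own: TestSoftmaxNGRAPHOp inherits its data, attributes, and tolerances from TestSoftmaxOp and merely re-registers the case under the ngraph test directory. Selecting the ngraph engine is assumed to happen in the test harness rather than in this file (in PaddlePaddle of this era, via the FLAGS_use_ngraph flag set by the test runner). A quick sanity check of the reference sketch above, using illustrative values:

    x = np.random.rand(10, 10).astype("float32")
    out = softmax_reference(x)
    assert np.allclose(out.sum(axis=1), 1.0, atol=1e-5)
    # A constant upstream gradient makes the softmax gradient vanish.
    assert np.allclose(softmax_grad_reference(out, np.ones_like(out)), 0.0, atol=1e-6)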