Commit 7bd1d03e authored by baojun, committed by tensor-tang

Adding lrn op for ngraph engine (#17189)

* added lrn op test=develop

* Added CreateConstant method test=develop

* avoid duplicates test=develop
Parent 984aa905
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
static void BuildLrnNode(
    const std::shared_ptr<framework::OperatorBase>& op,
    std::shared_ptr<
        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
        ngb_node_map) {
  auto input = platform::GetInputNode(op, "X", ngb_node_map);

  auto op_attrs = framework::AttrReader(op->Attrs());
  const int n = op_attrs.Get<int>("n");
  // nGraph's LRN scales the squared sum by alpha / n, while Paddle's lrn
  // applies alpha directly, so pre-multiply alpha by the window size.
  const float alpha = op_attrs.Get<float>("alpha") * static_cast<float>(n);
  const float beta = op_attrs.Get<float>("beta");
  const float k = op_attrs.Get<float>("k");

  auto lrn_out = std::make_shared<ngraph::op::LRN>(input, alpha, beta, k, n);
  // Paddle's lrn also emits MidOut; nGraph's LRN does not expose the
  // intermediate, so stand in a constant filled with k.
  std::shared_ptr<ngraph::Node> mid_out = paddle::platform::CreateConstant(
      input->get_element_type(), input->get_shape(), {k});

  platform::SetOutputNode(op, "MidOut", mid_out, ngb_node_map);
  platform::SetOutputNode(op, "Out", lrn_out, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
REGISTER_NG_OP(lrn, BuildLrnNode);
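A note on the alpha * n scaling above. Paddle's documented lrn computes, over a window of n channels,

\mathrm{Out}_{i,x,y} = \frac{\mathrm{In}_{i,x,y}}{\bigl(k + \alpha \sum_{j=\max(0,\, i-n/2)}^{\min(C-1,\, i+n/2)} \mathrm{In}_{j,x,y}^{2}\bigr)^{\beta}}

whereas nGraph's LRN scales the squared sum by \alpha / n (the ONNX-style convention), so BuildLrnNode passes alpha * n to cancel the internal division. The \alpha / n convention on the nGraph side is inferred from that scaling rather than stated in this commit.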
@@ -16,7 +16,9 @@ limitations under the License. */
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
@@ -103,6 +105,25 @@ std::shared_ptr<ngraph::Node> GetOutputNode(
return GetNode(op, name, op->Outputs(), ngb_node_map);
}
template <typename T>
std::shared_ptr<ngraph::Node> CreateConstant(const ngraph::element::Type& type,
                                             ngraph::Shape shape,
                                             std::initializer_list<T> values) {
  std::shared_ptr<ngraph::Node> result;
  if (values.size() == 1 && shape != ngraph::Shape{} &&  // NOLINT
      shape != ngraph::Shape{1}) {
    // A single value with a non-scalar shape: build a scalar constant and
    // broadcast it, instead of duplicating the value for every element.
    result = std::make_shared<ngraph::op::Constant>(type, ngraph::Shape{},
                                                    std::vector<T>{values});
    ngraph::AxisSet axis_set;
    for (size_t i = 0; i < shape.size(); ++i) axis_set.insert(i);
    result = std::make_shared<ngraph::op::Broadcast>(result, shape, axis_set);
  } else {
    result = std::make_shared<ngraph::op::Constant>(type, shape,
                                                    std::vector<T>{values});
  }
  return result;
}
void SetOutputNode(
    const std::shared_ptr<paddle::framework::OperatorBase>& op,
    const std::string name, std::shared_ptr<ngraph::Node> node,
......
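A minimal usage sketch of the new helper (hypothetical values and function name, not part of this commit), showing both paths through CreateConstant:

#include "paddle/fluid/platform/ngraph_helper.h"

void CreateConstantExample() {
  // One value, non-scalar shape: takes the broadcast path, producing a
  // scalar Constant plus a Broadcast node rather than six copied values.
  auto filled = paddle::platform::CreateConstant<float>(
      ngraph::element::f32, ngraph::Shape{2, 3}, {1.0f});
  // Several values (or a scalar / one-element shape): stored directly
  // in the Constant node.
  auto row = paddle::platform::CreateConstant<float>(
      ngraph::element::f32, ngraph::Shape{3}, {1.0f, 2.0f, 3.0f});
}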
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from paddle.fluid.tests.unittests.test_lrn_op import TestLRNOp
class TestLRNNGRAPHOp(TestLRNOp):
    # Reuse the reference LRN test case, with a relaxed absolute
    # tolerance for the ngraph engine.
    def test_check_output(self):
        self.check_output(atol=0.002)


# Remove the imported base class so unittest does not discover and run it
# a second time in this module.
del TestLRNOp

if __name__ == '__main__':
    unittest.main()