Commit c47e258e authored by baojun, committed by tensor-tang

Add ngraph sum, sigmoid, relu_grad and tanh_grad ops (#15642)

* Added ngraph sum op test=develop

* Added sigmoid, relu_grad and tanh_grad test=develop

* remove duplicates test=develop
Parent 33d0cebb
@@ -48,8 +48,12 @@ std::map<std::string,
       {"softmax", NG_OPS::BuildSoftmaxNode},
       {"softmax_grad", NG_OPS::BuildSoftmaxGradNode},
       {"scale", NG_OPS::BuildScaleNode},
+      {"sigmoid", NG_OPS::BuildUnaryNode<ngraph::op::Sigmoid>},
+      {"sum", NG_OPS::BuildSumNode},
       {"relu", NG_OPS::BuildUnaryNode<ngraph::op::Relu>},
+      {"relu_grad", NG_OPS::BuildReluGradNode},
       {"tanh", NG_OPS::BuildUnaryNode<ngraph::op::Tanh>},
+      {"tanh_grad", NG_OPS::BuildTanhGradNode},
       {"top_k", NG_OPS::BuildTopKNode}};

 void NgraphBridge::BuildNgNode(
...
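For context, the bridge resolves an operator's type string through this table and invokes the registered builder to emit the corresponding ngraph node. A minimal, self-contained sketch of that string-to-builder dispatch pattern (FakeOp and Builder are illustrative stand-ins, not the actual NgraphBridge types):

#include <functional>
#include <iostream>
#include <map>
#include <string>

// Illustrative stand-in for the framework's operator description.
struct FakeOp {
  std::string type;
};
using Builder = std::function<void(const FakeOp&)>;

int main() {
  // A string-keyed builder table, mirroring the NG_OPS map in the hunk above.
  std::map<std::string, Builder> builders = {
      {"relu", [](const FakeOp&) { std::cout << "build relu node\n"; }},
      {"relu_grad", [](const FakeOp&) { std::cout << "build relu_grad node\n"; }},
  };
  FakeOp op{"relu_grad"};
  auto it = builders.find(op.type);
  if (it != builders.end()) {
    it->second(op);  // dispatch: prints "build relu_grad node"
  }
  return 0;
}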
@@ -22,6 +22,7 @@ limitations under the License. */
 #pragma once
 #include "ops/accuracy_op.h"
+#include "ops/activation_op.h"
 #include "ops/batch_norm_op.h"
 #include "ops/binary_unary_op.h"
 #include "ops/conv2d_op.h"
@@ -32,4 +33,5 @@ limitations under the License. */
 #include "ops/pool2d_op.h"
 #include "ops/scale_op.h"
 #include "ops/softmax_op.h"
+#include "ops/sum_op.h"
 #include "ops/top_k_op.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>

#include "ngraph/ngraph.hpp"
#include "paddle/fluid/platform/ngraph_helper.h"

namespace paddle {
namespace operators {
namespace ngraphs {

void BuildReluGradNode(
    const std::shared_ptr<framework::OperatorBase>& op,
    std::shared_ptr<
        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
        ngb_node_map) {
  auto out = platform::GetInputNode(op, "Out", ngb_node_map);
  auto dout = platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
  // ngraph ships a dedicated backprop op for relu.
  auto relu_grad = std::make_shared<ngraph::op::ReluBackprop>(out, dout);
  platform::SetOutputNode(op, "X@GRAD", relu_grad, ngb_node_map);
}

void BuildTanhGradNode(
    const std::shared_ptr<framework::OperatorBase>& op,
    std::shared_ptr<
        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
        ngb_node_map) {
  auto out = platform::GetInputNode(op, "Out", ngb_node_map);
  auto dout = platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
  auto shape = out->get_shape();
  auto node_const =
      ngraph::op::Constant::create(ngraph::element::f32, shape, {1});
  // dX = dOut * (1 - Out^2), using tanh'(x) = 1 - tanh(x)^2.
  auto result = dout * (node_const - out * out);
  platform::SetOutputNode(op, "X@GRAD", result, ngb_node_map);
}
}  // namespace ngraphs
}  // namespace operators
}  // namespace paddle
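For reference, the gradients these two builders compute, written in terms of the forward output Out (a standard derivation, stated here for clarity rather than taken from the patch itself):

% relu: Out = max(X, 0), so the upstream gradient passes through only where Out > 0
\frac{\partial L}{\partial X} = \frac{\partial L}{\partial Out} \cdot \mathbf{1}[Out > 0]

% tanh: Out = tanh(X), and tanh'(X) = 1 - tanh(X)^2 = 1 - Out^2
\frac{\partial L}{\partial X} = \frac{\partial L}{\partial Out} \cdot \left(1 - Out^2\right)

ngraph::op::ReluBackprop encapsulates the first formula; the second has no dedicated backprop op, which is why BuildTanhGradNode spells it out with a constant-one tensor.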
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>
#include <vector>

#include "ngraph/ngraph.hpp"
#include "paddle/fluid/platform/ngraph_helper.h"

namespace paddle {
namespace operators {
namespace ngraphs {

void BuildSumNode(
    const std::shared_ptr<framework::OperatorBase>& op,
    std::shared_ptr<
        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
        ngb_node_map) {
  // Collect all input variable names and verify each one has a graph node.
  std::vector<std::string> op_inputs;
  for (auto& var_name_item : op->Inputs()) {
    for (auto& var_name : var_name_item.second) {
      op_inputs.push_back(var_name);
      if (ngb_node_map->find(var_name) == ngb_node_map->end()) {
        PADDLE_THROW("op %s input varname %s is not found in var_node_map",
                     op->Type(), var_name);
      }
    }
  }
  // Fold the inputs into a running sum, converting element types to match
  // the first input where they differ.
  std::shared_ptr<ngraph::Node>& sum = ngb_node_map->at(op_inputs[0]);
  for (size_t k = 1; k < op_inputs.size(); ++k) {
    std::shared_ptr<ngraph::Node>& nodek = ngb_node_map->at(op_inputs[k]);
    if (nodek->get_element_type() != sum->get_element_type()) {
      nodek = std::make_shared<ngraph::op::Convert>(nodek,
                                                    sum->get_element_type());
    }
    sum = sum + nodek;
  }
  platform::SetOutputNode(op, "Out", sum, ngb_node_map);
}
}  // namespace ngraphs
}  // namespace operators
}  // namespace paddle
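BuildSumNode reduces N inputs with N-1 pairwise additions rather than a single n-ary op, promoting each operand to the first input's element type before adding. A dependency-free sketch of the same fold-with-conversion pattern (the Node struct and its type tag are illustrative stand-ins, not ngraph types):

#include <iostream>
#include <vector>

// Illustrative: each "node" carries a value and an element-type tag.
struct Node {
  double value;
  char type;  // 'f' = float-like, 'i' = int-like
};

int main() {
  std::vector<Node> inputs = {{1.0, 'f'}, {2.0, 'i'}, {3.5, 'f'}};
  Node sum = inputs[0];
  for (size_t k = 1; k < inputs.size(); ++k) {
    Node nodek = inputs[k];
    if (nodek.type != sum.type) {
      nodek.type = sum.type;  // "Convert" to the accumulator's element type
    }
    sum.value += nodek.value;  // pairwise add, like sum = sum + nodek
  }
  std::cout << "sum = " << sum.value << "\n";  // prints sum = 6.5
  return 0;
}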
@@ -18,17 +18,7 @@ import unittest
 import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
-from paddle.fluid.tests.unittests.test_activation_op import TestRelu, TestTanh
-
-class TestNGRAPHReluDim2(TestRelu):
-    def setUp(self):
-        super(TestNGRAPHReluDim2, self).setUp()
-
-class TestNGRAPHTanhDim2(TestTanh):
-    def setUp(self):
-        super(TestNGRAPHTanhDim2, self).setUp()
+from paddle.fluid.tests.unittests.test_activation_op import TestSigmoid, TestRelu, TestTanh

 class TestNGRAPHReluDim4(TestRelu):
...
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import unittest

from paddle.fluid.tests.unittests.test_sum_op import TestSumOp, TestSelectedRowsSumOp, TestLoDTensorAndSelectedRowsOp

if __name__ == "__main__":
    unittest.main()