Commit df23a6f8 authored by mozga-intel, committed by tensor-tang

Enable the cross_entropy operator for the ngraph engine (#15674)

* Enable the cross_entropy operator for the ngraph engine
test=develop

* Update tests
test=develop

* Added PADDLE_ENFORCE for the batch_norm operator
test=develop

* Update the message about which formats are supported right now
test=develop
Parent 56a5039e
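
For reference (not part of the patch text), the builders added below assemble the standard cross-entropy graph. With probabilities $x \in \mathbb{R}^{N \times D}$, hard labels $\ell_i$ and an `ignore_index` attribute, the forward and backward passes are

$$
\mathrm{xe}_i = -\,\mathbb{1}[\ell_i \neq \mathrm{ignore\_index}]\,\log x_{i,\ell_i},
\qquad
\frac{\partial \mathrm{xe}_i}{\partial x_{i,j}} = -\,\mathbb{1}[j = \ell_i \neq \mathrm{ignore\_index}]\,\frac{dy_i}{x_{i,j}},
$$

and with soft labels $\mathrm{xe}_i = -\sum_j \mathrm{label}_{i,j}\log x_{i,j}$ with $\partial \mathrm{xe}_i/\partial x_{i,j} = -\,\mathrm{label}_{i,j}\,dy_i/x_{i,j}$.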
@@ -36,6 +36,8 @@ std::map<std::string,
{"conv2d_grad", NG_OPS::BuildConv2dGradNode},
{"batch_norm", NG_OPS::BuildBatchNormNode},
{"batch_norm_grad", NG_OPS::BuildBatchNormGradNode},
{"cross_entropy", NG_OPS::BuildCrossEntropyNode},
{"cross_entropy_grad", NG_OPS::BuildCrossEntropyGradNode},
{"elementwise_add", NG_OPS::BuildElementwiseAddNode},
{"elementwise_add_grad", NG_OPS::BuildElementwiseAddGradNode},
{"fill_constant", NG_OPS::BuildFillConstantNode},
......
@@ -26,6 +26,7 @@ limitations under the License. */
#include "ops/batch_norm_op.h"
#include "ops/binary_unary_op.h"
#include "ops/conv2d_op.h"
#include "ops/cross_entropy_op.h"
#include "ops/elementwise_add_op.h"
#include "ops/fill_constant_op.h"
#include "ops/mean_op.h"
......
@@ -44,6 +44,10 @@ void BuildBatchNormNode(
const float epsilon = op_attrs.Get<float>("epsilon");
const float momentum = op_attrs.Get<float>("momentum");
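// Only the NHWC, NCHW and NC layouts are handled below; reject anything else
// with an explicit error instead of silently mis-reading the data layout.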
PADDLE_ENFORCE(
data_layout == "NHWC" || data_layout == "NCHW" || data_layout == "NC",
"The BatchNorm operator only supports NHWC/NCHW/NC data format");
if (data_layout == "NHWC") {
x = paddle::platform::Nhwc2Nchw(x);
}
@@ -110,6 +114,9 @@ void BuildBatchNormGradNode(
"BN grap input size needs to be 2 or 4");
PADDLE_ENFORCE_EQ(x_shape.size(), dy_shape.size(),
"BN grap input and delta size needs to be equal");
PADDLE_ENFORCE(
data_layout == "NHWC" || data_layout == "NCHW" || data_layout == "NC",
"The BatchNorm operator only supports NHWC/NCHW/NC data format");
if (x_shape.size() == 2) {
x = std::make_shared<ngraph::op::Reshape>(
......
/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <functional>
#include <string>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/platform/ngraph_helper.h"
namespace paddle {
namespace operators {
namespace ngraphs {
void BuildCrossEntropyNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map);
auto label_shape = label->get_shape();
auto x_shape = x->get_shape();
auto label_rank = label_shape.size();
auto x_rank = x_shape.size();
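// Inputs with rank > 2 are flattened to 2-D along the last axis (e.g. x
// becomes [product of outer dims, class_num]) so a single 2-D graph also
// covers the high-rank cases.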
std::shared_ptr<ngraph::Node> x_2d = x, label_2d = label;
auto label_2d_shape = label_shape, x_2d_shape = x_shape;
if (label_rank > 2) {
label_2d_shape = paddle::platform::FlattenTo2d(label_shape, label_rank - 1);
label_2d = paddle::platform::NgReshaper(label, label_2d_shape);
}
if (x_rank > 2) {
x_2d_shape = paddle::platform::FlattenTo2d(x_shape, x_rank - 1);
x_2d = paddle::platform::NgReshaper(x, x_2d_shape);
}
auto batch_size = x_2d_shape.at(0);
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
const bool is_soft_label = op_attrs.Get<bool>("soft_label");
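// Hard (index) labels arrive as an integer column; expand them to a one-hot
// [batch_size, class_num] matrix along axis 1. Soft labels are already dense.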
std::shared_ptr<ngraph::Node> node_1_hot = label_2d;
if (!is_soft_label) {
auto label_1d = paddle::platform::NgReshaper(
label_2d, ngraph::Shape{label_2d_shape.at(0)});
node_1_hot = std::make_shared<ngraph::op::OneHot>(label_1d, x_2d_shape, 1);
}
if (x->get_element_type() != node_1_hot->get_element_type()) {
node_1_hot = std::make_shared<ngraph::op::Convert>(node_1_hot,
x->get_element_type());
}
// log(x), clipped to a large finite range so that log(0) = -inf cannot
// produce NaNs when multiplied by the zero entries of the one-hot labels.
auto node_log = std::make_shared<ngraph::op::Log>(x_2d);
auto high_clip = ngraph::op::Constant::create(node_log->get_element_type(),
node_log->get_shape(), {1e20});
auto low_clip = ngraph::op::Constant::create(node_log->get_element_type(),
node_log->get_shape(), {-1e20});
auto node_min = std::make_shared<ngraph::op::Minimum>(node_log, high_clip);
auto node_max = std::make_shared<ngraph::op::Maximum>(node_min, low_clip);
// xe_i = -sum_j label_ij * clipped_log(x_ij) over the class axis.
auto node_mul = node_1_hot * node_max;
auto node_sum =
std::make_shared<ngraph::op::Sum>(node_mul, ngraph::AxisSet{1});
auto node_neg = std::make_shared<ngraph::op::Negative>(node_sum);
auto xe =
paddle::platform::NgReshaper(node_neg, ngraph::Shape{batch_size, 1});
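// With hard labels, zero out the loss of every row whose label equals
// ignore_index by multiplying with a {0, 1} mask.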
if (!is_soft_label) {
auto ignore_index = op_attrs.Get<int>("ignore_index");
auto ignore_node = ngraph::op::Constant::create(
label->get_element_type(), label_2d_shape, {ignore_index});
auto not_equal_node =
std::make_shared<ngraph::op::NotEqual>(label_2d, ignore_node);
auto mask = std::make_shared<ngraph::op::Convert>(not_equal_node,
xe->get_element_type());
xe = xe * mask;
}
paddle::platform::SetOutputNode(op, "Y", xe, ngb_node_map);
}
void BuildCrossEntropyGradNode(
const std::shared_ptr<paddle::framework::OperatorBase>& op,
std::shared_ptr<
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
ngb_node_map) {
auto op_attrs = paddle::framework::AttrReader(op->Attrs());
const bool is_soft_label = op_attrs.Get<bool>("soft_label");
auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map);
auto dy = paddle::platform::GetInputNode(op, "Y@GRAD", ngb_node_map);
auto x_shape = x->get_shape();
auto rank = x_shape.size();
std::shared_ptr<ngraph::Node> mask;
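// For hard labels: drop the trailing singleton dimension, build a {0, 1}
// ignore_index mask broadcast to the shape of x, and expand the labels to
// one-hot along the last axis so the soft-label formula below applies.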
if (!is_soft_label) {
auto label_shape = label->get_shape();
label_shape.pop_back();
label = paddle::platform::NgReshaper(label, label_shape);
auto ignore_index = op_attrs.Get<int>("ignore_index");
auto ignore_node = ngraph::op::Constant::create(
label->get_element_type(), label_shape, {ignore_index});
auto not_equal_node =
std::make_shared<ngraph::op::NotEqual>(label, ignore_node);
mask = std::make_shared<ngraph::op::Convert>(not_equal_node,
x->get_element_type());
mask = std::make_shared<ngraph::op::Broadcast>(mask, x_shape,
ngraph::AxisSet{rank - 1});
label = std::make_shared<ngraph::op::OneHot>(label, x_shape, rank - 1);
}
auto dy_shape = dy->get_shape();
dy_shape.pop_back();
auto dy_reshape = paddle::platform::NgReshaper(dy, dy_shape);
auto dy_bcast = std::make_shared<ngraph::op::Broadcast>(
dy_reshape, x_shape, ngraph::AxisSet{rank - 1});
if (x->get_element_type() != label->get_element_type()) {
label = std::make_shared<ngraph::op::Convert>(label, x->get_element_type());
}
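// Gradient of -sum_j label_ij * log(x_ij): dX = -Label * dY / X, with dY
// broadcast along the class axis; the ignore_index mask is applied afterwards
// for hard labels.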
auto xe_grad = -label * dy_bcast / x;
if (!is_soft_label) {
xe_grad = xe_grad * mask;
}
paddle::platform::SetOutputNode(op, "X@GRAD", xe_grad, ngb_node_map);
}
} // namespace ngraphs
} // namespace operators
} // namespace paddle
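As a cross-check, here is a minimal NumPy sketch (not part of the patch; the function names are made up) of the computation the two builders above assemble for the 2-D, hard-label case with an ignore_index attribute:

import numpy as np

def cross_entropy_forward(x, label, ignore_index=-100):
    # xe[i] = -log(x[i, label[i]]), with rows whose label equals ignore_index
    # zeroed out, mirroring BuildCrossEntropyNode for hard labels.
    n = x.shape[0]
    picked = x[np.arange(n), label.reshape(-1)]
    mask = (label.reshape(n, 1) != ignore_index).astype(x.dtype)
    return -np.log(picked).reshape(n, 1) * mask

def cross_entropy_grad(x, label, dy, ignore_index=-100):
    # dX = -one_hot(label) * dY / X, masked on ignore_index, mirroring
    # BuildCrossEntropyGradNode for hard labels.
    n = x.shape[0]
    one_hot = np.zeros_like(x)
    one_hot[np.arange(n), label.reshape(-1)] = 1.0
    mask = (label.reshape(n, 1) != ignore_index).astype(x.dtype)
    return -one_hot * dy.reshape(n, 1) / x * mask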
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, randomize_probability
class TestCrossEntropyOp(OpTest):
"""Test cross-entropy with discrete one-hot labels.
"""
def setUp(self):
self.op_type = "cross_entropy"
self.soft_label = False
self.ignore_index = -100
self.dtype = np.float64
self.batch_size = 30
self.class_num = 10
self._cpu_only = True
self.init_dtype_type()
self.init_attr_type()
self.init_bs_class_num()
self.init_x()
self.init_label()
self.get_cross_entropy()
self.inputs = {"X": self.x, "Label": self.label}
self.outputs = {"Y": self.cross_entropy}
self.attrs = {
"soft_label": self.soft_label,
"ignore_index": self.ignore_index
}
def init_x(self):
self.x = randomize_probability(
self.batch_size, self.class_num, dtype=self.dtype)
def init_label(self):
self.label = np.random.randint(
0, self.class_num, (self.batch_size, 1), dtype="int64")
def get_cross_entropy(self):
self.cross_entropy = np.asmatrix(
[[-np.log(self.x[i][self.label[i][0]])]
for i in range(self.x.shape[0])],
dtype="float64")
def init_attr_type(self):
pass
def init_dtype_type(self):
pass
def init_bs_class_num(self):
pass
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Y", numeric_grad_delta=0.001)
class TestCrossEntropyOp2(TestCrossEntropyOp):
"""Test cross-entropy with vectorized soft labels.
"""
def init_label(self):
self.label = np.random.uniform(
0.1, 1.0, [self.batch_size, self.class_num]).astype(self.dtype)
self.label /= self.label.sum(axis=1, keepdims=True)
def get_cross_entropy(self):
self.cross_entropy = (-self.label * np.log(self.x)).sum(
axis=1, keepdims=True).astype(self.dtype)
def init_attr_type(self):
self.soft_label = True
def init_dtype_type(self):
self.dtype = np.float32
def init_bs_class_num(self):
self.batch_size = 5
self.class_num = 37
def test_check_grad(self):
self.check_grad(
["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001)
class TestCrossEntropyOp3(TestCrossEntropyOp):
"""Test cross-entropy with vectorized one-hot representation of labels.
"""
def init_label(self):
self.label_index = np.random.randint(0, self.class_num,
(self.batch_size))
self.label = np.zeros(self.x.shape).astype(self.dtype)
self.label[np.arange(self.batch_size), self.label_index] = 1
def get_cross_entropy(self):
self.cross_entropy = np.asmatrix(
[[-np.log(self.x[i][self.label_index[i]])]
for i in range(self.x.shape[0])]).astype(self.dtype)
def init_attr_type(self):
self.soft_label = True
def init_dtype_type(self):
self.dtype = np.float32
def init_bs_class_num(self):
self.batch_size = 5
self.class_num = 17
def test_check_grad(self):
self.check_grad(
["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001)
class TestCrossEntropyOp4(TestCrossEntropyOp):
"""Test high rank tensor cross-entropy with discrete one-hot labels.
"""
def init_x(self):
self.shape = [10, 2, 4]
self.ins_num = np.prod(np.array(self.shape))
self.X_2d = randomize_probability(self.ins_num,
self.class_num).astype(self.dtype)
self.x = self.X_2d.reshape(self.shape + [self.class_num])
def init_label(self):
self.label_2d = np.random.randint(
0, self.class_num, (self.ins_num, 1), dtype="int64")
self.label = self.label_2d.reshape(self.shape + [1])
def get_cross_entropy(self):
cross_entropy_2d = np.asmatrix(
[[-np.log(self.X_2d[i][self.label_2d[i][0]])]
for i in range(self.X_2d.shape[0])]).astype(self.dtype)
self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape +
[1])
def init_attr_type(self):
self.soft_label = False
def init_dtype_type(self):
self.dtype = np.float64
def init_bs_class_num(self):
self.class_num = 10
class TestCrossEntropyOp5(TestCrossEntropyOp):
"""Test high rank tensor cross-entropy with vectorized soft labels.
"""
def init_x(self):
self.shape = [4, 3]
self.ins_num = np.prod(np.array(self.shape))
self.X_2d = randomize_probability(self.ins_num,
self.class_num).astype(self.dtype)
self.x = self.X_2d.reshape(self.shape + [self.class_num])
def init_label(self):
self.label_2d = np.random.uniform(
0.1, 1.0, [self.ins_num, self.class_num]).astype(self.dtype)
self.label_2d /= self.label_2d.sum(axis=1, keepdims=True)
self.label = self.label_2d.reshape(self.shape + [self.class_num])
def get_cross_entropy(self):
cross_entropy_2d = (-self.label_2d * np.log(self.X_2d)).sum(
axis=1, keepdims=True).astype(self.dtype)
self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape +
[1])
def init_attr_type(self):
self.soft_label = True
def init_dtype_type(self):
self.dtype = np.float32
def init_bs_class_num(self):
self.class_num = 37
def test_check_grad(self):
self.check_grad(
["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001)
class TestCrossEntropyOp6(TestCrossEntropyOp):
"""Test high rank tensor cross-entropy with vectorized one-hot representation of labels.
"""
def init_x(self):
self.shape = [4, 3, 2]
self.ins_num = np.prod(np.array(self.shape))
self.X_2d = randomize_probability(self.ins_num,
self.class_num).astype(self.dtype)
self.x = self.X_2d.reshape(self.shape + [self.class_num])
def init_label(self):
self.label_index_2d = np.random.randint(
0, self.class_num, (self.ins_num), dtype="int64")
label_2d = np.zeros(self.X_2d.shape)
label_2d[np.arange(self.ins_num), self.label_index_2d] = 1
self.label = label_2d.reshape(self.shape + [self.class_num]).astype(
self.dtype)
def get_cross_entropy(self):
cross_entropy_2d = np.asmatrix(
[[-np.log(self.X_2d[i][self.label_index_2d[i]])]
for i in range(self.X_2d.shape[0])])
self.cross_entropy = np.array(cross_entropy_2d).reshape(
self.shape + [1]).astype(self.dtype)
def init_attr_type(self):
self.soft_label = True
def init_dtype_type(self):
self.dtype = np.float32
def init_bs_class_num(self):
self.class_num = 17
def test_check_grad(self):
self.check_grad(
["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001)
class TestCrossEntropyOp7(TestCrossEntropyOp):
"""Test cross-entropy with ignore index.
"""
def init_label(self):
self.label = np.random.randint(
0, self.class_num, (self.batch_size, 1), dtype="int64")
def get_cross_entropy(self):
self.cross_entropy = np.asmatrix(
[[-np.log(self.x[i][self.label[i][0]])]
if self.label[i][0] != self.ignore_index else [0]
for i in range(self.x.shape[0])]).astype(self.dtype)
def init_attr_type(self):
self.soft_label = False
self.ignore_index = 3
def init_dtype_type(self):
self.dtype = np.float64
def init_bs_class_num(self):
self.batch_size = 30
self.class_num = 10
if __name__ == "__main__":
unittest.main()
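The nGraph-specific runner for these cases is not shown in this excerpt. A minimal sketch of how such a runner is usually wired in this tree (the file location and the FLAGS_use_ngraph switch are assumptions, not taken from this diff) would simply re-export the base test classes and run them with the nGraph engine enabled:

from __future__ import print_function

import unittest

# Reuse the generic cross_entropy cases; run with the engine enabled, e.g.
#   FLAGS_use_ngraph=true python test_cross_entropy_ngraph_op.py
from paddle.fluid.tests.unittests.test_cross_entropy_op import (  # noqa: F401
    TestCrossEntropyOp, TestCrossEntropyOp2, TestCrossEntropyOp3)

if __name__ == "__main__":
    unittest.main()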