From 468c1ad751433c0dc2b75063981b73be910e64e3 Mon Sep 17 00:00:00 2001 From: zhangyikun02 <48021248+zhangyk0314@users.noreply.github.com> Date: Wed, 13 Apr 2022 10:20:58 +0800 Subject: [PATCH] support bce_loss and bce_loss_grad in XPU, test=kunlun (#41610) --- paddle/fluid/operators/bce_loss_op_xpu.cc | 70 +++++++++++++++++ .../fluid/platform/device/xpu/xpu2_op_list.h | 3 + .../unittests/xpu/test_bce_loss_op_xpu.py | 76 +++++++++++++++++++ 3 files changed, 149 insertions(+) create mode 100644 paddle/fluid/operators/bce_loss_op_xpu.cc create mode 100644 python/paddle/fluid/tests/unittests/xpu/test_bce_loss_op_xpu.py diff --git a/paddle/fluid/operators/bce_loss_op_xpu.cc b/paddle/fluid/operators/bce_loss_op_xpu.cc new file mode 100644 index 00000000000..8ec80efceb9 --- /dev/null +++ b/paddle/fluid/operators/bce_loss_op_xpu.cc @@ -0,0 +1,70 @@ +/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/
+
+#ifdef PADDLE_WITH_XPU
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/platform/device/device_wrapper.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+
+// Forward BCE loss kernel for XPU. Reads inputs "X" (predictions) and
+// "Label", writes "Out"; the elementwise computation is delegated to the
+// XDNN primitive xpu::bce_loss over all x->numel() elements.
+template <typename DeviceContext, typename T>
+class XPUBCELossKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto* x = context.Input<Tensor>("X");
+    auto* labels = context.Input<Tensor>("Label");
+    auto* out = context.Output<Tensor>("Out");
+    out->mutable_data<T>(context.GetPlace());
+
+    auto x_numel = x->numel();
+    auto& dev_ctx = context.template device_context<DeviceContext>();
+    int r = xpu::bce_loss<T>(dev_ctx.x_context(), x->data<T>(),
+                             labels->data<T>(), out->data<T>(), x_numel);
+    // Surface any XDNN error code with the op name attached.
+    PADDLE_ENFORCE_XDNN_SUCCESS(r, "bce_loss");
+  }
+};
+
+// Backward BCE loss kernel for XPU. Reads "X", "Label" and Out@GRAD,
+// writes X@GRAD; the gradient computation is delegated to the XDNN
+// primitive xpu::bce_loss_grad.
+template <typename DeviceContext, typename T>
+class XPUBCELossGradKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto* x = context.Input<Tensor>("X");
+    auto* labels = context.Input<Tensor>("Label");
+    auto* dout = context.Input<Tensor>(framework::GradVarName("Out"));
+    auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
+    dx->mutable_data<T>(context.GetPlace());
+
+    auto x_numel = x->numel();
+    auto& dev_ctx = context.template device_context<DeviceContext>();
+    int r = xpu::bce_loss_grad<T>(dev_ctx.x_context(), x->data<T>(),
+                                  labels->data<T>(), dout->data<T>(),
+                                  dx->data<T>(), x_numel);
+    PADDLE_ENFORCE_XDNN_SUCCESS(r, "bce_loss_grad");
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+// FP32-only registration, matching the xpu2_op_list.h entries below.
+REGISTER_OP_XPU_KERNEL(
+    bce_loss,
+    ops::XPUBCELossKernel<paddle::platform::XPUDeviceContext, float>);
+REGISTER_OP_XPU_KERNEL(
+    bce_loss_grad,
+    ops::XPUBCELossGradKernel<paddle::platform::XPUDeviceContext, float>);
+
+#endif  // PADDLE_WITH_XPU
diff --git a/paddle/fluid/platform/device/xpu/xpu2_op_list.h b/paddle/fluid/platform/device/xpu/xpu2_op_list.h
index 65273710598..3a047b8fce7 100644
--- a/paddle/fluid/platform/device/xpu/xpu2_op_list.h
+++ b/paddle/fluid/platform/device/xpu/xpu2_op_list.h
@@ -43,6 +43,9 @@ XPUOpMap& get_kl2_ops() {
     {"batch_norm_grad",
XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})}, {"batch_norm", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})}, + {"bce_loss_grad", + XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})}, + {"bce_loss", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})}, {"bilinear_interp_v2", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})}, {"bilinear_interp_v2_grad", diff --git a/python/paddle/fluid/tests/unittests/xpu/test_bce_loss_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_bce_loss_op_xpu.py new file mode 100644 index 00000000000..a8173f054a1 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/xpu/test_bce_loss_op_xpu.py @@ -0,0 +1,76 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import sys +sys.path.append("..") +import paddle +import paddle.fluid as fluid +import numpy as np +import unittest +from op_test_xpu import XPUOpTest +from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper + +paddle.enable_static() + + +def bce_loss(input, label): + return -1 * (label * np.log(input) + (1. - label) * np.log(1. 
- input)) + + +class XPUTestBceLossOp(XPUOpTestWrapper): + def __init__(self): + self.op_name = 'bce_loss' + self.use_dynamic_create_class = False + + class TestBceLossOp(XPUOpTest): + def setUp(self): + self.op_type = "bce_loss" + self.dtype = self.in_type + self.place = paddle.XPUPlace(0) + self.init_test_case() + input_np = np.random.uniform(0.1, 0.8, + self.shape).astype(self.dtype) + label_np = np.random.randint(0, 2, self.shape).astype(self.dtype) + output_np = bce_loss(input_np, label_np) + + self.inputs = {'X': input_np, 'Label': label_np} + self.outputs = {'Out': output_np} + + def test_check_output(self): + self.check_output_with_place(self.place) + + def test_check_grad(self): + self.check_grad_with_place(self.place, ['X'], 'Out') + + def init_test_case(self): + self.shape = [10, 10] + + class TestBceLossOpCase1(TestBceLossOp): + def init_test_cast(self): + self.shape = [2, 3, 4, 5] + + class TestBceLossOpCase2(TestBceLossOp): + def init_test_cast(self): + self.shape = [2, 3, 20] + + +support_types = get_xpu_op_support_types('bce_loss') +for stype in support_types: + create_test_class(globals(), XPUTestBceLossOp, stype) + +if __name__ == "__main__": + paddle.enable_static() + unittest.main() -- GitLab