From b94d7ff37f78943d744af11d51313d8774a02f07 Mon Sep 17 00:00:00 2001
From: zhulei <563755780@qq.com>
Date: Mon, 30 Aug 2021 19:14:57 +0800
Subject: [PATCH] [NPU] Add log_loss op (#35010)

* [NPU] Add log_loss op

* [NPU] Add log_loss op

* [NPU] Add log_loss op
---
 paddle/fluid/operators/log_loss_op_npu.cc    | 115 ++++++++++++++++++
 .../unittests/npu/test_log_loss_op_npu.py    | 110 +++++++++++++++++
 2 files changed, 225 insertions(+)
 create mode 100644 paddle/fluid/operators/log_loss_op_npu.cc
 create mode 100644 python/paddle/fluid/tests/unittests/npu/test_log_loss_op_npu.py

diff --git a/paddle/fluid/operators/log_loss_op_npu.cc b/paddle/fluid/operators/log_loss_op_npu.cc
new file mode 100644
index 00000000000..a8d906d4b5c
--- /dev/null
+++ b/paddle/fluid/operators/log_loss_op_npu.cc
@@ -0,0 +1,115 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/log_loss_op.h"
+#include <cmath>
+#include "paddle/fluid/operators/npu_op_runner.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+
+template <typename T>
+void LogLossAdds(const platform::Place& place, const aclrtStream& stream,
+                 const Tensor* x, float scale, Tensor* y) {
+  // Calculate y = x + scale
+  y->mutable_data<T>(x->dims(), place);
+  const auto& runner = NpuOpRunner("Adds", {*x}, {*y}, {{"value", scale}});
+  runner.Run(stream);
+}
+
+template <typename T>
+void LogLossMuls(const platform::Place& place, const aclrtStream& stream,
+                 const Tensor* x, float scale, Tensor* y) {
+  // Calculate y = x * scale
+  y->mutable_data<T>(x->dims(), place);
+  const auto& runner = NpuOpRunner("Muls", {*x}, {*y}, {{"value", scale}});
+  runner.Run(stream);
+}
+
+template <typename T>
+void LogLossBCE(const platform::Place& place, const aclrtStream& stream,
+                const Tensor* x, const Tensor* y, Tensor* z) {
+  z->mutable_data<T>(x->dims(), place);
+  const auto& runner =
+      NpuOpRunner("BinaryCrossEntropy", {*x, *y}, {*z},
+                  {{"reduction", static_cast<std::string>("none")}});
+  runner.Run(stream);
+}
+
+template <typename T>
+void LogLossBCEGrad(const platform::Place& place, const aclrtStream& stream,
+                    const Tensor* x, const Tensor* y, const Tensor* dout,
+                    Tensor* dx) {
+  dx->mutable_data<T>(x->dims(), place);
+  const auto& runner =
+      NpuOpRunner("BinaryCrossEntropyGrad", {*x, *y, *dout}, {*dx},
+                  {{"reduction", static_cast<std::string>("none")}});
+  runner.Run(stream);
+}
+
+template <typename T>
+class LogLossNPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* y = ctx.Output<Tensor>("Loss");
+    auto* pred = ctx.Input<Tensor>("Predicted");
+    auto* label = ctx.Input<Tensor>("Labels");
+    auto epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
+
+    auto place = ctx.GetPlace();
+    auto stream =
+        ctx.template device_context<paddle::platform::NPUDeviceContext>()
+            .stream();
+
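+    // With factor = 1 / (1 + 2 * epsilon) and coef = log(factor),
+    //   -l * log((p + eps) * factor)
+    //       - (1 - l) * log(1 - (p + eps) * factor) + coef
+    //   = -l * log(p + eps) - (1 - l) * log(1 - p + eps),
+    // so the Adds/Muls/BCE/Adds sequence below reproduces the log_loss
+    // definition checked by the unit test.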
+    float factor = 1 / (1 + 2 * epsilon);
+    float coef = std::log(factor);
+    LogLossAdds<T>(place, stream, pred, epsilon, y);
+    LogLossMuls<T>(place, stream, y, factor, y);
+    LogLossBCE<T>(place, stream, y, label, y);
+    LogLossAdds<T>(place, stream, y, coef, y);
+  }
+};
+
+template <typename T>
+class LogLossGradNPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* pred = ctx.Input<Tensor>("Predicted");
+    auto* label = ctx.Input<Tensor>("Labels");
+    auto* dloss = ctx.Input<Tensor>(framework::GradVarName("Loss"));
+    auto* dpred = ctx.Output<Tensor>(framework::GradVarName("Predicted"));
+    auto epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
+
+    auto place = ctx.GetPlace();
+    auto stream =
+        ctx.template device_context<paddle::platform::NPUDeviceContext>()
+            .stream();
+
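+    // dpred is the elementwise BinaryCrossEntropyGrad result rescaled by
+    // the same 1 / (1 + 2 * epsilon) factor used in the forward pass.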
+    if (dpred) {
+      LogLossBCEGrad<T>(place, stream, pred, label, dloss, dpred);
+      LogLossMuls<T>(place, stream, dpred, 1 / (1 + 2 * epsilon), dpred);
+    }
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OP_NPU_KERNEL(log_loss, ops::LogLossNPUKernel<float>);
+
+REGISTER_OP_NPU_KERNEL(log_loss_grad, ops::LogLossGradNPUKernel<float>);
diff --git a/python/paddle/fluid/tests/unittests/npu/test_log_loss_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_log_loss_op_npu.py
new file mode 100644
index 00000000000..ff1b0e53dfe
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/npu/test_log_loss_op_npu.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+sys.path.append("..")
+from op_test import OpTest
+import paddle
+import paddle.fluid as fluid
+
+paddle.enable_static()
+
+
+def sigmoid_array(x):
+    return 1 / (1 + np.exp(-x))
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestLogLossOp(OpTest):
+    def setUp(self):
+        self.set_npu()
+        self.op_type = 'log_loss'
+        self.place = paddle.NPUPlace(0)
+
+        self.init_dtype()
+
+        self.set_inputs()
+        self.set_attrs()
+        self.set_outputs()
+
+    def set_inputs(self):
+        samples_num = 100
+        x = np.random.random((samples_num, 1)).astype(self.dtype)
+        predicted = sigmoid_array(x)
+        labels = np.random.randint(0, 2, (samples_num, 1)).astype(self.dtype)
+        self.inputs = {'Predicted': predicted, 'Labels': labels}
+
+    def set_attrs(self):
+        epsilon = 1e-7
+        self.attrs = {'epsilon': epsilon}
+
+    def set_outputs(self):
+        epsilon = self.attrs['epsilon']
+        labels = self.inputs['Labels']
+        predicted = self.inputs['Predicted']
+        loss = -labels * np.log(predicted + epsilon) - (
+            1 - labels) * np.log(1 - predicted + epsilon)
+        self.outputs = {'Loss': loss}
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+    def test_check_grad(self):
+        self.check_grad_with_place(self.place, ['Predicted'], 'Loss')
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestLogLossOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.layers.data(name='x', shape=[100, 1], dtype='float32')
+            label = fluid.layers.data(
+                name='label', shape=[100, 1], dtype='float32')
+
+            def test_x_type():
+                input_data = np.random.random((100, 1)).astype("float32")
+                fluid.layers.log_loss(input_data, label)
+
+            self.assertRaises(TypeError, test_x_type)
+
+            def test_x_dtype():
+                x2 = fluid.layers.data(name='x2', shape=[100, 1], dtype='int32')
+                fluid.layers.log_loss(x2, label)
+
+            self.assertRaises(TypeError, test_x_dtype)
+
+            def test_label_type():
+                label_data = np.random.random((100, 1)).astype("float32")
+                fluid.layers.log_loss(x, label_data)
+
+            self.assertRaises(TypeError, test_label_type)
+
+            def test_label_dtype():
+                label2 = fluid.layers.data(
+                    name='label2', shape=[100, 1], dtype='int32')
+                fluid.layers.log_loss(x, label2)
+
+            self.assertRaises(TypeError, test_label_dtype)
+
+
+if __name__ == '__main__':
+    unittest.main()
--
GitLab