From 9df84bd693ed0806511fdbf9d5493c0070389d82 Mon Sep 17 00:00:00 2001
From: zhang wenhui
Date: Wed, 10 Mar 2021 10:08:37 +0800
Subject: [PATCH] 【NPU】add scale op for npu (#31499)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* add scale npu

* fix

* fix
---
 paddle/fluid/operators/scale_op_npu.cc        | 69 ++++++++++++++
 .../tests/unittests/npu/test_scale_op_npu.py  | 89 +++++++++++++++++++
 2 files changed, 158 insertions(+)
 create mode 100644 paddle/fluid/operators/scale_op_npu.cc
 create mode 100644 python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py

diff --git a/paddle/fluid/operators/scale_op_npu.cc b/paddle/fluid/operators/scale_op_npu.cc
new file mode 100644
index 0000000000..ee7210a778
--- /dev/null
+++ b/paddle/fluid/operators/scale_op_npu.cc
@@ -0,0 +1,69 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <memory>
+#include <string>
+
+#include "paddle/fluid/operators/npu_op_runner.h"
+#include "paddle/fluid/operators/scale_op.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename DeviceContext, typename T>
+class ScaleNPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* x = ctx.Input<framework::Tensor>("X");
+    auto* out = ctx.Output<framework::Tensor>("Out");
+    auto scale = static_cast<float>(ctx.Attr<float>("scale"));
+    auto bias = static_cast<float>(ctx.Attr<float>("bias"));
+    auto bias_after_scale = ctx.Attr<bool>("bias_after_scale");
+    auto stream =
+        ctx.template device_context<paddle::platform::NPUDeviceContext>()
+            .stream();
+    float _power = 1.0;
+    if (bias_after_scale) {
+      out->mutable_data<T>(ctx.GetPlace());
+      auto runner =
+          NpuOpRunner("Power", {*x}, {*out},
+                      {{"power", _power}, {"scale", scale}, {"shift", bias}});
+
+      runner.Run(stream);
+    } else {
+      Tensor tmp_x(x->type());
+      tmp_x.Resize(x->dims());
+      tmp_x.mutable_data<T>(ctx.GetPlace());
+      auto runner_tmp = NpuOpRunner("Adds", {*x}, {tmp_x}, {{"value", bias}});
+      runner_tmp.Run(stream);
+
+      out->mutable_data<T>(ctx.GetPlace());
+      float _bias = 0.0;
+      auto runner =
+          NpuOpRunner("Power", {tmp_x}, {*out},
+                      {{"power", _power}, {"scale", scale}, {"shift", _bias}});
+      runner.Run(stream);
+    }
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OP_NPU_KERNEL(
+    scale, ops::ScaleNPUKernel<paddle::platform::NPUDeviceContext, float>,
+    ops::ScaleNPUKernel<paddle::platform::NPUDeviceContext,
+                        paddle::platform::float16>);
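The kernel above lowers Paddle's scale op onto two CANN operators. With
bias_after_scale=true, a single "Power" call evaluates scale * x + bias
directly (power is fixed at 1.0, so no exponentiation actually happens); with
bias_after_scale=false, "Adds" first forms x + bias and "Power" then
multiplies by scale with a zero shift. A minimal NumPy sketch of the intended
semantics (an illustrative reference only, not the NPU code path; the helper
name scale_reference is made up here):

    import numpy as np

    def scale_reference(x, scale=1.0, bias=0.0, bias_after_scale=True):
        # Mirrors the two branches of ScaleNPUKernel.
        if bias_after_scale:
            return scale * x + bias   # one "Power" op: power=1, shift=bias
        return scale * (x + bias)     # "Adds" (+bias), then "Power" (shift=0)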
diff --git a/python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py
new file mode 100644
index 0000000000..9b4547bc24
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+sys.path.append("..")
+from op_test import OpTest
+import paddle
+import paddle.fluid as fluid
+
+paddle.enable_static()
+SEED = 2021
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestScale(OpTest):
+    def setUp(self):
+        self.set_npu()
+        self.op_type = "scale"
+        self.place = paddle.NPUPlace(0)
+        self.init_dtype()
+
+        self.inputs = {
+            'X': OpTest.np_dtype_to_fluid_dtype(
+                np.random.random((10, 10)).astype(self.dtype))
+        }
+        self.attrs = {'scale': -2.3, 'bias': 0, 'bias_after_scale': True}
+        self.outputs = {
+            'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
+        }
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place, check_dygraph=False)
+
+
+class TestFP16Scale(TestScale):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+class TestBiasAfterScale(OpTest):
+    def setUp(self):
+        self.set_npu()
+        self.op_type = "scale"
+        self.place = paddle.NPUPlace(0)
+        self.init_dtype()
+
+        self.inputs = {
+            'X': OpTest.np_dtype_to_fluid_dtype(
+                np.random.random((10, 10)).astype(self.dtype))
+        }
+        self.attrs = {'scale': -2.3, 'bias': 0, 'bias_after_scale': False}
+        self.outputs = {
+            'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
+        }
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place, check_dygraph=False)
+
+
+if __name__ == '__main__':
+    unittest.main()
-- 
GitLab
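For context, a minimal static-graph usage sketch of the op this patch enables.
It assumes an Ascend NPU device at index 0 and a Paddle build where
paddle.is_compiled_with_npu() returns True; the variable names (main, startup,
x_np, out_np) are illustrative:

    import numpy as np
    import paddle

    paddle.enable_static()
    main = paddle.static.Program()
    startup = paddle.static.Program()
    with paddle.static.program_guard(main, startup):
        x = paddle.static.data(name='x', shape=[10, 10], dtype='float32')
        # Matches TestScale: out = -2.3 * x + 0, bias applied after scaling.
        out = paddle.scale(x, scale=-2.3, bias=0.0, bias_after_scale=True)

    exe = paddle.static.Executor(paddle.NPUPlace(0))
    exe.run(startup)
    x_np = np.random.random((10, 10)).astype('float32')
    out_np, = exe.run(main, feed={'x': x_np}, fetch_list=[out])
    # out_np should equal -2.3 * x_np elementwise.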