From 7524ac934599b66db67dcf4b189c5174297ea8be Mon Sep 17 00:00:00 2001
From: zhang wenhui
Date: Wed, 10 Mar 2021 16:26:58 +0800
Subject: [PATCH] =?UTF-8?q?=E3=80=90NPU=E3=80=91support=20npu=20kernel=20f?=
 =?UTF-8?q?or=20fill=5Fconstant=20op=20(#31521)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* add fill_constant npu

* add fill_constant npu

* fix
---
 paddle/fluid/operators/fill_constant_op_npu.cc                  |  87 +++++++++++++++
 python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py | 102 ++++++++++++++++++
 2 files changed, 189 insertions(+)
 create mode 100644 paddle/fluid/operators/fill_constant_op_npu.cc
 create mode 100644 python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py

diff --git a/paddle/fluid/operators/fill_constant_op_npu.cc b/paddle/fluid/operators/fill_constant_op_npu.cc
new file mode 100644
index 00000000000..9d5499e00c8
--- /dev/null
+++ b/paddle/fluid/operators/fill_constant_op_npu.cc
@@ -0,0 +1,87 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <memory>
+#include <string>
+
+#include "paddle/fluid/operators/fill_constant_op.h"
+#include "paddle/fluid/operators/npu_op_runner.h"
+#include "paddle/fluid/operators/utils.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename DeviceContext, typename T>
+class FillConstantNPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto data_type =
+        static_cast<framework::proto::VarType::Type>(ctx.Attr<int>("dtype"));
+    auto str_value = ctx.Attr<std::string>("str_value");
+    auto float_value = ctx.Attr<float>("value");
+
+    auto* out_var = ctx.Output<framework::Tensor>("Out");
+    auto place = ctx.GetPlace();
+    auto stream =
+        ctx.template device_context<paddle::platform::NPUDeviceContext>()
+            .stream();
+
+    T value;
+    if (str_value.empty()) {
+      value = static_cast<T>(float_value);
+    } else {
+      // handle NaN/Inf first, which cannot be read from stream.
+      if (str_value == "inf") {
+        value = static_cast<T>(std::numeric_limits<double>::infinity());
+      } else if (str_value == "-inf") {
+        value = static_cast<T>(-std::numeric_limits<double>::infinity());
+      } else if (str_value == "nan") {
+        value = static_cast<T>(std::numeric_limits<double>::quiet_NaN());
+      } else {
+        std::stringstream convert_stream(str_value);
+        if (std::is_same<int64_t, T>::value) {
+          int64_t tmp_value;
+          convert_stream >> tmp_value;
+          value = static_cast<T>(tmp_value);
+        } else {
+          double tmp_value;
+          convert_stream >> tmp_value;
+          value = static_cast<T>(tmp_value);
+        }
+      }
+    }
+    auto shape = GetShape(ctx);
+
+    Tensor tensor_tmp(data_type);
+    tensor_tmp.mutable_data<T>({1}, ctx.GetPlace());
+    TensorFromVector(std::vector<T>{value}, ctx.device_context(), &tensor_tmp);
+
+    out_var->mutable_data<T>(shape, place);
+    auto runner = NpuOpRunner("FillD", {tensor_tmp}, {*out_var},
+                              {{"dims", framework::vectorize(shape)}});
+    runner.Run(stream);
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OP_NPU_KERNEL(
+    fill_constant,
+    ops::FillConstantNPUKernel<paddle::platform::NPUDeviceContext, float>,
+    ops::FillConstantNPUKernel<paddle::platform::NPUDeviceContext, bool>,
+    ops::FillConstantNPUKernel<paddle::platform::NPUDeviceContext, int>,
+    ops::FillConstantNPUKernel<paddle::platform::NPUDeviceContext,
+                               paddle::platform::float16>);
diff --git a/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py
new file mode 100644
index 00000000000..6e619bfd11f
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+sys.path.append("..")
+from op_test import OpTest
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid import core
+
+paddle.enable_static()
+SEED = 2021
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestFillConstant(OpTest):
+    def setUp(self):
+        self.set_npu()
+        self.place = paddle.NPUPlace(0)
+        self.op_type = "fill_constant"
+        self.init_dtype()
+
+        self.inputs = {}
+        self.attrs = {'shape': [123, 92], 'value': 3.8}
+        self.outputs = {'Out': np.full((123, 92), 3.8)}
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place, check_dygraph=False)
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestFillConstantInt(OpTest):
+    def setUp(self):
+        self.set_npu()
+        self.place = paddle.NPUPlace(0)
+        self.op_type = "fill_constant"
+        self.init_dtype()
+
+        self.inputs = {}
+        self.attrs = {
+            'shape': [123, 92],
+            'value': 1,
+            'dtype': core.VarDesc.VarType.INT32
+        }
+        self.outputs = {'Out': np.full((123, 92), 1).astype(self.dtype)}
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+
+    def init_dtype(self):
+        self.dtype = np.int32
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place, check_dygraph=False)
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestFillConstantFP16(OpTest):
+    def setUp(self):
+        self.set_npu()
+        self.place = paddle.NPUPlace(0)
+        self.op_type = "fill_constant"
+        self.init_dtype()
+
+        self.inputs = {}
+        self.attrs = {
+            'shape': [123, 92],
+            'value': 1.0,
+            'dtype': core.VarDesc.VarType.FP16
+        }
+        self.outputs = {'Out': np.full((123, 92), 1.0).astype(self.dtype)}
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3)
+
+
+if __name__ == '__main__':
+    unittest.main()
-- 
GitLab
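
Usage note (not part of the patch): a minimal sketch of how the new kernel is exercised end to end, assuming a PaddlePaddle build with NPU support and an NPU visible as device 0. In static-graph mode, paddle.full lowers to the fill_constant op, so running the fetched program on NPUPlace dispatches to the FillConstantNPUKernel registered above.

import paddle

paddle.enable_static()

# Build a static program whose output comes from a fill_constant op.
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    out = paddle.full(shape=[123, 92], fill_value=3.8, dtype='float32')

# Running on NPUPlace(0) routes the op to the NPU kernel; this mirrors
# what check_output_with_place does in the tests above.
place = paddle.NPUPlace(0)  # assumes an NPU build with device 0 available
exe = paddle.static.Executor(place)
result, = exe.run(main_prog, fetch_list=[out])
print(result.shape, result[0, 0])  # (123, 92) 3.8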