From 041000c2001c106c1cd571da2663577b6f82f429 Mon Sep 17 00:00:00 2001
From: Lux et Veritas <1004239791@qq.com>
Date: Thu, 2 Jun 2022 14:17:46 +0800
Subject: [PATCH] [MLU]add mlu kernel for squeeze and squeeze2 (#43094)

Co-authored-by: liupeiyu
---
 paddle/fluid/operators/squeeze_op_mlu.cc      |  61 +++++++++
 .../unittests/mlu/test_squeeze2_op_mlu.py     |  83 ++++++++++++
 .../unittests/mlu/test_squeeze_op_mlu.py      | 119 ++++++++++++++++++
 3 files changed, 263 insertions(+)
 create mode 100644 paddle/fluid/operators/squeeze_op_mlu.cc
 create mode 100755 python/paddle/fluid/tests/unittests/mlu/test_squeeze2_op_mlu.py
 create mode 100644 python/paddle/fluid/tests/unittests/mlu/test_squeeze_op_mlu.py

diff --git a/paddle/fluid/operators/squeeze_op_mlu.cc b/paddle/fluid/operators/squeeze_op_mlu.cc
new file mode 100644
index 00000000000..d492846b41c
--- /dev/null
+++ b/paddle/fluid/operators/squeeze_op_mlu.cc
@@ -0,0 +1,61 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef PADDLE_WITH_MLU
+#include <memory>
+#include <string>
+
+#include "paddle/fluid/operators/squeeze_op.h"
+#include "paddle/fluid/platform/device/mlu/device_context.h"
+
+namespace ops = paddle::operators;
+namespace plat = paddle::platform;
+
+REGISTER_OP_MLU_KERNEL(
+    squeeze, ops::SqueezeKernel<plat::MLUDeviceContext, float>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, double>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, plat::float16>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, bool>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, int>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, uint8_t>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, int8_t>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, int64_t>);
+
+REGISTER_OP_MLU_KERNEL(
+    squeeze_grad, ops::SqueezeGradKernel<plat::MLUDeviceContext, float>,
+    ops::SqueezeGradKernel<plat::MLUDeviceContext, double>,
+    ops::SqueezeGradKernel<plat::MLUDeviceContext, plat::float16>,
+    ops::SqueezeGradKernel<plat::MLUDeviceContext, bool>,
+    ops::SqueezeGradKernel<plat::MLUDeviceContext, int>,
+    ops::SqueezeGradKernel<plat::MLUDeviceContext, uint8_t>,
+    ops::SqueezeGradKernel<plat::MLUDeviceContext, int8_t>,
+    ops::SqueezeGradKernel<plat::MLUDeviceContext, int64_t>);
+
+REGISTER_OP_MLU_KERNEL(
+    squeeze2, ops::SqueezeKernel<plat::MLUDeviceContext, float>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, double>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, plat::float16>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, bool>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, int>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, uint8_t>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, int8_t>,
+    ops::SqueezeKernel<plat::MLUDeviceContext, int64_t>);
+
+REGISTER_OP_MLU_KERNEL(
+    squeeze2_grad, ops::Squeeze2GradKernel<plat::MLUDeviceContext, float>,
+    ops::Squeeze2GradKernel<plat::MLUDeviceContext, double>,
+    ops::Squeeze2GradKernel<plat::MLUDeviceContext, plat::float16>,
+    ops::Squeeze2GradKernel<plat::MLUDeviceContext, bool>,
+    ops::Squeeze2GradKernel<plat::MLUDeviceContext, int>,
+    ops::Squeeze2GradKernel<plat::MLUDeviceContext, uint8_t>,
+    ops::Squeeze2GradKernel<plat::MLUDeviceContext, int8_t>,
+    ops::Squeeze2GradKernel<plat::MLUDeviceContext, int64_t>);
+#endif
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_squeeze2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_squeeze2_op_mlu.py
new file mode 100755
index 00000000000..51606c36cfd
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mlu/test_squeeze2_op_mlu.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import unittest
+import sys
+sys.path.append("..")
+import numpy as np
+import paddle
+from op_test import OpTest
+
+paddle.enable_static()
+
+
+# Correct: General.
+class TestSqueezeOp(OpTest):
+    def setUp(self):
+        self.init_test_case()
+        self.set_mlu()
+        self.op_type = "squeeze2"
+        self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
+        self.init_attrs()
+        self.outputs = {
+            "Out": self.inputs["X"].reshape(self.new_shape),
+            "XShape": np.random.random(self.ori_shape).astype("float32")
+        }
+
+    def set_mlu(self):
+        self.__class__.use_mlu = True
+        self.place = paddle.device.MLUPlace(0)
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place, no_check_set=['XShape'])
+
+    def test_check_grad(self):
+        self.check_grad_with_place(self.place, ['X'], 'Out')
+
+    def init_test_case(self):
+        self.ori_shape = (1, 3, 1, 40)
+        self.axes = (0, 2)
+        self.new_shape = (3, 40)
+
+    def init_attrs(self):
+        self.attrs = {"axes": self.axes}
+
+
+# Correct: There is a minus axis.
+class TestSqueezeOp1(TestSqueezeOp):
+    def init_test_case(self):
+        self.ori_shape = (1, 20, 1, 5)
+        self.axes = (0, -2)
+        self.new_shape = (20, 5)
+
+
+# Correct: No axes input.
+class TestSqueezeOp2(TestSqueezeOp):
+    def init_test_case(self):
+        self.ori_shape = (1, 20, 1, 5)
+        self.axes = ()
+        self.new_shape = (20, 5)
+
+
+# Correct: Just part of the axes are squeezed.
+class TestSqueezeOp3(TestSqueezeOp):
+    def init_test_case(self):
+        self.ori_shape = (6, 1, 5, 1, 4, 1)
+        self.axes = (1, -1)
+        self.new_shape = (6, 5, 1, 4)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_squeeze_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_squeeze_op_mlu.py
new file mode 100644
index 00000000000..10703182c0a
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mlu/test_squeeze_op_mlu.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import unittest
+import sys
+sys.path.append("..")
+
+import numpy as np
+
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid import compiler, Program, program_guard
+from op_test import OpTest, convert_float_to_uint16
+import paddle.fluid.core as core
+
+paddle.enable_static()
+
+
+# Correct: General.
+class TestSqueezeOp(OpTest):
+    def setUp(self):
+        self.op_type = "squeeze"
+        self.init_test_case()
+        self.set_mlu()
+        self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
+        self.init_attrs()
+        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape), }
+
+    def set_mlu(self):
+        self.__class__.use_mlu = True
+        self.place = paddle.device.MLUPlace(0)
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out")
+
+    def init_test_case(self):
+        self.ori_shape = (1, 3, 1, 40)
+        self.axes = (0, 2)
+        self.new_shape = (3, 40)
+
+    def init_attrs(self):
+        self.attrs = {"axes": self.axes}
+
+
+class TestSqueezeBF16Op(OpTest):
+    def setUp(self):
+        self.op_type = "squeeze"
+        self.dtype = np.uint16
+        self.init_test_case()
+        x = np.random.random(self.ori_shape).astype("float32")
+        out = x.reshape(self.new_shape)
+        self.inputs = {"X": convert_float_to_uint16(x)}
+        self.init_attrs()
+        self.outputs = {"Out": convert_float_to_uint16(out)}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out")
+
+    def init_test_case(self):
+        self.ori_shape = (1, 3, 1, 40)
+        self.axes = (0, 2)
+        self.new_shape = (3, 40)
+
+    def init_attrs(self):
+        self.attrs = {"axes": self.axes}
+
+
+# Correct: There is a minus axis.
+class TestSqueezeOp1(TestSqueezeOp):
+    def init_test_case(self):
+        self.ori_shape = (1, 3, 1, 40)
+        self.axes = (0, -2)
+        self.new_shape = (3, 40)
+
+
+# Correct: No axes input.
+class TestSqueezeOp2(TestSqueezeOp):
+    def init_test_case(self):
+        self.ori_shape = (1, 20, 1, 5)
+        self.axes = ()
+        self.new_shape = (20, 5)
+
+
+# Correct: Just part of the axes are squeezed.
+class TestSqueezeOp3(TestSqueezeOp):
+    def init_test_case(self):
+        self.ori_shape = (6, 1, 5, 1, 4, 1)
+        self.axes = (1, -1)
+        self.new_shape = (6, 5, 1, 4)
+
+
+# Correct: A dimension listed in axes that is not of size 1 remains unchanged.
+class TestSqueezeOp4(TestSqueezeOp):
+    def init_test_case(self):
+        self.ori_shape = (6, 1, 5, 1, 4, 1)
+        self.axes = (1, 2)
+        self.new_shape = (6, 5, 1, 4, 1)
+
+
+if __name__ == "__main__":
+    unittest.main()
-- 
GitLab
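
Reviewer note (not part of the patch): besides the OpTest cases above, the new kernels can be exercised end to end from static-graph Python. The sketch below is a minimal, hypothetical example; it assumes a PaddlePaddle build configured with PADDLE_WITH_MLU and an MLU card visible as device 0. In static mode paddle.squeeze lowers to the squeeze2 op, so this path hits the squeeze2 MLU kernel registered in squeeze_op_mlu.cc.

    # Hypothetical usage sketch, assumes an MLU-enabled PaddlePaddle build.
    import numpy as np
    import paddle

    paddle.enable_static()

    main_prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, startup_prog):
        # Same shape/axes as the "General" test case: squeeze dims 0 and 2.
        x = paddle.static.data(name="x", shape=[1, 3, 1, 40], dtype="float32")
        out = paddle.squeeze(x, axis=[0, 2])

    place = paddle.device.MLUPlace(0)  # first MLU card
    exe = paddle.static.Executor(place)
    exe.run(startup_prog)
    res, = exe.run(main_prog,
                   feed={"x": np.random.random([1, 3, 1, 40]).astype("float32")},
                   fetch_list=[out])
    print(res.shape)  # expected: (3, 40)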