From 23bb2836a6304711f02a0866ed4f87589229f7a7 Mon Sep 17 00:00:00 2001
From: QingshuChen
Date: Sat, 29 Jan 2022 11:53:09 +0800
Subject: [PATCH] fix kunlun2 softmax unittest bug (#39274)

* fix kunlun2 softmax unittest bug
*test=kunlun

* minor
---
 .../fluid/platform/device/xpu/xpu2_op_list.h  |   7 +-
 .../unittests/xpu/test_softmax_op_xpu.py      | 102 ++++++++----------
 tools/check_file_diff_approvals.sh            |   4 +-
 3 files changed, 49 insertions(+), 64 deletions(-)

diff --git a/paddle/fluid/platform/device/xpu/xpu2_op_list.h b/paddle/fluid/platform/device/xpu/xpu2_op_list.h
index 87644584330..cb2b57474d1 100644
--- a/paddle/fluid/platform/device/xpu/xpu2_op_list.h
+++ b/paddle/fluid/platform/device/xpu/xpu2_op_list.h
@@ -307,8 +307,8 @@ XPUOpMap& get_kl2_ops() {
       {"slice", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
                               pOpKernelType(vartype::FP16, XPUPlace()),
                               pOpKernelType(vartype::INT32, XPUPlace())})},
-      {"softmax_grad",
-       XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
+      {"softmax", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
+                                pOpKernelType(vartype::FP16, XPUPlace())})},
       {"softmax_grad",
        XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
                      pOpKernelType(vartype::FP16, XPUPlace())})},
@@ -317,9 +317,6 @@ XPUOpMap& get_kl2_ops() {
         pOpKernelType(vartype::FP16, XPUPlace())})},
       {"softmax_with_cross_entropy",
        XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
-      {"softmax", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
-      {"softmax", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
-                                pOpKernelType(vartype::FP16, XPUPlace())})},
       {"split", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
                               pOpKernelType(vartype::INT32, XPUPlace())})},
       {"squeeze2_grad",
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_softmax_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_softmax_op_xpu.py
index f0f0e3d86df..aa56a463b90 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_softmax_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_softmax_op_xpu.py
@@ -18,6 +18,8 @@ import sys
 import unittest
 sys.path.append("..")
 from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
+
 
 paddle.enable_static()
 np.random.seed(10)
@@ -40,63 +42,49 @@ def ref_softmax(x, axis=None, dtype=None):
     return np.apply_along_axis(stable_softmax, axis, x_t)
 
 
-class TestXPUSoftmaxOp(XPUOpTest):
-    def setUp(self):
-        self.op_type = "softmax"
-        self.shape = [2, 3, 4, 5]
-        self.axis = -1
-        self.set_attrs()
-        self.init_type()
-
-        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
-        out = np.apply_along_axis(stable_softmax, self.axis, x)
-
-        self.inputs = {'X': x}
-        self.outputs = {'Out': out}
-        self.attrs = {'axis': self.axis, 'use_xpu': True}
-
-    def init_type(self):
-        self.dtype = np.float16
-
-    def set_attrs(self):
-        pass
-
-    def test_check_output(self):
-        self.check_output_with_place(paddle.XPUPlace(0), atol=1e-4)
-
-    def test_check_grad(self):
-        self.check_grad_with_place(paddle.XPUPlace(0), ['X'], 'Out')
-
-
-# class TestXPUSoftmaxAxis3(TestXPUSoftmaxOp):
-#     def set_attrs(self):
-#         self.axis = 3
-
-# class TestXPUSoftmax2D(TestXPUSoftmaxOp):
-#     def set_attrs(self):
-#         self.shape = [10, 12]
-
-# class TestXPUSoftmax3D(TestXPUSoftmaxOp):
-#     def set_attrs(self):
-#         self.shape = [4, 5, 6]
-
-# class TestXPUSoftmaxAxis3FP16(TestXPUSoftmaxOp):
-#     def set_attrs(self):
-#         self.axis = 3
-#     def init_type(self):
-#         self.dtype = np.float16
-
-# class TestXPUSoftmax2DFP16(TestXPUSoftmaxOp):
-#     def set_attrs(self):
-#         self.shape = [10, 12]
-#     def init_type(self):
-#         self.dtype = np.float16
-
-# class TestXPUSoftmax3DFP16(TestXPUSoftmaxOp):
-#     def set_attrs(self):
-#         self.shape = [4, 5, 6]
-#     def init_type(self):
-#         self.dtype = np.float16
+class XPUTestSoftmaxOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'softmax'
+        self.use_dynamic_create_class = True
+
+    def dynamic_create_class(self):
+        base_class = self.TestSoftmaxOp
+        classes = []
+        shapes = [[2, 3, 4, 5], [7, 1], [63, 18], [2, 38512], [3, 4095]]
+        axis = [-1, 0, 1]
+        for shape in shapes:
+            for axi in axis:
+                class_name = 'XPUTestSoftmax_' + \
+                    str(shape) + "_" + str(axi)
+                attr_dict = {'shape': shape, 'axis': axi}
+                classes.append([class_name, attr_dict])
+        return base_class, classes
+
+    class TestSoftmaxOp(XPUOpTest):
+        def setUp(self):
+            self.op_type = "softmax"
+            if not hasattr(self, 'shape'):
+                self.shape = [1, 7]
+                self.axis = -1
+            self.dtype = np.float32
+
+            x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+            out = np.apply_along_axis(stable_softmax, self.axis, x)
+
+            self.inputs = {'X': x}
+            self.outputs = {'Out': out}
+            self.attrs = {'axis': self.axis, 'use_xpu': True}
+
+        def test_check_output(self):
+            self.check_output_with_place(paddle.XPUPlace(0), atol=1e-4)
+
+        def test_check_grad(self):
+            self.check_grad_with_place(paddle.XPUPlace(0), ['X'], 'Out')
+
+
+support_types = get_xpu_op_support_types('softmax')
+for stype in support_types:
+    create_test_class(globals(), XPUTestSoftmaxOp, stype)
 
 if __name__ == "__main__":
     unittest.main()
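Note on the test refactor above: the hand-written FP16 subclasses are replaced by the XPUOpTestWrapper machinery, where dynamic_create_class() returns a base class plus a list of (class_name, attr_dict) pairs, and the trailing loop calls create_test_class() once per dtype reported by get_xpu_op_support_types('softmax'). For readers unfamiliar with that pattern, here is a minimal, self-contained sketch of the same idea using only the standard library; the names SoftmaxCaseBase and TestSoftmax_* are hypothetical and not part of the Paddle API.

import unittest


class SoftmaxCaseBase(unittest.TestCase):
    # Defaults used when a generated subclass does not override them,
    # mirroring the hasattr() fallback in TestSoftmaxOp.setUp above.
    shape = [1, 7]
    axis = -1

    def test_axis_is_valid_for_shape(self):
        self.assertTrue(-len(self.shape) <= self.axis < len(self.shape))


# Build one TestCase subclass per (shape, axis) combination via type(),
# so each combination is collected and reported as a separate test case.
for shape in ([2, 3, 4, 5], [7, 1], [63, 18]):
    for axis in (-1, 0, 1):
        name = 'TestSoftmax_%s_axis%d' % ('x'.join(map(str, shape)), axis)
        globals()[name] = type(name, (SoftmaxCaseBase,),
                               {'shape': shape, 'axis': axis})

if __name__ == '__main__':
    unittest.main()

Registering the generated classes in globals() is what lets unittest's default loader discover them, which is the same reason the patched test file passes globals() to create_test_class().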
diff --git a/tools/check_file_diff_approvals.sh b/tools/check_file_diff_approvals.sh
index 503c763c08c..77b4fc5667d 100644
--- a/tools/check_file_diff_approvals.sh
+++ b/tools/check_file_diff_approvals.sh
@@ -336,8 +336,8 @@ if [ "${NEW_OP_TEST_ADDED}" != "" ] && [ "${GIT_PR_ID}" != "" ]; then
     CHECK_WHOLE=$CHECK_OUTPUT$CHECK_OUTPUT_WITH_PLACE$CHECK_GRAD$CHECK_GRAD_CHECK
     if [ "${CHECK_WHOLE}" != "" ] ; then
         CHECK_OP=${CHECK_WHOLE//+/'\n+'}
-        echo_line="Please use the default precision parameters of 'atol, rtol, eps, max_relative_error'. If you don't use the default value, you must have one RD (Xreki (Recommend), fuyinno4 (Recommend for kunlun), zhiqiu or qili93 (Recommend for NPU) , luotao1, lanxianghit or phlrain) approval for the usage of other values. The detailed information is in the link: https://github.com/PaddlePaddle/Paddle/wiki/OP-test-accuracy-requirements. The error line is ${CHECK_OP}\n"
-        check_approval 1 6836917 47554610 12538138 43953930 35824027 6888866 16605440
+        echo_line="Please use the default precision parameters of 'atol, rtol, eps, max_relative_error'. If you don't use the default value, you must have one RD (Xreki (Recommend), fuyinno4, QingshuChen (Recommend for kunlun), zhiqiu or qili93 (Recommend for NPU), luotao1, lanxianghit or phlrain) approval for the usage of other values. The detailed information is in the link: https://github.com/PaddlePaddle/Paddle/wiki/OP-test-accuracy-requirements. The error line is ${CHECK_OP}\n"
+        check_approval 1 6836917 47554610 12538138 43953930 35824027 6888866 16605440 2002279
     fi
 fi
-- 
GitLab
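Closing note: a quick way to exercise the newly registered FP16 softmax kernel outside the OpTest harness is a dynamic-graph smoke test along these lines. This is a sketch rather than part of the patch; it assumes a Paddle wheel built with XPU support and a Kunlun2 device visible as 'xpu:0'.

import numpy as np
import paddle

# Assumption: this interpreter runs a Paddle build compiled with XPU
# support and at least one Kunlun2 card is attached as device 'xpu:0'.
paddle.set_device('xpu:0')

x = paddle.to_tensor(
    np.random.uniform(-1, 1, [2, 3, 4, 5]).astype('float16'))
y = paddle.nn.functional.softmax(x, axis=-1)

# Every slice along the softmax axis should sum to ~1.0 within FP16 tolerance.
print(y.sum(axis=-1).numpy())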