Unverified commit 23bb2836, authored by QingshuChen, committed by GitHub

fix kunlun2 softmax unitest bug (#39274)

* fix kunlun2 softmax unitest bug
*test=kunlun

* minor
Parent 6b3a6a9f
@@ -307,8 +307,8 @@ XPUOpMap& get_kl2_ops() {
{"slice", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
pOpKernelType(vartype::FP16, XPUPlace()),
pOpKernelType(vartype::INT32, XPUPlace())})},
{"softmax_grad",
XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
{"softmax", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
pOpKernelType(vartype::FP16, XPUPlace())})},
{"softmax_grad",
XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
pOpKernelType(vartype::FP16, XPUPlace())})},
@@ -317,9 +317,6 @@ XPUOpMap& get_kl2_ops() {
pOpKernelType(vartype::FP16, XPUPlace())})},
{"softmax_with_cross_entropy",
XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
{"softmax", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
{"softmax", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
pOpKernelType(vartype::FP16, XPUPlace())})},
{"split", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
pOpKernelType(vartype::INT32, XPUPlace())})},
{"squeeze2_grad",
@@ -18,6 +18,8 @@ import sys
import unittest
sys.path.append("..")
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
np.random.seed(10)
@@ -40,13 +42,31 @@ def ref_softmax(x, axis=None, dtype=None):
return np.apply_along_axis(stable_softmax, axis, x_t)
class TestXPUSoftmaxOp(XPUOpTest):
class XPUTestSoftmaxOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'softmax'
self.use_dynamic_create_class = True
def dynamic_create_class(self):
base_class = self.TestSoftmaxOp
classes = []
shapes = [[2, 3, 4, 5], [7, 1], [63, 18], [2, 38512], [3, 4095]]
axis = [-1, 0, 1]
for shape in shapes:
for axi in axis:
class_name = 'XPUTestSoftmax_' + \
str(shape) + "_" + str(axi)
attr_dict = {'shape': shape, 'axis': axi}
classes.append([class_name, attr_dict])
return base_class, classes
class TestSoftmaxOp(XPUOpTest):
def setUp(self):
self.op_type = "softmax"
self.shape = [2, 3, 4, 5]
if not hasattr(self, 'shape'):
self.shape = [1, 7]
self.axis = -1
self.set_attrs()
self.init_type()
self.dtype = np.float32
x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
out = np.apply_along_axis(stable_softmax, self.axis, x)
@@ -55,12 +75,6 @@ class TestXPUSoftmaxOp(XPUOpTest):
self.outputs = {'Out': out}
self.attrs = {'axis': self.axis, 'use_xpu': True}
def init_type(self):
self.dtype = np.float16
def set_attrs(self):
pass
def test_check_output(self):
self.check_output_with_place(paddle.XPUPlace(0), atol=1e-4)
@@ -68,35 +82,9 @@ class TestXPUSoftmaxOp(XPUOpTest):
self.check_grad_with_place(paddle.XPUPlace(0), ['X'], 'Out')
# class TestXPUSoftmaxAxis3(TestXPUSoftmaxOp):
# def set_attrs(self):
# self.axis = 3
# class TestXPUSoftmax2D(TestXPUSoftmaxOp):
# def set_attrs(self):
# self.shape = [10, 12]
# class TestXPUSoftmax3D(TestXPUSoftmaxOp):
# def set_attrs(self):
# self.shape = [4, 5, 6]
# class TestXPUSoftmaxAxis3FP16(TestXPUSoftmaxOp):
# def set_attrs(self):
# self.axis = 3
# def init_type(self):
# self.dtype = np.float16
# class TestXPUSoftmax2DFP16(TestXPUSoftmaxOp):
# def set_attrs(self):
# self.shape = [10, 12]
# def init_type(self):
# self.dtype = np.float16
# class TestXPUSoftmax3DFP16(TestXPUSoftmaxOp):
# def set_attrs(self):
# self.shape = [4, 5, 6]
# def init_type(self):
# self.dtype = np.float16
support_types = get_xpu_op_support_types('softmax')
for stype in support_types:
create_test_class(globals(), XPUTestSoftmaxOp, stype)
if __name__ == "__main__":
unittest.main()
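
For readers unfamiliar with the new test layout: XPUTestSoftmaxOp declares the op name and, because use_dynamic_create_class is True, dynamic_create_class returns the base TestSoftmaxOp class plus one (name, attrs) entry per (shape, axis) pair; create_test_class then repeats that for every dtype reported by get_xpu_op_support_types('softmax'), which now includes FP32 and FP16. Below is a minimal standalone sketch of the same dynamic class-generation idea using only unittest and NumPy; SoftmaxCaseBase and register_cases are hypothetical stand-ins for Paddle's XPUOpTest / create_test_class machinery, not the framework's actual API.

# Minimal sketch (hypothetical helper names) of the dynamic test-class pattern
# used above: build one unittest.TestCase subclass per (shape, axis, dtype).
import unittest
import numpy as np


def stable_softmax(x):
    # Numerically stable softmax for a 1-D slice: shift by the max before exp.
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / exps.sum()


class SoftmaxCaseBase(unittest.TestCase):
    # Defaults, overridden by the generated classes below.
    shape = [2, 3, 4, 5]
    axis = -1
    dtype = np.float32

    def test_reference_softmax(self):
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.apply_along_axis(stable_softmax, self.axis, x)
        # Every slice along `axis` must sum to 1 (generous tolerance for FP16).
        sums = out.astype(np.float64).sum(axis=self.axis)
        np.testing.assert_allclose(sums, 1.0, rtol=1e-2)


def register_cases(scope, dtypes):
    # Stand-in for create_test_class: emit one named class per combination,
    # mirroring dynamic_create_class in the diff above.
    shapes = [[2, 3, 4, 5], [10, 12]]
    axes = [-1, 0, 1]
    for dtype in dtypes:
        for shape in shapes:
            for axis in axes:
                name = 'TestSoftmax_{}_{}_{}'.format(
                    np.dtype(dtype).name,
                    'x'.join(str(d) for d in shape),
                    str(axis).replace('-', 'neg'))
                attrs = {'shape': shape, 'axis': axis, 'dtype': dtype}
                scope[name] = type(name, (SoftmaxCaseBase,), attrs)


register_cases(globals(), [np.float32, np.float16])

if __name__ == '__main__':
    unittest.main()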
@@ -336,8 +336,8 @@ if [ "${NEW_OP_TEST_ADDED}" != "" ] && [ "${GIT_PR_ID}" != "" ]; then
CHECK_WHOLE=$CHECK_OUTPUT$CHECK_OUTPUT_WITH_PLACE$CHECK_GRAD$CHECK_GRAD_CHECK
if [ "${CHECK_WHOLE}" != "" ] ; then
CHECK_OP=${CHECK_WHOLE//+/'\n+'}
echo_line="Please use the default precision parameters of 'atol, rtol, eps, max_relative_error'. If you don't use the default value, you must have one RD (Xreki (Recommend), fuyinno4 (Recommend for kunlun), zhiqiu or qili93 (Recommend for NPU) , luotao1, lanxianghit or phlrain) approval for the usage of other values. The detailed information is in the link: https://github.cor/PaddlePaddle/Paddle/wiki/OP-test-accuracy-requirements. The error line is ${CHECK_OP}\n"
check_approval 1 6836917 47554610 12538138 43953930 35824027 6888866 16605440
echo_line="Please use the default precision parameters of 'atol, rtol, eps, max_relative_error'. If you don't use the default value, you must have one RD (Xreki (Recommend), fuyinno4, QingshuChen(Recommend for kunlun), zhiqiu or qili93 (Recommend for NPU) , luotao1, lanxianghit or phlrain) approval for the usage of other values. The detailed information is in the link: https://github.cor/PaddlePaddle/Paddle/wiki/OP-test-accuracy-requirements. The error line is ${CHECK_OP}\n"
check_approval 1 6836917 47554610 12538138 43953930 35824027 6888866 16605440 2002279
fi
fi
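
The shell change above only adds QingshuChen to the reviewer list (and the corresponding ID 2002279 to check_approval) for the existing precision gate: when an added op test overrides the default 'atol, rtol, eps, max_relative_error' values, such as the atol=1e-4 seen in the XPU test above, the PR needs a sign-off from one of the listed RDs. A rough, hypothetical Python rendering of that grep-and-approve idea is sketched below; the real check lives in the bash script around check_approval, so the helper names here are illustrative only.

# Hypothetical Python sketch of the precision-parameter gate shown in the
# shell hunk: flag added test lines that override default tolerances.
import re

PRECISION_ARGS = re.compile(r'\b(atol|rtol|eps|max_relative_error)\s*=')


def lines_needing_approval(added_diff_lines):
    # Keep only '+' lines from the diff that set a non-default precision arg.
    return [line for line in added_diff_lines
            if line.startswith('+') and PRECISION_ARGS.search(line)]


if __name__ == '__main__':
    sample_diff = [
        "+        self.check_output_with_place(paddle.XPUPlace(0), atol=1e-4)",
        "+        self.check_grad_with_place(paddle.XPUPlace(0), ['X'], 'Out')",
    ]
    flagged = lines_needing_approval(sample_diff)
    if flagged:
        # The bash script prints echo_line at this point and calls
        # check_approval, which requires one of the listed reviewer IDs.
        print("Needs one RD approval for non-default precision parameters:")
        for line in flagged:
            print(line)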