diff --git a/python/paddle/fluid/tests/unittests/xpu/CMakeLists.txt b/python/paddle/fluid/tests/unittests/xpu/CMakeLists.txt
index c6aaf363138d420e9b133f4d00304dc54817c8a6..cf70f63580b997e1254c530befcf5299106432ca 100644
--- a/python/paddle/fluid/tests/unittests/xpu/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/xpu/CMakeLists.txt
@@ -24,5 +24,7 @@ foreach(TEST_OP ${DIST_TEST_OPS})
   py_test_modules(${TEST_OP} MODULES ${TEST_OP})
 endforeach()
 
-set_tests_properties(test_mul_op_xpu PROPERTIES TIMEOUT 120)
 set_tests_properties(test_conv2d_op_xpu PROPERTIES TIMEOUT 120)
+set_tests_properties(test_mul_op_xpu PROPERTIES TIMEOUT 120)
+set_tests_properties(test_matmul_v2_op_xpu PROPERTIES TIMEOUT 900)
+set_tests_properties(test_matmul_op_xpu PROPERTIES TIMEOUT 300)
diff --git a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py
index 3da9e32b015eda8efc6208fb50adc8cb99a73a55..f58c0d4cf074cd578185ce0ce9a25a44b3730293 100644
--- a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py
+++ b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py
@@ -87,7 +87,6 @@ xpu_test_device_type_white_list = ['xpu1_float64']
 xpu_test_op_type_white_list = [
     'dropout_float16',
     'dropout_grad_float16',
-    'matmul_v2_float16',
     "grad_add_float32"  # no api for grad_add, skip
 ]
 xpu_test_device_op_white_list = []
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py
index 1c68f8fb6bf1693a907ccc349a032373644ba77e..73f61c2d9d5bada2d9d00eb9edfb6024d6c29c9a 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py
@@ -294,6 +294,10 @@ class TestMatmulBaseGenerator(XPUOpTest):
         self.op_type = "matmul"
         self.dtype = np.float32 if not hasattr(self,
                                                'in_type') else self.in_type
+
+        self.__class__.no_need_check_grad = False if not hasattr(
+            self, 'no_need_check_grad') else self.no_need_check_grad
+
         shape_X = [4, 5] if not hasattr(self, 'shape_X') else self.shape_X
         shape_Y = [5, 6] if not hasattr(self, 'shape_Y') else self.shape_Y
         transpose_X = False if not hasattr(self,
@@ -314,12 +318,20 @@ class TestMatmulBaseGenerator(XPUOpTest):
         self.check_output_with_place(place, atol=1e-3)
 
     def test_check_grad_normal(self):
+        if hasattr(self.__class__, "no_need_check_grad"
+                   ) and self.__class__.no_need_check_grad == True:
+            return
+
         place = paddle.XPUPlace(0)
         self.check_grad_with_place(place, ['X', 'Y'],
                                    'Out',
                                    max_relative_error=5e-2)
 
     def test_check_grad_ignore_x(self):
+        if hasattr(self.__class__, "no_need_check_grad"
+                   ) and self.__class__.no_need_check_grad == True:
+            return
+
         place = paddle.XPUPlace(0)
         self.check_grad_with_place(place, ['Y'],
                                    'Out',
@@ -327,6 +339,10 @@ class TestMatmulBaseGenerator(XPUOpTest):
                                    no_grad_set=set("X"))
 
     def test_check_grad_ignore_y(self):
+        if hasattr(self.__class__, "no_need_check_grad"
+                   ) and self.__class__.no_need_check_grad == True:
+            return
+
         place = paddle.XPUPlace(0)
         self.check_grad_with_place(place, ['X'],
                                    'Out',
@@ -351,6 +367,9 @@ class XPUTestMatmulOp1(XPUOpTestWrapper):
             for transose_x in [True, False]:
                 for transose_y in [True, False]:
                     for batch in batch_size:
+                        no_need_check_grad = False
+                        if batch >= 5:
+                            no_need_check_grad = True
                         class_name = (
                             'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}_batch_{}'
                             .format(dim_X, dim_Y, transose_x, transose_y,
@@ -362,6 +381,7 @@
                             'shape_Y': shape_y,
                             'transpose_X': transose_x,
                             'transpose_Y': transose_y,
+                            'no_need_check_grad': no_need_check_grad,
                             'op_type': "matmul"
                         }
                         classes.append([class_name, attr_dict])
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py
index 8f31981355403677bb136af6d7c0f5d67b7d62b9..92b9ae3ae8998fea14726c4d2df6336d01245619 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py
@@ -80,6 +80,8 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper):
             self.dtype = self.in_type
             self.config()
             self.op_type = "matmul_v2"
+            if self.dtype == np.float16 or self.dtype == "float16":
+                self.__class__.no_need_check_grad = True
             x = np.random.random(self.x_shape).astype(self.dtype)
             y = np.random.random(self.y_shape).astype(self.dtype)
             # -0.1 ~ 0.1
@@ -99,6 +101,9 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper):
             self.check_output_with_place(place)
 
         def test_check_grad(self):
+            if hasattr(self.__class__, "no_need_check_grad"
+                       ) and self.__class__.no_need_check_grad == True:
+                return
             place = paddle.XPUPlace(0)
             self.check_grad_with_place(place, ['X', 'Y'], 'Out')
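
For reviewers unfamiliar with the dynamically generated XPU test classes: the diff threads a `no_need_check_grad` flag from the generator's `attr_dict` onto each generated class, and every `test_check_grad_*` method bails out early when the flag is set (float16 matmul_v2 cases, and matmul cases with batch >= 5, skip the slow gradient check but still verify forward output). Below is a minimal sketch of that pattern using plain `unittest`; the names `MatmulTestBase` and `make_test_classes` are illustrative stand-ins for the PR's actual wrapper classes, not code from this diff.

```python
import unittest


class MatmulTestBase(unittest.TestCase):
    # Default: run the gradient check. Generated classes override this
    # through the attr_dict passed to type() below.
    no_need_check_grad = False

    def test_check_grad(self):
        # Same early-return guard the diff adds to each test_check_grad_*.
        if getattr(self.__class__, "no_need_check_grad", False):
            return
        # Stand-in for self.check_grad_with_place(...); a real OpTest
        # would compare analytic and numeric gradients here.
        self.assertTrue(True)


def make_test_classes():
    classes = {}
    for batch in [2, 4, 10, 50]:
        # Mirror the diff's cutoff: gradient checks for batch >= 5 are
        # too slow on XPU, so those classes only run the forward check.
        attr_dict = {'batch': batch, 'no_need_check_grad': batch >= 5}
        name = 'TestMatmulBatch{}'.format(batch)
        classes[name] = type(name, (MatmulTestBase,), attr_dict)
    return classes


# Expose the generated classes at module scope so unittest discovers
# them, analogous to how the XPU wrapper registers its classes list.
globals().update(make_test_classes())

if __name__ == '__main__':
    unittest.main()
```

Setting the flag on `self.__class__` in `setUp` (rather than on the instance) matches how the wrapper injects `attr_dict` entries as class attributes via `type()`, so the `hasattr(self.__class__, ...)` guard sees the same value regardless of which test method runs first.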