diff --git a/python/paddle/fluid/tests/unittests/mlu/test_adam_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_adam_op_mlu.py
index 4354883a44274fb05125aa3524e0f1ef0c330a66..3ac6e23d21a4d1ce9d8119f4f5de9a974c63d4b5 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_adam_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_adam_op_mlu.py
@@ -303,8 +303,8 @@ class TestNet(unittest.TestCase):
     def test_mlu(self):
         mlu_pred, mlu_loss = self._test(True)
         cpu_pred, cpu_loss = self._test(False)
-        self.assertTrue(np.allclose(mlu_pred, cpu_pred, rtol=1e-3))
-        self.assertTrue(np.allclose(mlu_loss, cpu_loss, rtol=1e-3))
+        np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-3)
+        np.testing.assert_allclose(mlu_loss, cpu_loss, rtol=1e-3)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_adamw_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_adamw_op_mlu.py
index 5c69cdb74093af7eadacdbb22757f5251df514c3..e60664f27a0e17c0a665f6cafe887009c9861cb5 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_adamw_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_adamw_op_mlu.py
@@ -249,8 +249,8 @@ class TestNet(unittest.TestCase):
     def test_mlu(self):
         mlu_pred, mlu_loss = self._test(True)
         cpu_pred, cpu_loss = self._test(False)
-        self.assertTrue(np.allclose(mlu_pred, cpu_pred, rtol=1e-3))
-        self.assertTrue(np.allclose(mlu_loss, cpu_loss, rtol=1e-3))
+        np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-3)
+        np.testing.assert_allclose(mlu_loss, cpu_loss, rtol=1e-3)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py
index 86f044b9d3dad9893bf8ad20a652e024842adca8..0f50da6b4f0596856a275bfcc641bd6fc3e154d0 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py
@@ -219,7 +219,10 @@ class TestBatchNormOpInference(unittest.TestCase):
         self.init_kernel_type()
 
     def __assert_close(self, tensor, np_array, msg, atol=1e-4):
-        self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
+        np.testing.assert_allclose(np.array(tensor),
+                                   np_array,
+                                   atol=atol,
+                                   err_msg=msg)
 
     def check_with_place(self, place, data_layout, dtype, shape):
         epsilon = 0.00001
@@ -672,7 +675,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase):
             x = np.random.randn(*shape).astype("float32")
             y1 = compute(x, False, False)
             y2 = compute(x, True, True)
-            self.assertTrue(np.allclose(y1, y2))
+            np.testing.assert_allclose(y1, y2)
 
     def test_static(self):
         places = [fluid.CPUPlace()]
@@ -697,7 +700,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase):
             x = np.random.randn(*shape).astype("float32")
             y1 = compute(x, False, False)
             y2 = compute(x, True, True)
-            self.assertTrue(np.allclose(y1, y2))
+            np.testing.assert_allclose(y1, y2)
 
 
 class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
index b0fec2bdd0f6ab50319459b01f07ceabee0aa2eb..b4f58a7c5f0189ab9b9298a1ee3f0557db734104 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
@@ -137,8 +137,8 @@ class TestBatchNorm(unittest.TestCase):
             y2 = compute_v2(x)
             y3 = compute_v3(x, False, False)
             y4 = compute_v4(x)
-            self.assertTrue(np.allclose(y1, y2))
-            self.assertTrue(np.allclose(y3, y4))
+            np.testing.assert_allclose(y1, y2)
+            np.testing.assert_allclose(y3, y4)
 
     def test_static(self):
         places = [fluid.CPUPlace()]
@@ -172,7 +172,7 @@ class TestBatchNorm(unittest.TestCase):
             x = np.random.randn(*shape).astype("float32")
             y1 = compute_v1(x, False, False)
             y2 = compute_v2(x)
-            self.assertTrue(np.allclose(y1, y2))
+            np.testing.assert_allclose(y1, y2)
 
 
 class TestBatchNormChannelLast(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py
index 78dd988aa7ef89c15321f6170eaed18356e07167..3805d27a14f049b3ff899b971b8cd1a456271efa 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py
@@ -58,7 +58,7 @@ def test_static_layer(place,
                                     "weight": weight_np
                                 },
                                 fetch_list=[res])
-    return static_result
+    return static_result[0]
 
 
 def test_static_functional(place,
@@ -98,7 +98,7 @@ def test_static_functional(place,
                                     "weight": weight_np
                                 },
                                 fetch_list=[res])
-    return static_result
+    return static_result[0]
 
 
 def test_dygraph_layer(place,
@@ -174,16 +174,18 @@ class TestBCELoss(unittest.TestCase):
                 dy_result = test_dygraph_layer(place, input_np, label_np,
                                                reduction)
                 expected = calc_bceloss(input_np, label_np, reduction)
-                self.assertTrue(np.allclose(static_result, expected))
-                self.assertTrue(np.allclose(static_result, dy_result))
-                self.assertTrue(np.allclose(dy_result, expected))
+                np.testing.assert_allclose(static_result, expected, rtol=1e-6)
+                np.testing.assert_allclose(static_result, dy_result)
+                np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
                 static_functional = test_static_functional(
                     place, input_np, label_np, reduction)
                 dy_functional = test_dygraph_functional(place, input_np,
                                                         label_np, reduction)
-                self.assertTrue(np.allclose(static_functional, expected))
-                self.assertTrue(np.allclose(static_functional, dy_functional))
-                self.assertTrue(np.allclose(dy_functional, expected))
+                np.testing.assert_allclose(static_functional,
+                                           expected,
+                                           rtol=1e-6)
+                np.testing.assert_allclose(static_functional, dy_functional)
+                np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
 
     def test_BCELoss_weight(self):
         input_np = np.random.uniform(0.1, 0.8,
@@ -207,9 +209,9 @@ class TestBCELoss(unittest.TestCase):
                                            label_np,
                                            reduction,
                                            weight_np=weight_np)
-            self.assertTrue(np.allclose(static_result, expected))
-            self.assertTrue(np.allclose(static_result, dy_result))
-            self.assertTrue(np.allclose(dy_result, expected))
+            np.testing.assert_allclose(static_result, expected, rtol=1e-6)
+            np.testing.assert_allclose(static_result, dy_result)
+            np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
             static_functional = test_static_functional(place,
                                                        input_np,
                                                        label_np,
@@ -220,9 +222,9 @@ class TestBCELoss(unittest.TestCase):
                                                    label_np,
                                                    reduction,
                                                    weight_np=weight_np)
-            self.assertTrue(np.allclose(static_functional, expected))
-            self.assertTrue(np.allclose(static_functional, dy_functional))
-            self.assertTrue(np.allclose(dy_functional, expected))
+            np.testing.assert_allclose(static_functional, expected, rtol=1e-6)
+            np.testing.assert_allclose(static_functional, dy_functional)
+            np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
 
     def test_BCELoss_error(self):
         paddle.disable_static()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py
index 42989a5c44b101ab0b97f28e62004f8a63866db5..6b0b91cce9383bac185c01f65fd3a170d49ac2ed 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py
@@ -61,7 +61,7 @@ def test_static(place,
             res = call_bce_layer(logit, label, weight, reduction, pos_weight)
         exe = paddle.static.Executor(place)
         static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])
-    return static_result
+    return static_result[0]
 
 
 paddle.enable_static()
@@ -86,9 +86,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                          reduction=reduction)
                 expected = calc_bce_with_logits_loss(logit_np, label_np,
                                                      reduction)
-                self.assertTrue(np.allclose(static_result, expected))
-                self.assertTrue(np.allclose(static_result, dy_result))
-                self.assertTrue(np.allclose(dy_result, expected))
+                np.testing.assert_allclose(static_result, expected, rtol=1e-6)
+                np.testing.assert_allclose(static_result, dy_result)
+                np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
                 static_functional = test_static(place,
                                                 logit_np,
                                                 label_np,
@@ -100,9 +100,11 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                              reduction=reduction,
                                              functional=True)
 
-                self.assertTrue(np.allclose(static_functional, expected))
-                self.assertTrue(np.allclose(static_functional, dy_functional))
-                self.assertTrue(np.allclose(dy_functional, expected))
+                np.testing.assert_allclose(static_functional,
+                                           expected,
+                                           rtol=1e-6)
+                np.testing.assert_allclose(static_functional, dy_functional)
+                np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
 
     def test_BCEWithLogitsLoss_weight(self):
         logit_np = np.random.uniform(0.1, 0.8,
@@ -126,9 +128,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                         label_np,
                                         reduction,
                                         weight_np=weight_np)
-            self.assertTrue(np.allclose(static_result, expected))
-            self.assertTrue(np.allclose(static_result, dy_result))
-            self.assertTrue(np.allclose(dy_result, expected))
+            np.testing.assert_allclose(static_result, expected, rtol=1e-6)
+            np.testing.assert_allclose(static_result, dy_result)
+            np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
             static_functional = test_static(place,
                                             logit_np,
                                             label_np,
@@ -141,9 +143,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                          weight_np=weight_np,
                                          reduction=reduction,
                                          functional=True)
-            self.assertTrue(np.allclose(static_functional, expected))
-            self.assertTrue(np.allclose(static_functional, dy_functional))
-            self.assertTrue(np.allclose(dy_functional, expected))
+            np.testing.assert_allclose(static_functional, expected, rtol=1e-6)
+            np.testing.assert_allclose(static_functional, dy_functional)
+            np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
 
     def test_BCEWithLogitsLoss_pos_weight(self):
         logit_np = np.random.uniform(0.1, 0.8,
@@ -160,9 +162,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                  reduction, pos_weight_np)
         expected = calc_bce_with_logits_loss(logit_np, label_np,
                                              reduction, weight_np, pos_weight_np)
-        self.assertTrue(np.allclose(static_result, expected))
-        self.assertTrue(np.allclose(static_result, dy_result))
-        self.assertTrue(np.allclose(dy_result, expected))
+        np.testing.assert_allclose(static_result, expected)
+        np.testing.assert_allclose(static_result, dy_result)
+        np.testing.assert_allclose(dy_result, expected)
         static_functional = test_static(place,
                                         logit_np,
                                         label_np,
@@ -177,9 +179,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                         reduction,
                                         pos_weight_np,
                                         functional=True)
-        self.assertTrue(np.allclose(static_functional, expected))
-        self.assertTrue(np.allclose(static_functional, dy_functional))
-        self.assertTrue(np.allclose(dy_functional, expected))
+        np.testing.assert_allclose(static_functional, expected)
+        np.testing.assert_allclose(static_functional, dy_functional)
+        np.testing.assert_allclose(dy_functional, expected)
 
     def test_BCEWithLogitsLoss_error(self):
         paddle.disable_static()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_bilinear_interp_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_bilinear_interp_v2_op_mlu.py
index d7e53639490d25b243984c2b1189be54e435ea36..ece4c2f3304abb45ba5f19b78ad0435f43d1bf3e 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_bilinear_interp_v2_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_bilinear_interp_v2_op_mlu.py
@@ -563,7 +563,7 @@ class TestBilinearInterpOpAPI(unittest.TestCase):
                                          out_w=12,
                                          align_corners=True)
         for res in results:
-            self.assertTrue(np.allclose(res, expect_res))
+            np.testing.assert_allclose(res, expect_res, rtol=1e-6)
 
 
 class TestBilinearInterpOpAPI_dy(unittest.TestCase):
@@ -585,7 +585,7 @@ class TestBilinearInterpOpAPI_dy(unittest.TestCase):
                                      size=[12, 12],
                                      mode="bilinear",
                                      align_corners=False)
-            self.assertTrue(np.allclose(out.numpy(), expect_res))
+            np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
 
 
 class TestBilinearInterpOpAPI_dy2(unittest.TestCase):
@@ -609,7 +609,7 @@ class TestBilinearInterpOpAPI_dy2(unittest.TestCase):
                                      size=size,
                                      mode="bilinear",
                                      align_corners=False)
-            self.assertTrue(np.allclose(out.numpy(), expect_res))
+            np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
 
 
 class TestBilinearInterpOpAPI_dy3(unittest.TestCase):
@@ -633,7 +633,7 @@ class TestBilinearInterpOpAPI_dy3(unittest.TestCase):
                                      size=[size, size],
                                      mode="bilinear",
                                      align_corners=False)
-            self.assertTrue(np.allclose(out.numpy(), expect_res))
+            np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
 
 
 class TestBilinearInterpOpAPI_dy4(unittest.TestCase):
@@ -658,7 +658,7 @@ class TestBilinearInterpOpAPI_dy4(unittest.TestCase):
                                      mode="bilinear",
                                      align_corners=False)
 
-            self.assertTrue(np.allclose(out.numpy(), expect_res))
+            np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py
index 04332b061f885ccfa54819c2f4b08419f85abff9..1b3ce96111573d9e70384425fba0fcb89c62bb61 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py
@@ -210,22 +210,26 @@ class TestDistBase(unittest.TestCase):
         input2 = np.random.random((10, 1000)).astype(np_data_type)
         if col_type == "broadcast":
             need_result = input2
-            self.assertTrue(np.allclose(tr0_out, need_result))
-            self.assertTrue(np.allclose(tr1_out, need_result))
+            np.testing.assert_allclose(tr0_out, need_result)
+            np.testing.assert_allclose(tr1_out, need_result)
         elif col_type == "allreduce":
             need_result = input1 + input2
-            self.assertTrue(
-                np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05))
-            self.assertTrue(
-                np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05))
+            np.testing.assert_allclose(tr0_out,
+                                       need_result,
+                                       rtol=1e-05,
+                                       atol=1e-05)
+            np.testing.assert_allclose(tr1_out,
+                                       need_result,
+                                       rtol=1e-05,
+                                       atol=1e-05)
         elif col_type == "reduce":
             need_result = input1 + input2
-            self.assertTrue(np.allclose(tr0_out, need_result))
+            np.testing.assert_allclose(tr0_out, need_result)
         elif col_type == "allgather":
             need_result = np.vstack((input1, input2))
             tr_out0 = np.vstack((tr0_out[0], tr0_out[1]))
             tr_out1 = np.vstack((tr1_out[0], tr1_out[1]))
-            self.assertTrue(np.allclose(tr_out0, need_result))
-            self.assertTrue(np.allclose(tr_out1, need_result))
+            np.testing.assert_allclose(tr_out0, need_result)
+            np.testing.assert_allclose(tr_out1, need_result)
         else:
             pass
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py
index 4ec1e7f7528bb5c8a3f2c7e6e62b61234df2bea1..47fb3a1a2305cd39a67cddc2a4b884344d542e1a 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py
@@ -259,47 +259,63 @@ class TestDistBase(unittest.TestCase):
         input2 = np.random.random((10, 1000)).astype(np_data_type)
         if col_type == "broadcast":
             need_result = input2
-            self.assertTrue(np.allclose(tr0_out, need_result))
-            self.assertTrue(np.allclose(tr1_out, need_result))
+            np.testing.assert_allclose(tr0_out, need_result)
+            np.testing.assert_allclose(tr1_out, need_result)
        elif col_type == "allreduce_sum":
             need_result = input1 + input2
-            self.assertTrue(
-                np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05))
-            self.assertTrue(
-                np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05))
+            np.testing.assert_allclose(tr0_out,
+                                       need_result,
+                                       rtol=1e-05,
+                                       atol=1e-05)
+            np.testing.assert_allclose(tr1_out,
+                                       need_result,
+                                       rtol=1e-05,
+                                       atol=1e-05)
         elif col_type == "allreduce_prod":
             need_result = input1 * input2
-            self.assertTrue(
-                np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05))
-            self.assertTrue(
-                np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05))
+            np.testing.assert_allclose(tr0_out,
+                                       need_result,
+                                       rtol=1e-05,
+                                       atol=1e-05)
+            np.testing.assert_allclose(tr1_out,
+                                       need_result,
+                                       rtol=1e-05,
+                                       atol=1e-05)
         elif col_type == "allreduce_max":
             need_result = np.maximum(input1, input2)
-            self.assertTrue(
-                np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05))
-            self.assertTrue(
-                np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05))
+            np.testing.assert_allclose(tr0_out,
+                                       need_result,
+                                       rtol=1e-05,
+                                       atol=1e-05)
+            np.testing.assert_allclose(tr1_out,
+                                       need_result,
+                                       rtol=1e-05,
+                                       atol=1e-05)
         elif col_type == "allreduce_min":
             need_result = np.minimum(input1, input2)
-            self.assertTrue(
-                np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05))
-            self.assertTrue(
-                np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05))
+            np.testing.assert_allclose(tr0_out,
+                                       need_result,
+                                       rtol=1e-05,
+                                       atol=1e-05)
+            np.testing.assert_allclose(tr1_out,
+                                       need_result,
+                                       rtol=1e-05,
+                                       atol=1e-05)
         elif col_type == "reduce_sum":
             need_result = input1 + input2
-            self.assertTrue(np.allclose(tr1_out, need_result))
+            np.testing.assert_allclose(tr1_out, need_result)
         elif col_type == "reduce_prod":
             need_result = input1 * input2
-            self.assertTrue(np.allclose(tr1_out, need_result))
+            np.testing.assert_allclose(tr1_out, need_result)
         elif col_type == "reduce_max":
             need_result = np.maximum(input1, input2)
-            self.assertTrue(np.allclose(tr1_out, need_result))
+            np.testing.assert_allclose(tr1_out, need_result)
         elif col_type == "reduce_min":
             need_result = np.minimum(input1, input2)
-            self.assertTrue(np.allclose(tr1_out, need_result))
+            np.testing.assert_allclose(tr1_out, need_result)
         elif col_type == "allgather":
             need_result = np.vstack((input1, input2))
-            self.assertTrue(np.allclose(tr0_out, need_result))
-            self.assertTrue(np.allclose(tr1_out, need_result))
+            np.testing.assert_allclose(tr0_out, need_result)
+            np.testing.assert_allclose(tr1_out, need_result)
         else:
             pass
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py
index e9d172c89410e42128155e9eb0e02dcfae6c612f..8497853561d878dd50f209873b609635cb90fcf1 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py
@@ -268,11 +268,11 @@ class TestDropoutAPI(unittest.TestCase):
             fetches = exe.run(fluid.default_main_program(),
                               feed={"input": in_np},
                               fetch_list=[res])
-            self.assertTrue(np.allclose(fetches[0], res_np))
+            np.testing.assert_allclose(fetches[0], res_np)
             fetches2 = exe.run(fluid.default_main_program(),
                                feed={"input": in_np},
                                fetch_list=[res6])
-            self.assertTrue(np.allclose(fetches2[0], res_np2))
+            np.testing.assert_allclose(fetches2[0], res_np2)
 
     def test_static(self):
         for place in self.places:
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_max_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_max_op_mlu.py
index 6ecbf51c28af867b9a128cdeb0f119e45c6b320d..a1be1152d16fe55482080867934333c99a5689da 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_max_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_max_op_mlu.py
@@ -361,8 +361,8 @@ class TestElementwiseMaxNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         mlu_pred, mlu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-        self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+        np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(mlu_loss, cpu_loss)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_min_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_min_op_mlu.py
index f04f0eb781e5dc596264dde2600782c943cd69ab..20bd124e4ab0a5ede92928bf3299e70d9fc61328 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_min_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_min_op_mlu.py
@@ -222,8 +222,8 @@ class TestElementwiseMinOpNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         mlu_pred, mlu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-        self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+        np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(mlu_loss, cpu_loss)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py
index cbc99c2fa66866619f9af1681e0f9891953f6532..19aad53f1d3aa0fc7f27e41f7c965f5660b09f71 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py
@@ -302,7 +302,7 @@ class TestExpandV2DygraphAPI(unittest.TestCase):
             expand_1 = paddle.expand(a, shape=[2, 5])
             np_array = np.array([2, 5])
             expand_2 = paddle.expand(a, shape=np_array)
-            self.assertTrue(np.array_equal(expand_1.numpy(), expand_2.numpy()))
+            np.testing.assert_allclose(expand_1.numpy(), expand_2.numpy())
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py
index 604dbf4ddbcce5f971a68a1602eb3983c66a6e24..d78c9405e36710c02ab09badd084aa67ff241e94 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py
@@ -122,7 +122,7 @@ class TestFillConstantOpWithSelectedRows(unittest.TestCase):
             result_array = np.array(out.get_tensor())
             full_array = np.full((123, 92), 3.8, 'float32')
 
-            self.assertTrue(np.array_equal(result_array, full_array))
+            np.testing.assert_allclose(result_array, full_array)
 
     def test_fill_constant_with_selected_rows(self):
         places = [core.CPUPlace()]
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_gather_nd_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gather_nd_op_mlu.py
index deee1a38b31013d2857b72bd38f12fefee31564e..47d74e97f9a16c4dec721b65f31e802d2bf04d68 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_gather_nd_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_gather_nd_op_mlu.py
@@ -292,7 +292,7 @@ class TestGatherNdAPI2(unittest.TestCase):
         output = paddle.fluid.layers.gather(input, index)
         output_np = output.numpy()
         expected_output = np.array([3, 4])
-        self.assertTrue(np.allclose(output_np, expected_output))
+        np.testing.assert_allclose(output_np[0], expected_output, rtol=1e-6)
         paddle.enable_static()
 
 
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py
index 6c6ddda303d4e4eec6e9dc2149b9df8c08b04827..bc59b3d0faffd4527e97fe757b02c6c1a73a1594 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py
@@ -101,7 +101,7 @@ class API_TestDygraphGather(unittest.TestCase):
             output = paddle.fluid.layers.gather(input, index)
             output_np = output.numpy()
             expected_output = np.array([[3, 4], [5, 6]]).astype('int32')
-            self.assertTrue(np.allclose(output_np, expected_output))
+            np.testing.assert_allclose(output_np, expected_output)
         paddle.enable_static()
 
     def test_out12(self):
@@ -113,7 +113,7 @@ class API_TestDygraphGather(unittest.TestCase):
             output = paddle.gather(x, index, axis=0)
             output_np = output.numpy()
             expected_output = gather_numpy(input_1, index_1, axis=0)
-            self.assertTrue(np.allclose(output_np, expected_output))
+            np.testing.assert_allclose(output_np, expected_output)
         paddle.enable_static()
 
     def test_zero_index(self):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py
index 9f755de6872348eaf76f17997d86e511cee0b4f5..acc711ffdbd9abfb867c6a1dc45b2837d25d85ae 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py
@@ -65,8 +65,12 @@ class TestGaussianRandomOp(OpTest):
         hist2, _ = np.histogram(data, range=(-3, 5))
         hist2 = hist2.astype("float32")
         hist2 /= float(outs[0].size)
-        self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.01),
-                        "hist: " + str(hist) + " hist2: " + str(hist2))
+        np.testing.assert_allclose(hist,
+                                   hist2,
+                                   rtol=0,
+                                   atol=0.01,
+                                   err_msg="hist: " + str(hist) + " hist2: " +
+                                   str(hist2))
 
 
 class TestMeanStdAreInt(TestGaussianRandomOp):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_gelu_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gelu_op_mlu.py
index 2cf89789bfc8b31cd84e0ce54d802352be34544c..4d6dc9c8b5d9368b14590f5ebc109be61516954c 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_gelu_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_gelu_op_mlu.py
@@ -150,8 +150,8 @@ class TestGeluNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         mlu_pred, mlu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(mlu_pred, cpu_pred, atol=1e-3))
-        self.assertTrue(np.allclose(mlu_loss, cpu_loss, atol=1e-3))
+        np.testing.assert_allclose(mlu_pred, cpu_pred, atol=1e-3)
+        np.testing.assert_allclose(mlu_loss, cpu_loss, atol=1e-3)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py
index 5050e2006f3339a64f1964685827c1a97cd6ac59..5fd9ea9fcdc74cf1d412f9a173ea886c9197fcb2 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py
@@ -147,7 +147,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_hardsigmoid(self.x_np)
         for r in res:
-            self.assertTrue(np.allclose(out_ref, r))
+            np.testing.assert_allclose(out_ref, r, rtol=1e-6)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -157,22 +157,23 @@ class TestHardsigmoidAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = ref_hardsigmoid(self.x_np)
         for r in [out1, out2]:
-            self.assertTrue(np.allclose(out_ref, r.numpy()))
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-6)
         paddle.enable_static()
 
     def test_fluid_api(self):
+        paddle.enable_static()
         with fluid.program_guard(fluid.Program()):
             x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
             out = fluid.layers.hard_sigmoid(x)
             exe = fluid.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
-        self.assertTrue(np.allclose(out_ref, res[0]))
+        np.testing.assert_allclose(out_ref, res[0])
 
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out = paddle.fluid.layers.hard_sigmoid(x)
-        self.assertTrue(np.allclose(out_ref, out.numpy()))
+        np.testing.assert_allclose(out_ref, out.numpy())
         paddle.enable_static()
 
     def test_errors(self):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py
index 5df59be28a87bf087abb4395ee1e5ba0a5366983..e5514285ba1f00b873d4cfff3a229c6082057bc3 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py
@@ -45,7 +45,11 @@ class TestLayerNormOp(unittest.TestCase):
         self.__class__.use_mlu = True
 
     def __assert_close(self, tensor, np_array, msg, atol=1e-4):
-        self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
+        np.testing.assert_allclose(np.array(tensor),
+                                   np_array,
+                                   rtol=1e-5,
+                                   atol=atol,
+                                   err_msg=msg)
 
     def check_forward_backward(self,
                                shape,
@@ -152,11 +156,11 @@ class TestLayerNormOp(unittest.TestCase):
                                 1e-3)
             self.__assert_close(x_grad, out[3], "x_grad")
             if has_scale:
-                self.__assert_close(scale_grad,
+                self.__assert_close(scale_grad.reshape(-1),
                                     out[fetch_list.index('scale@GRAD')],
                                     "scale_grad", 1e-3)
             if has_bias:
-                self.__assert_close(bias_grad,
+                self.__assert_close(bias_grad.reshape(-1),
                                     out[fetch_list.index('bias@GRAD')],
                                     "bias_grad")
 
@@ -287,7 +291,7 @@ class TestFP16ScaleBiasLayerNorm(unittest.TestCase):
             x_np, weight_np, bias_np, 'float32')
 
         def assert_equal(x, y):
-            self.assertTrue(np.array_equal(x, y))
+            np.testing.assert_allclose(x, y)
 
         assert_equal(y_np_1, y_np_2)
         assert_equal(x_g_np_1, x_g_np_2)
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_leaky_relu_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_leaky_relu_op_mlu.py
index 0aad79eb61f92a719373ec4e8c12477bda0a58f4..edf5d2bb284101d3a74e406e3426156d5532e9f3 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_leaky_relu_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_leaky_relu_op_mlu.py
@@ -145,8 +145,8 @@ class TestLeakyReluNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         mlu_pred, mlu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-        self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+        np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(mlu_loss, cpu_loss)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py
index 82aeb577205d5de9c1d38b6eca31db3a5870b0bf..6f068c341aab747460b06399dd57d4b2ae5bac92 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py
@@ -123,7 +123,7 @@ class TestLog2(TestActivation):
                            feed={"data_x": input_x},
                            fetch_list=[out1])
             expected_res = np.log2(input_x)
-            self.assertTrue(np.allclose(res1, expected_res))
+            np.testing.assert_allclose(res1[0], expected_res, rtol=1e-6)
 
         # dygraph
         with fluid.dygraph.guard():
@@ -134,7 +134,7 @@ class TestLog2(TestActivation):
             z_expected = np.array(np.log2(np_x))
             np.savetxt("np_z.txt", np_z.flatten(), fmt="%.4f")
             np.savetxt("z_expected.txt", z_expected.flatten(), fmt="%.4f")
-            self.assertTrue(np.allclose(np_z, z_expected, atol=1e-6))
+            np.testing.assert_allclose(np_z, z_expected, atol=1e-6)
 
 
 class TestLog10(TestActivation):
@@ -173,7 +173,7 @@ class TestLog10(TestActivation):
                            feed={"data_x": input_x},
                            fetch_list=[out1])
             expected_res = np.log10(input_x)
-            self.assertTrue(np.allclose(res1, expected_res))
+            np.testing.assert_allclose(res1[0], expected_res, rtol=1e-6)
 
         # dygraph
         with fluid.dygraph.guard():
@@ -182,7 +182,7 @@ class TestLog10(TestActivation):
             data_x = paddle.to_tensor(np_x)
             z = paddle.log10(data_x)
             np_z = z.numpy()
             z_expected = np.array(np.log10(np_x))
-            self.assertTrue(np.allclose(np_z, z_expected))
+            np.testing.assert_allclose(np_z, z_expected, rtol=1e-4)
 
 
 class TestLogHalf(TestLog):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py
index 1b81455f4779746e1e3291d86ed8a31a96e2bc50..91e8a86ce685609e088a1f278b368f76e7dc2416 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py
@@ -136,19 +136,20 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):
         ref_out = np.apply_along_axis(ref_log_softmax, axis, self.x)
 
         logsoftmax = paddle.nn.LogSoftmax(axis)
+        paddle.enable_static()
         # test static api
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.fluid.data(name='x', shape=self.x_shape)
             y = logsoftmax(x)
             exe = paddle.static.Executor(self.place)
             out = exe.run(feed={'x': self.x}, fetch_list=[y])
-            self.assertTrue(np.allclose(out[0], ref_out))
+            np.testing.assert_allclose(out[0], ref_out, rtol=1e-6)
 
         # test dygrapg api
         paddle.disable_static()
         x = paddle.to_tensor(self.x)
         y = logsoftmax(x)
-        self.assertTrue(np.allclose(y.numpy(), ref_out))
+        np.testing.assert_allclose(y.numpy(), ref_out, rtol=1e-6)
         paddle.enable_static()
 
     def test_check_api(self):
@@ -177,12 +178,12 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
             y = F.log_softmax(x, axis, dtype)
             exe = paddle.static.Executor(self.place)
             out = exe.run(feed={'x': self.x}, fetch_list=[y])
-            self.assertTrue(np.allclose(out[0], ref_out))
+            np.testing.assert_allclose(out[0], ref_out, rtol=1e-6)
 
         paddle.disable_static()
         x = paddle.to_tensor(self.x)
         y = F.log_softmax(x, axis, dtype)
-        self.assertTrue(np.allclose(y.numpy(), ref_out), True)
+        np.testing.assert_allclose(y.numpy(), ref_out, rtol=1e-6)
         paddle.enable_static()
 
     def test_check_api(self):
@@ -191,12 +192,14 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
         self.check_api(-1, 'float32')
 
     def test_errors(self):
+        paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.fluid.data(name='X1', shape=[100], dtype='int32')
             self.assertRaises(TypeError, F.log_softmax, x)
 
             x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
             self.assertRaises(TypeError, F.log_softmax, x, dtype='int32')
+        paddle.disable_static()
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_merged_momentum_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_merged_momentum_op_mlu.py
index 31eb98b7a88503a1cc5e75c47c4b07ab1fa6e70f..7a1f590cf3da625948e456b01f191083a110a5e8 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_merged_momentum_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_merged_momentum_op_mlu.py
@@ -316,7 +316,7 @@ class TestMergedMomentum(unittest.TestCase):
         outs2 = run_op(False)
         self.assertEqual(len(outs1), len(outs2))
         for i, (out1, out2) in enumerate(zip(outs1, outs2)):
-            self.assertTrue(np.allclose(out1, out2, atol=1e-7))
+            np.testing.assert_allclose(out1, out2, atol=1e-7)
 
     def test_main(self):
         self.check_with_place(self.place, multi_precision=False)
@@ -370,13 +370,13 @@ class TestMergedMomentum2(unittest.TestCase):
         outs2 = run_op(use_nesterov=True, use_merged=False)
         self.assertEqual(len(outs1), len(outs2))
         for i, (out1, out2) in enumerate(zip(outs1, outs2)):
-            self.assertTrue(np.allclose(out1, out2, atol=1e-7))
+            np.testing.assert_allclose(out1, out2, atol=1e-7)
 
         outs3 = run_op(use_nesterov=False, use_merged=True)
         outs4 = run_op(use_nesterov=False, use_merged=False)
         self.assertEqual(len(outs3), len(outs4))
         for j, (out3, out4) in enumerate(zip(outs3, outs4)):
-            self.assertTrue(np.allclose(out3, out4, atol=1e-7))
+            np.testing.assert_allclose(out3, out4, atol=1e-7)
 
     def test_main(self):
         self.check_with_place(self.place, multi_precision=False)
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_nearest_interp_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_nearest_interp_v2_op_mlu.py
index 59078a21d0fa871ab47a27310920fc5e90622d67..60364818439f465f7782ab3e0758cbb5b3c1f126 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_nearest_interp_v2_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_nearest_interp_v2_op_mlu.py
@@ -594,10 +594,10 @@ class TestNearestAPI(unittest.TestCase):
                                           out_h=12,
                                           out_w=12,
                                           align_corners=False)
-        self.assertTrue(
-            np.allclose(results[0], np.transpose(expect_res, (0, 2, 3, 1))))
+        np.testing.assert_allclose(results[0],
+                                   np.transpose(expect_res, (0, 2, 3, 1)))
         for i in range(len(results) - 1):
-            self.assertTrue(np.allclose(results[i + 1], expect_res))
+            np.testing.assert_allclose(results[i + 1], expect_res)
 
 
 class TestNearestInterpException(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py
index d33646cbfa32bc77c0c23fcd463c65a648e52d32..c3715342e7377e83ca012d9c9acab89d7eafbedd 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py
@@ -993,7 +993,7 @@ class TestDygraphPool2DAPI(unittest.TestCase):
                                   paddings=[0, 0],
                                   pool_type='max',
                                   data_format='NHWC')
-            self.assertTrue(np.allclose(out1.numpy(), out2))
+            np.testing.assert_allclose(out1.numpy(), out2)
 
     def test_lower_case(self):
         with fluid.dygraph.guard():
@@ -1010,7 +1010,7 @@ class TestDygraphPool2DAPI(unittest.TestCase):
                                   paddings=[0, 0],
                                   pool_type='max',
                                   data_format='NHWC')
-            self.assertTrue(np.allclose(out1.numpy(), out2))
+            np.testing.assert_allclose(out1.numpy(), out2)
 
     def test_upper_case(self):
         with fluid.dygraph.guard():
@@ -1027,7 +1027,7 @@ class TestDygraphPool2DAPI(unittest.TestCase):
                                   paddings=[0, 0],
                                   pool_type='max',
                                   data_format='NHWC')
-            self.assertTrue(np.allclose(out1.numpy(), out2))
+            np.testing.assert_allclose(out1.numpy(), out2)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_randperm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_randperm_op_mlu.py
index 445dc449236b3f1eb891cdcba1c53d288c4ccacb..5412e6c4a7b606744ba0a76b5df63134f7f2b337 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_randperm_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_randperm_op_mlu.py
@@ -160,61 +160,61 @@ class TestRandomValue(unittest.TestCase):
         expect = [
             24562, 8409, 9379, 10328, 20503, 18059, 9681, 21883, 11783, 27413
         ]
-        self.assertTrue(np.array_equal(x[0:10], expect))
+        np.testing.assert_allclose(x[0:10], expect)
         expect = [
             29477, 27100, 9643, 16637, 8605, 16892, 27767, 2724, 1612, 13096
         ]
-        self.assertTrue(np.array_equal(x[10000:10010], expect))
+        np.testing.assert_allclose(x[10000:10010], expect)
         expect = [
             298, 4104, 16479, 22714, 28684, 7510, 14667, 9950, 15940, 28343
         ]
-        self.assertTrue(np.array_equal(x[20000:20010], expect))
+        np.testing.assert_allclose(x[20000:20010], expect)
 
         x = paddle.randperm(30000, dtype='int64').numpy()
         expect = [
             6587, 1909, 5525, 23001, 6488, 14981, 14355, 3083, 29561, 8171
         ]
-        self.assertTrue(np.array_equal(x[0:10], expect))
+        np.testing.assert_allclose(x[0:10], expect)
         expect = [
             23460, 12394, 22501, 5427, 20185, 9100, 5127, 1651, 25806, 4818
         ]
-        self.assertTrue(np.array_equal(x[10000:10010], expect))
+        np.testing.assert_allclose(x[10000:10010], expect)
         expect = [5829, 4508, 16193, 24836, 8526, 242, 9984, 9243, 1977, 11839]
-        self.assertTrue(np.array_equal(x[20000:20010], expect))
+        np.testing.assert_allclose(x[20000:20010], expect)
 
         x = paddle.randperm(30000, dtype='float32').numpy()
         expect = [
             5154., 10537., 14362., 29843., 27185., 28399., 27561., 4144.,
             22906., 10705.
         ]
-        self.assertTrue(np.array_equal(x[0:10], expect))
+        np.testing.assert_allclose(x[0:10], expect)
         expect = [
             1958., 18414., 20090., 21910., 22746., 27346., 22347., 3002.,
             4564., 26991.
         ]
-        self.assertTrue(np.array_equal(x[10000:10010], expect))
+        np.testing.assert_allclose(x[10000:10010], expect)
         expect = [
             25580., 12606., 553., 16387., 29536., 4241., 20946., 16899.,
             16339., 4662.
         ]
-        self.assertTrue(np.array_equal(x[20000:20010], expect))
+        np.testing.assert_allclose(x[20000:20010], expect)
 
         x = paddle.randperm(30000, dtype='float64').numpy()
         expect = [
             19051., 2449., 21940., 11121., 282., 7330., 13747., 24321., 21147.,
             9163.
         ]
-        self.assertTrue(np.array_equal(x[0:10], expect))
+        np.testing.assert_allclose(x[0:10], expect)
         expect = [
             15483., 1315., 5723., 20954., 13251., 25539., 5074., 1823., 14945.,
             17624.
         ]
-        self.assertTrue(np.array_equal(x[10000:10010], expect))
+        np.testing.assert_allclose(x[10000:10010], expect)
         expect = [
             10516., 2552., 29970., 5941., 986., 8007., 24805., 26753., 12202.,
             21404.
         ]
-        self.assertTrue(np.array_equal(x[20000:20010], expect))
+        np.testing.assert_allclose(x[20000:20010], expect)
 
         paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_relu6_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_relu6_op_mlu.py
index a6bb42878a6845ee57ca9da75519313b1630db29..6c2b35912dc852c6a663c34f12dc6cd1af9292ab 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_relu6_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_relu6_op_mlu.py
@@ -163,8 +163,8 @@ class TestRelu6Net(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         mlu_pred, mlu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-        self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+        np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(mlu_loss, cpu_loss, rtol=1e-6)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_relu_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_relu_op_mlu.py
index 495711e5303f35ad86028d61b030743a7cfbb84b..53b3903e951d1fccdd810e4bb923927686bac3dc 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_relu_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_relu_op_mlu.py
@@ -165,8 +165,8 @@ class TestReluNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         mlu_pred, mlu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-        self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+        np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(mlu_loss, cpu_loss, rtol=1e-6)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py
index d901813e3482ad03e80756d13cdf1620ca61e722..cd0b14e0c800f760a0e9883fb02ed8b2a095276c 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py
@@ -212,7 +212,7 @@ class TestScatterAPI(unittest.TestCase):
             mlu_value = mlu_exe.run(feed=feed, fetch_list=fetch)[0]
             return mlu_value
 
-        self.assertTrue(np.array_equal(test_dygraph(), test_static_graph()))
+        np.testing.assert_allclose(test_dygraph(), test_static_graph())
 
 
 class TestScatterOpFp16(OpTest):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_slice_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_slice_op_mlu.py
index a074a9d91a8bc6411716e9d022ccdfd58820df0a..71116b4d3cebbd4a72df4c3d3ec01fe7960a6af0 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_slice_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_slice_op_mlu.py
@@ -549,7 +549,7 @@ class TestSliceApiWithTensor(unittest.TestCase):
                               ends=paddle.to_tensor(ends, dtype='int32'))
             a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends)
 
-            self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy()))
+            np.testing.assert_allclose(a_1.numpy(), a_2.numpy())
 
     def test_bool_tensor(self):
         with paddle.fluid.dygraph.guard():
@@ -565,7 +565,7 @@ class TestSliceApiWithTensor(unittest.TestCase):
             y_np = tt[0:3, 1:5, 2:4]
 
             self.assertTrue(paddle.bool == y_paddle.dtype)
-            self.assertTrue(np.array_equal(y_paddle.numpy(), y_np))
+            np.testing.assert_array_equal(y_paddle.numpy(), y_np)
 
 
 class TestImperativeVarBaseGetItem(unittest.TestCase):
@@ -620,11 +620,11 @@ class TestInferShape(unittest.TestCase):
             100,
         ], [0], [1])
         np_slice = x_arr[:, :, 0:1]
-        self.assertTrue(np.array_equal(pp_slice, np_slice))
+        np.testing.assert_allclose(pp_slice, np_slice)
 
         pp_slice = paddle.slice(x, (-100, ), [0], [1])
         np_slice = x_arr[0:1]
-        self.assertTrue(np.array_equal(pp_slice, np_slice))
+        np.testing.assert_allclose(pp_slice, np_slice)
 
         x_arr = np.array([], dtype=np.float32)
         x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0)))
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_softmax_with_cross_entropy_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_softmax_with_cross_entropy_op_mlu.py
index f112cd6f66fa24efff214ea51955aa2dbf576723..25dbbbd028e6ed79b71116b3bdebefbcbea7bac6 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_softmax_with_cross_entropy_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_softmax_with_cross_entropy_op_mlu.py
@@ -157,8 +157,8 @@ class TestPowNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         mlu_pred, mlu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-        self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+        np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-5)
+        np.testing.assert_allclose(mlu_loss, cpu_loss)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py
index 2728473f550885a33a4cadcc6142d2a608c5e393..5f283a6c157bf8aec567be8f8be5761858d04839 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py
@@ -108,8 +108,8 @@ class API_TestSplit(unittest.TestCase):
             input1 = np.random.random([1, 10]).astype('float32')
             r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1])
             ex_x0, ex_x1 = np.split(input1, (3, ), axis=1)
-            self.assertTrue(np.allclose(ex_x0, r0))
-            self.assertTrue(np.allclose(ex_x1, r1))
+            np.testing.assert_allclose(ex_x0, r0)
+            np.testing.assert_allclose(ex_x1, r1)
 
 
 class API_TestSplit2(unittest.TestCase):
@@ -123,8 +123,8 @@ class API_TestSplit2(unittest.TestCase):
             input1 = np.random.random([1, 10]).astype('float32')
             r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1])
             ex_x0, ex_x1 = np.split(input1, 2, axis=1)
-            self.assertTrue(np.allclose(ex_x0, r0))
-            self.assertTrue(np.allclose(ex_x1, r1))
+            np.testing.assert_allclose(ex_x0, r0)
+            np.testing.assert_allclose(ex_x1, r1)
 
 
 class API_TestDygraphSplit(unittest.TestCase):
@@ -139,9 +139,9 @@ class API_TestDygraphSplit(unittest.TestCase):
             x1_out = x1.numpy()
             x2_out = x2.numpy()
             ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-            self.assertTrue(np.allclose(ex_x0, x0_out))
-            self.assertTrue(np.allclose(ex_x1, x1_out))
-            self.assertTrue(np.allclose(ex_x2, x2_out))
+            np.testing.assert_allclose(ex_x0, x0_out)
+            np.testing.assert_allclose(ex_x1, x1_out)
+            np.testing.assert_allclose(ex_x2, x2_out)
 
     def test_out2(self):
         with fluid.dygraph.guard(paddle.MLUPlace(0)):
@@ -153,9 +153,9 @@ class API_TestDygraphSplit(unittest.TestCase):
             x1_out = x1.numpy()
             x2_out = x2.numpy()
             ex_x0, ex_x1, ex_x2 = np.split(input_1, (1, 3), axis=1)
-            self.assertTrue(np.allclose(ex_x0, x0_out))
-            self.assertTrue(np.allclose(ex_x1, x1_out))
-            self.assertTrue(np.allclose(ex_x2, x2_out))
+            np.testing.assert_allclose(ex_x0, x0_out)
+            np.testing.assert_allclose(ex_x1, x1_out)
+            np.testing.assert_allclose(ex_x2, x2_out)
 
     # attr(axis) is Tensor
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_squared_l2_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_squared_l2_norm_op_mlu.py
index 6a81c11c70b1bd9d295dd5d22248b5c55ce8f102..7dc668dfe56f6a2581ebeda9bb597d3832d74325 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_squared_l2_norm_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_squared_l2_norm_op_mlu.py
@@ -59,7 +59,7 @@ class TestL2LossDeterministic(unittest.TestCase):
             x = paddle.to_tensor(x_np)
             y1 = _C_ops.squared_l2_norm(x)
             y2 = _C_ops.squared_l2_norm(x)
-            self.assertTrue(np.array_equal(y1.numpy(), y2.numpy()))
+            np.testing.assert_allclose(y1.numpy(), y2.numpy())
 
     def test_main(self):
         self.check_place(paddle.CPUPlace())
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py
index b7dec570394527e3ed862f00bb25c9a7d7157400..573081f9fe0f7f1d57d1b377a269ca6545e4e556 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py
@@ -148,7 +148,7 @@ class API_test(unittest.TestCase):
                           },
                           fetch_list=[result_stack])
         expected_result = np.stack([input1, input2, input3], axis=0)
-        self.assertTrue(np.allclose(expected_result, result))
+        np.testing.assert_allclose(expected_result, result)
 
     def test_single_tensor_error(self):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
@@ -169,14 +169,14 @@ class API_DygraphTest(unittest.TestCase):
             result = paddle.stack([x1, x2, x3])
             result_np = result.numpy()
         expected_result = np.stack([data1, data2, data3])
-        self.assertTrue(np.allclose(expected_result, result_np))
+        np.testing.assert_allclose(expected_result, result_np)
 
         with fluid.dygraph.guard(place=paddle.MLUPlace(0)):
             y1 = fluid.dygraph.to_variable(data1)
             result = paddle.stack([y1], axis=0)
             result_np_2 = result.numpy()
         expected_result_2 = np.stack([data1], axis=0)
-        self.assertTrue(np.allclose(expected_result_2, result_np_2))
+        np.testing.assert_allclose(expected_result_2, result_np_2)
 
     def test_single_tensor_error(self):
         with fluid.dygraph.guard(place=paddle.MLUPlace(0)):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_tanh_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_tanh_op_mlu.py
index e1023a94bec5fe32045c09599e6e3699813d2364..0b2d2ac86ff525217de6695af9b3bd86c8492b4f 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_tanh_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_tanh_op_mlu.py
@@ -145,8 +145,8 @@ class TestTanhNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         mlu_pred, mlu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-        self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+        np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(mlu_loss, cpu_loss)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_top_k_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_top_k_v2_op_mlu.py
index 57081f1a54564dade28f2f6599d8388271cdff88..94cae5f355b1bbd6e6c9b7c3ce60b6ab7dfc074e 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_top_k_v2_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_top_k_v2_op_mlu.py
@@ -189,43 +189,43 @@ class TestTopKAPI(unittest.TestCase):
         # test case for basic test case 1
         paddle_result = paddle.topk(input_tensor, k=2)
         numpy_result = numpy_topk(self.input_data, k=2)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
         # test case for basic test case 2 with axis
         paddle_result = paddle.topk(input_tensor, k=2, axis=1)
         numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
         # test case for basic test case 3 with tensor K
         k_tensor = paddle.to_tensor(np.array([2]))
         paddle_result = paddle.topk(input_tensor, k=k_tensor, axis=1)
         numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
         # test case for basic test case 4 with tensor largest
         k_tensor = paddle.to_tensor(np.array([2]))
         paddle_result = paddle.topk(input_tensor, k=2, axis=1, largest=False)
         numpy_result = numpy_topk(self.input_data, k=2, axis=1, largest=False)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
         # test case for basic test case 5 with axis -1
         k_tensor = paddle.to_tensor(np.array([2]))
         paddle_result = paddle.topk(input_tensor, k=2, axis=-1, largest=False)
         numpy_result = numpy_topk(self.input_data, k=2, axis=-1, largest=False)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
         # test case for basic test case 6 for the partial sort
         paddle_result = paddle.topk(large_input_tensor, k=1, axis=-1)
         numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
         # test case for basic test case 7 for the unsorted
         paddle_result = paddle.topk(input_tensor, k=2, axis=1, sorted=False)
         sort_paddle = numpy_topk(np.array(paddle_result[0].numpy()),
                                  axis=1,
                                  k=2)
         numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-        self.assertTrue(np.allclose(sort_paddle[0], numpy_result[0]))
+        np.testing.assert_allclose(sort_paddle[0], numpy_result[0])
 
     def run_static(self, place):
         paddle.enable_static()
@@ -263,32 +263,32 @@ class TestTopKAPI(unittest.TestCase):
             result7[0], result7[1]
         ])
         numpy_result = numpy_topk(self.input_data, k=2)
-        self.assertTrue(np.allclose(paddle_result[0], numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[1], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[0], numpy_result[0])
+        np.testing.assert_allclose(paddle_result[1], numpy_result[1])
 
         numpy_result = numpy_topk(self.input_data, k=2, axis=-1)
-        self.assertTrue(np.allclose(paddle_result[2], numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[3], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[2], numpy_result[0])
+        np.testing.assert_allclose(paddle_result[3], numpy_result[1])
 
         numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-        self.assertTrue(np.allclose(paddle_result[4], numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[5], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[4], numpy_result[0])
+        np.testing.assert_allclose(paddle_result[5], numpy_result[1])
 
         numpy_result = numpy_topk(self.input_data, k=2, axis=1, largest=False)
-        self.assertTrue(np.allclose(paddle_result[6], numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[7], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[6], numpy_result[0])
+        np.testing.assert_allclose(paddle_result[7], numpy_result[1])
 
         numpy_result = numpy_topk(self.input_data, k=2, axis=-1, largest=False)
-        self.assertTrue(np.allclose(paddle_result[8], numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[9], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[8], numpy_result[0])
+        np.testing.assert_allclose(paddle_result[9], numpy_result[1])
 
         numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1)
-        self.assertTrue(np.allclose(paddle_result[10], numpy_result[0]))
-        self.assertTrue(np.allclose(paddle_result[11], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[10], numpy_result[0])
+        np.testing.assert_allclose(paddle_result[11], numpy_result[1])
 
         sort_paddle = numpy_topk(paddle_result[12], axis=1, k=2)
         numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-        self.assertTrue(np.allclose(sort_paddle[0], numpy_result[0]))
+        np.testing.assert_allclose(sort_paddle[0], numpy_result[0])
 
     def test_cases(self):
         places = [core.CPUPlace()]
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py
index ad6359ed714d630559c3e43ce4aff148b3e93bac..da6557beb680d06cc6b97e9324f20733b33769a8 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py
@@ -154,8 +154,8 @@ class TestTrilTriuOpAPI(unittest.TestCase):
                 feed={"x": data},
                 fetch_list=[tril_out, triu_out],
             )
-            self.assertTrue(np.allclose(tril_out, np.tril(data)))
-            self.assertTrue(np.allclose(triu_out, np.triu(data)))
+            np.testing.assert_allclose(tril_out, np.tril(data))
+            np.testing.assert_allclose(triu_out, np.triu(data))
 
     def test_api_with_dygraph(self):
         paddle.disable_static()
@@ -167,8 +167,8 @@ class TestTrilTriuOpAPI(unittest.TestCase):
                 x = fluid.dygraph.to_variable(data)
                 tril_out, triu_out = tensor.tril(x).numpy(), tensor.triu(
                     x).numpy()
-                self.assertTrue(np.allclose(tril_out, np.tril(data)))
-                self.assertTrue(np.allclose(triu_out, np.triu(data)))
+                np.testing.assert_allclose(tril_out, np.tril(data))
+                np.testing.assert_allclose(triu_out, np.triu(data))
 
     def test_fluid_api(self):
         paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py
index 70289853e8921b6f64fa22b12b1cca42d09491c3..ca1b7b3e602b46743f65ad0138290f8bad0558b3 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py
@@ -71,8 +71,11 @@ class TestMLUUniformRandomOp(OpTest):
 
     def verify_output(self, outs):
         hist, prob = self.output_hist(np.array(outs[0]))
-        self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01),
-                        "hist: " + str(hist))
+        np.testing.assert_allclose(hist,
+                                   prob,
+                                   rtol=0,
+                                   atol=0.01,
+                                   err_msg="hist: " + str(hist))
 
 
 class TestMLUUniformRandomOpSelectedRows(unittest.TestCase):
@@ -100,8 +103,11 @@ class TestMLUUniformRandomOpSelectedRows(unittest.TestCase):
         op.run(scope, place)
         self.assertEqual(out.get_tensor().shape(), [1000, 784])
         hist, prob = output_hist(np.array(out.get_tensor()))
-        self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01),
-                        "hist: " + str(hist))
+        np.testing.assert_allclose(hist,
+                                   prob,
+                                   rtol=0,
+                                   atol=0.01,
+                                   err_msg="hist: " + str(hist))
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py
index 3f1d553f7386ecf22c4f60d46d094f73f2b92f4e..a22a4b1b8c7eb1c28e0b2f8f8d673a360cf306b1 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py
@@ -288,7 +288,7 @@ class TestWhereDygraphAPI(unittest.TestCase):
             result = paddle.where(cond, a, b)
             result = result.numpy()
             expect = np.where(cond, a, b)
-            self.assertTrue(np.array_equal(expect, result))
+            np.testing.assert_array_equal(expect, result)
 
     def test_dygraph_api_broadcast_1(self):
         cond_shape = [2, 4]
@@ -351,7 +351,7 @@ class TestWhereDygraphAPI(unittest.TestCase):
                           fetch_list=[z.name],
                           return_numpy=False)
             expect_out = np.array([[0, 0], [1, 1]])
-            self.assertTrue(np.allclose(expect_out, np.array(res)))
+            np.testing.assert_allclose(expect_out, np.array(res))
         data = np.array([True, True, False])
         with program_guard(Program(), Program()):
             x = fluid.layers.data(name='x', shape=[(-1)])
@@ -364,7 +364,7 @@ class TestWhereDygraphAPI(unittest.TestCase):
                           fetch_list=[z.name],
                           return_numpy=False)
             expect_out = np.array([[0], [1]])
-            self.assertTrue(np.allclose(expect_out, np.array(res)))
+            np.testing.assert_allclose(expect_out, np.array(res))
 
 
 class TestWhereOpError(unittest.TestCase):
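
All of the hunks above apply one mechanical migration: `self.assertTrue(np.allclose(...))` and `self.assertTrue(np.array_equal(...))` become `np.testing.assert_allclose(...)` or `np.testing.assert_array_equal(...)`, with tolerances made explicit where needed. The few supporting edits follow the same goal: `return static_result[0]` unwraps the single fetched array from the list that `Executor.run` returns, and the added `paddle.enable_static()` / `paddle.disable_static()` calls appear intended to make each test set the graph mode it needs rather than relying on test ordering. A minimal standalone sketch of why the new assertion style is preferred (illustrative values only, not part of the diff):

```python
import unittest

import numpy as np


class AssertStyleDemo(unittest.TestCase):

    def test_old_style(self):
        # np.allclose collapses the comparison to a bare bool, so a failure
        # is reported only as "False is not true", with no offending values.
        self.assertTrue(np.allclose([1.0, 2.0], [1.0, 2.001], rtol=1e-3))

    def test_new_style(self):
        # np.testing.assert_allclose raises an AssertionError that prints the
        # mismatched elements and the max absolute/relative differences.
        # Its defaults are also stricter (rtol=1e-7, atol=0) than those of
        # np.allclose (rtol=1e-5, atol=1e-8), which is why many hunks pass
        # rtol=1e-6 or an explicit atol to keep the old tolerance behavior.
        np.testing.assert_allclose([1.0, 2.0], [1.0, 2.001], rtol=1e-3)


if __name__ == '__main__':
    unittest.main()
```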