diff --git a/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py index 648e87f8c3174db1873545b0799054471cf8897f..6baf40486ddb69d3623bc1f645b2edd24f280804 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py @@ -70,8 +70,8 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase): res = exe.run(main_program, feed={'x': feed_x}, fetch_list=[sums.name, x.grad_name]) - self.assertTrue(np.allclose(res[0], feed_add)) - self.assertTrue(np.allclose(res[1], ones / 1000.0)) + np.testing.assert_allclose(res[0], feed_add) + np.testing.assert_allclose(res[1], ones / 1000.0) class TestAssignOpError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py index 6455b157cb2ca28e2b0fc02569a02e03ca3b805a..175bf152fe11385105c5a61d451ca6e69696f1f6 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest -import numpy +import numpy as np import sys sys.path.append("..") @@ -53,7 +53,7 @@ class XPUTestAssignValueOp(XPUOpTestWrapper): self.outputs = {"Out": self.value} def init_data(self): - self.value = numpy.random.random(size=(2, 5)).astype(self.dtype) + self.value = np.random.random(size=(2, 5)).astype(self.dtype) self.attrs["fp32_values"] = [float(v) for v in self.value.flat] def test_forward(self): @@ -62,20 +62,20 @@ class XPUTestAssignValueOp(XPUOpTestWrapper): class TestAssignValueOp2(TestAssignValueOp): def init_data(self): - self.value = numpy.random.random(size=(2, 5)).astype(numpy.int32) + self.value = np.random.random(size=(2, 5)).astype(np.int32) self.attrs["int32_values"] = [int(v) for v in self.value.flat] class 
TestAssignValueOp3(TestAssignValueOp): def init_data(self): - self.value = numpy.random.random(size=(2, 5)).astype(numpy.int64) + self.value = np.random.random(size=(2, 5)).astype(np.int64) self.attrs["int64_values"] = [int(v) for v in self.value.flat] class TestAssignValueOp4(TestAssignValueOp): def init_data(self): - self.value = numpy.random.choice(a=[False, True], - size=(2, 5)).astype(numpy.bool) + self.value = np.random.choice(a=[False, True], + size=(2, 5)).astype(np.bool_) self.attrs["bool_values"] = [int(v) for v in self.value.flat] @@ -83,7 +83,7 @@ class TestAssignApi(unittest.TestCase): def setUp(self): self.init_dtype() - self.value = (-100 + 200 * numpy.random.random(size=(2, 5))).astype( + self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype( self.dtype) self.place = fluid.XPUPlace(0) @@ -98,8 +98,10 @@ exe = fluid.Executor(self.place) [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x]) - self.assertTrue(numpy.array_equal(fetched_x, self.value), - "fetch_x=%s val=%s" % (fetched_x, self.value)) + np.testing.assert_array_equal(fetched_x, + self.value, + err_msg="fetch_x=%s val=%s" % + (fetched_x, self.value)) self.assertEqual(fetched_x.dtype, self.value.dtype) @@ -119,8 +121,8 @@ def setUp(self): self.init_dtype() - self.value = numpy.random.choice(a=[False, True], - size=(2, 5)).astype(numpy.bool) + self.value = np.random.choice(a=[False, True], + size=(2, 5)).astype(np.bool_) self.place = fluid.XPUPlace(0) def init_dtype(self): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py index 9f15b72fe7d8b57a110ae994e477000d2d31dc29..f5c3e2b6a96e49e03f494a45f9f19507c64866e9 100755 --- a/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py @@ -512,7 +512,7 @@ class 
TestBilinearInterpOpAPI(unittest.TestCase): expect_res = bilinear_interp_np( x_data, out_h=12, out_w=12, align_corners=True) for res in results: - self.assertTrue(np.allclose(res, expect_res)) + np.testing.assert_allclose(res, expect_res) ''' if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py index 33198a28933a525fea5a1eac56cc702e341d5baa..d0b41f459eaf2f349ae9c05b20673918bfbc1ccb 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py @@ -186,14 +186,14 @@ class TestClipAPI(unittest.TestCase): }, fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8]) - self.assertTrue(np.allclose(res1, data.clip(0.2, 0.8))) - self.assertTrue(np.allclose(res2, data.clip(0.2, 0.9))) - self.assertTrue(np.allclose(res3, data.clip(min=0.3))) - self.assertTrue(np.allclose(res4, data.clip(max=0.7))) - self.assertTrue(np.allclose(res5, data.clip(min=0.2))) - self.assertTrue(np.allclose(res6, data.clip(max=0.8))) - self.assertTrue(np.allclose(res7, data.clip(max=-1))) - self.assertTrue(np.allclose(res8, data)) + np.testing.assert_allclose(res1, data.clip(0.2, 0.8)) + np.testing.assert_allclose(res2, data.clip(0.2, 0.9)) + np.testing.assert_allclose(res3, data.clip(min=0.3)) + np.testing.assert_allclose(res4, data.clip(max=0.7)) + np.testing.assert_allclose(res5, data.clip(min=0.2)) + np.testing.assert_allclose(res6, data.clip(max=0.8)) + np.testing.assert_allclose(res7, data.clip(max=-1)) + np.testing.assert_allclose(res8, data) paddle.disable_static() def test_clip_dygraph(self): @@ -213,9 +213,9 @@ class TestClipAPI(unittest.TestCase): images = paddle.to_tensor(data, dtype='float32') out_3 = self._executed_api(images, min=v_min, max=v_max) - self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8))) - self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9))) - 
self.assertTrue(np.allclose(out_3.numpy(), data.clip(0.2, 0.8))) + np.testing.assert_allclose(out_1.numpy(), data.clip(0.2, 0.8)) + np.testing.assert_allclose(out_2.numpy(), data.clip(0.2, 0.9)) + np.testing.assert_allclose(out_3.numpy(), data.clip(0.2, 0.8)) def test_errors(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py index b4e8cf6b10e3734b59d49718becd775d1cfb2a06..3227d76a642225116de3eac6657086d08dadedf1 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py @@ -164,7 +164,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper): m = paddle.nn.Dropout(p=0.) m.eval() result = m(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np) class TestDropoutBackward(unittest.TestCase): @@ -188,10 +188,9 @@ class XPUTestDropoutOp(XPUOpTestWrapper): out, mask = core.ops.dropout(input, 'dropout_prob', 0.5) out.backward() - self.assertTrue( - np.array_equal( - input.gradient(), - self.cal_grad_downscale_in_infer(mask.numpy()))) + np.testing.assert_allclose( + input.gradient(), + self.cal_grad_downscale_in_infer(mask.numpy())) def test_backward_upscale_train(self): for place in self.places: @@ -205,10 +204,9 @@ class XPUTestDropoutOp(XPUOpTestWrapper): "upscale_in_train") out.backward() - self.assertTrue( - np.allclose( - input.gradient(), - self.cal_grad_upscale_train(mask.numpy(), prob))) + np.testing.assert_allclose( + input.gradient(), + self.cal_grad_upscale_train(mask.numpy(), prob)) def test_backward_upscale_train_2(self): for place in self.places: @@ -222,10 +220,9 @@ class XPUTestDropoutOp(XPUOpTestWrapper): "upscale_in_train") out.backward() - self.assertTrue( - np.allclose( - input.gradient(), - self.cal_grad_upscale_train(mask.numpy(), prob))) + np.testing.assert_allclose( + input.gradient(), + 
self.cal_grad_upscale_train(mask.numpy(), prob)) support_types = get_xpu_op_support_types('dropout') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fleet_exe_dist_model_run_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fleet_exe_dist_model_run_xpu.py index 851a5b521e17177e35d577b043c39504f35c9f52..867379bf81ef5c67234659c30cd786b0834ba135 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_fleet_exe_dist_model_run_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_fleet_exe_dist_model_run_xpu.py @@ -86,7 +86,7 @@ class TestDistModelRun(unittest.TestCase): print("load inference model api rst:", load_inference_model_rst) # step 5: compare two results - self.assertTrue(np.allclose(dist_model_rst, load_inference_model_rst)) + np.testing.assert_allclose(dist_model_rst, load_inference_model_rst) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py index 0b2470228b94a1b0d2896046c03bc9b22db2836d..4a1601ed99065d8243e862e5abb4abcb9463761b 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py @@ -75,8 +75,12 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper): hist2, _ = np.histogram(data, range=(-3, 5)) hist2 = hist2.astype("float32") hist2 /= float(outs[0].size) - self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.01), - "hist: " + str(hist) + " hist2: " + str(hist2)) + np.testing.assert_allclose(hist, + hist2, + rtol=0, + atol=0.01, + err_msg="hist: " + str(hist) + + " hist2: " + str(hist2)) class TestMeanStdAreInt(TestGaussianRandomOp): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py index 73f61c2d9d5bada2d9d00eb9edfb6024d6c29c9a..f7be0e61d81000e12a871986d429f5b5c1c4c480 100644 --- 
a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py @@ -214,9 +214,10 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper): expected_result = np.matmul(data1.reshape(1, 2), data2.reshape(2, 1)) - self.assertTrue( - np.allclose(np_res, expected_result, atol=1e-3), - "two value is\ + np.testing.assert_allclose(np_res, + expected_result, + atol=1e-3, + err_msg="two value is\ {}\n{}, check diff!".format(np_res, expected_result)) def test_dygraph_without_out(self): @@ -228,8 +229,9 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper): data2 = fluid.dygraph.to_variable(input_array2) out = paddle.mm(data1, data2) expected_result = np.matmul(input_array1, input_array2) - self.assertTrue( - np.allclose(expected_result, out.numpy(), atol=1e-3)) + np.testing.assert_allclose(expected_result, + out.numpy(), + atol=1e-3) class Test_API_Matmul(unittest.TestCase): @@ -244,8 +246,9 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper): self.in_type) out = paddle.matmul(data1, data2) expected_result = np.matmul(input_array1, input_array2) - self.assertTrue( - np.allclose(expected_result, out.numpy(), atol=1e-3)) + np.testing.assert_allclose(expected_result, + out.numpy(), + atol=1e-3) class API_TestMmError(unittest.TestCase):