diff --git a/test/auto_parallel/random_control_unittest.py b/test/auto_parallel/random_control_unittest.py
index 6fcee26d21201372cab65a72fc0e7be63d33391e..addc38469988526c5ffea37fe86097cc24844363 100644
--- a/test/auto_parallel/random_control_unittest.py
+++ b/test/auto_parallel/random_control_unittest.py
@@ -86,7 +86,7 @@ class TestRandomControl(unittest.TestCase):
         mask_tensor_remote = paddle.ones_like(mask_tensor_local)
         dy_broadcast_helper(mask_tensor_remote)
         if equal:
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 mask_tensor_remote.numpy(), mask_tensor_local.numpy()
             )
         else:
@@ -205,7 +205,7 @@ class TestRandomControl(unittest.TestCase):
         for i in range(7):
             mask_fw = mask_np_list[i].astype("float32")
             mask_rc = mask_np_list[i + 7].astype("float32")
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 mask_fw,
                 mask_rc,
             )
diff --git a/test/collective/fleet/dygraph_dist_save_load.py b/test/collective/fleet/dygraph_dist_save_load.py
index 0d4b50a9eebc4b1578cac011be18db41630e4a0b..b2e6216f546120f1d805fad391513c4958d3d0c0 100644
--- a/test/collective/fleet/dygraph_dist_save_load.py
+++ b/test/collective/fleet/dygraph_dist_save_load.py
@@ -208,7 +208,7 @@ def step_check(path1, path2):
     m1 = paddle.load(path1)
     m2 = paddle.load(path2)
     for v1, v2 in zip(m1, m2):
-        assert np.allclose(v1.numpy(), v2.numpy())
+        np.testing.assert_allclose(v1.numpy(), v2.numpy())
         print(f"value same: {v1.name}")
 
 
diff --git a/test/collective/fleet/dygraph_save_for_auto_infer.py b/test/collective/fleet/dygraph_save_for_auto_infer.py
index 724267fbaf73644f953a0103e76790ba47488891..f184659197d52458a665d4c8d00a8cb99141cfc3 100644
--- a/test/collective/fleet/dygraph_save_for_auto_infer.py
+++ b/test/collective/fleet/dygraph_save_for_auto_infer.py
@@ -267,7 +267,7 @@ def step_check(output_dir):
         m1 = np.load(p1).reshape(-1)
         m2 = np.load(p2).reshape(-1)
         try:
-            assert np.allclose(m1, m2, rtol=1e-5, atol=1e-6)
+            np.testing.assert_allclose(m1, m2, rtol=1e-5, atol=1e-6)
         except:
             diff = m1 - m2
             logger.error(f"max diff{diff.max()}, min diff: {diff.min()}")
diff --git a/test/collective/fleet/fused_attention_pass_with_mp.py b/test/collective/fleet/fused_attention_pass_with_mp.py
index c8a1673e6630da05cabfce24a9d13065885283b6..2f1e657cfc8be464c2755deeae326f5cfb866973 100644
--- a/test/collective/fleet/fused_attention_pass_with_mp.py
+++ b/test/collective/fleet/fused_attention_pass_with_mp.py
@@ -234,7 +234,7 @@ class TestFusedAttentionPassWithMP(unittest.TestCase):
     def test_pass(self):
         fused_rst = self.get_rst(use_pass=True)
         non_fused_rst = self.get_rst()
-        assert np.allclose(fused_rst, non_fused_rst, atol=1e-5)
+        np.testing.assert_allclose(fused_rst, non_fused_rst, atol=1e-5)
 
 
 if __name__ == "__main__":
diff --git a/test/collective/fleet/hybrid_parallel_communicate_group.py b/test/collective/fleet/hybrid_parallel_communicate_group.py
index e89e807ae4309fe075173685b69b7f92c5e7f039..1b9febf04f8749a1c2e16aa8fb1fbdf4a9e870ee 100644
--- a/test/collective/fleet/hybrid_parallel_communicate_group.py
+++ b/test/collective/fleet/hybrid_parallel_communicate_group.py
@@ -58,28 +58,28 @@ class TestNewGroupAPI:
             sync_op=True,
         )
         if dp_rank == 0:
-            assert np.array_equal(result, self.tensor2)
+            np.testing.assert_array_equal(result, self.tensor2)
         elif dp_rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test scatter api ok")
 
         paddle.distributed.broadcast(result, src=1, group=dp_gp, sync_op=True)
-        assert np.array_equal(result, self.tensor1)
+        np.testing.assert_array_equal(result, self.tensor1)
         print("test broadcast api ok")
 
         paddle.distributed.reduce(
             result, dst=dp_src_rank, group=dp_gp, sync_op=True
         )
         if dp_rank == 0:
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 result, paddle.add(self.tensor1, self.tensor1)
             )
         elif dp_rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test reduce api ok")
 
         paddle.distributed.all_reduce(result, sync_op=True)
-        assert np.array_equal(
+        np.testing.assert_array_equal(
             result,
             paddle.add(paddle.add(self.tensor1, self.tensor1), self.tensor1),
         )
@@ -93,8 +93,8 @@ class TestNewGroupAPI:
         paddle.distributed.all_gather(
             result, self.tensor1, group=dp_gp, sync_op=True
         )
-        assert np.array_equal(result[0], self.tensor1)
-        assert np.array_equal(result[1], self.tensor1)
+        np.testing.assert_array_equal(result[0], self.tensor1)
+        np.testing.assert_array_equal(result[1], self.tensor1)
         print("test all_gather api ok")
 
         paddle.distributed.barrier(group=dp_gp)
diff --git a/test/collective/fleet/new_group.py b/test/collective/fleet/new_group.py
index 1945bae835571dbd92634ab83fd1c1ae164de855..deeea0666501906db176d5604b2153d154072e3e 100644
--- a/test/collective/fleet/new_group.py
+++ b/test/collective/fleet/new_group.py
@@ -36,26 +36,26 @@ class TestNewGroupAPI:
             result, [self.tensor2, self.tensor1], src=0, group=gp, sync_op=True
         )
         if gp.rank == 0:
-            assert np.array_equal(result, self.tensor2)
+            np.testing.assert_array_equal(result, self.tensor2)
         elif gp.rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test scatter api ok")
 
         paddle.distributed.broadcast(result, src=1, group=gp, sync_op=True)
-        assert np.array_equal(result, self.tensor1)
+        np.testing.assert_array_equal(result, self.tensor1)
         print("test broadcast api ok")
 
         paddle.distributed.reduce(result, dst=0, group=gp, sync_op=True)
         if gp.rank == 0:
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 result, paddle.add(self.tensor1, self.tensor1)
             )
         elif gp.rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test reduce api ok")
 
         paddle.distributed.all_reduce(result, sync_op=True)
-        assert np.array_equal(
+        np.testing.assert_array_equal(
             result,
             paddle.add(paddle.add(self.tensor1, self.tensor1), self.tensor1),
         )
@@ -69,8 +69,8 @@ class TestNewGroupAPI:
         paddle.distributed.all_gather(
             result, self.tensor1, group=gp, sync_op=True
         )
-        assert np.array_equal(result[0], self.tensor1)
-        assert np.array_equal(result[1], self.tensor1)
+        np.testing.assert_array_equal(result[0], self.tensor1)
+        np.testing.assert_array_equal(result[1], self.tensor1)
         print("test all_gather api ok")
 
         paddle.distributed.barrier(group=gp)
diff --git a/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py b/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py
index 5de19dfb4113b0634488da5b594c89a72db27a88..6f71afb296efbd48c0cca7fd1389a9f59c75000e 100644
--- a/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py
+++ b/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py
@@ -201,8 +201,8 @@ class TestAmpScaler(unittest.TestCase):
             data = paddle.rand([10, 1024])
             scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)
             scaled_data = scaler.scale(data)
-            self.assertEqual(
-                np.array_equal(scaled_data.numpy(), data.numpy() * 1024), True
+            np.testing.assert_array_equal(
+                scaled_data.numpy(), data.numpy() * 1024
             )
 
     def test_scale(self):
diff --git a/test/collective/multinode/dygraph_hybrid_dp.py b/test/collective/multinode/dygraph_hybrid_dp.py
index 3d2f8f9e5d3b3bf24774837b22dfc1a70da99a19..d31177d9813c096e86937f2b700a4f6d83223a0e 100644
--- a/test/collective/multinode/dygraph_hybrid_dp.py
+++ b/test/collective/multinode/dygraph_hybrid_dp.py
@@ -39,7 +39,7 @@ class TestDygrapgHybridDP(TestCollectiveAPIRunnerBase):
         paddle.distributed.collective.all_reduce(data_part)
         data_reduced = data_part
         data_sumed = np.sum(data, axis=0)
-        assert np.allclose(
+        np.testing.assert_allclose(
             data_sumed, data_reduced.numpy(), rtol=1e-8, atol=1e-8
         )
 
diff --git a/test/collective/multinode/dygraph_hybrid_dpppmp.py b/test/collective/multinode/dygraph_hybrid_dpppmp.py
index 55a130e3f6ad7a5e31adad5bc0d85a604373c672..4a52d305090b8fffac4bb40f24b110a69e5920ba 100644
--- a/test/collective/multinode/dygraph_hybrid_dpppmp.py
+++ b/test/collective/multinode/dygraph_hybrid_dpppmp.py
@@ -197,7 +197,9 @@ class TestDygrapgHybridDPPPMP(TestCollectiveAPIRunnerBase):
             loss_base_arr.append(loss_base.numpy())
             loss_hybrid_arr.append(loss.numpy())
 
-        assert np.allclose(loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5)
+        np.testing.assert_allclose(
+            loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5
+        )
 
 
 if __name__ == "__main__":
diff --git a/test/collective/multinode/dygraph_hybrid_fp16.py b/test/collective/multinode/dygraph_hybrid_fp16.py
index 34319a6c018c6d734b964e52524c17f6fbba78a7..6bd3e4390a190213d5646b720fc9ffb5b67b7ec2 100644
--- a/test/collective/multinode/dygraph_hybrid_fp16.py
+++ b/test/collective/multinode/dygraph_hybrid_fp16.py
@@ -210,7 +210,9 @@ class TestDygraphHybridFp16(TestCollectiveAPIRunnerBase):
             loss_base_arr.append(loss_base.numpy())
             loss_hybrid_arr.append(loss)
 
-        assert np.allclose(loss_base_arr, loss_hybrid_arr, rtol=1e-3, atol=1e-3)
+        np.testing.assert_allclose(
+            loss_base_arr, loss_hybrid_arr, rtol=1e-3, atol=1e-3
+        )
 
 
 if __name__ == "__main__":
diff --git a/test/collective/multinode/dygraph_hybrid_recompute.py b/test/collective/multinode/dygraph_hybrid_recompute.py
index 1902b716296d84c76423ad6adf5982f5d29c03c0..6de5e336acf3fa2177b72328f61526310fbb1df1 100644
--- a/test/collective/multinode/dygraph_hybrid_recompute.py
+++ b/test/collective/multinode/dygraph_hybrid_recompute.py
@@ -186,7 +186,9 @@ class TestDygrapgHybridRecompute(TestCollectiveAPIRunnerBase):
             loss_base_arr.append(loss_base.numpy())
             loss_hybrid_arr.append(loss)
 
-        assert np.allclose(loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5)
+        np.testing.assert_allclose(
+            loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5
+        )
 
 
 if __name__ == "__main__":
diff --git a/test/collective/process_group_gloo.py b/test/collective/process_group_gloo.py
index 20dcae5928ad977a627cf87cf07f411923d469c4..b6ae187cc5ffaaddf9ed614e40f8f98a66a4434f 100644
--- a/test/collective/process_group_gloo.py
+++ b/test/collective/process_group_gloo.py
@@ -76,11 +76,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         if rank == 0:
             task = pg.allreduce(tensor_x, core.ReduceOp.MAX)
             task.wait()
-            assert np.array_equal(tensor_x, max_result)
+            np.testing.assert_array_equal(tensor_x, max_result)
         else:
             task = pg.allreduce(tensor_y, core.ReduceOp.MAX)
             task.wait()
-            assert np.array_equal(tensor_y, max_result)
+            np.testing.assert_array_equal(tensor_y, max_result)
 
         print("test allreduce max api ok")
 
@@ -95,10 +95,10 @@ class TestProcessGroupFp32(unittest.TestCase):
         broadcast_result = paddle.assign(tensor_x)
         if rank == 0:
             task = pg.broadcast(tensor_x, 0)
-            assert np.array_equal(broadcast_result, tensor_x)
+            np.testing.assert_array_equal(broadcast_result, tensor_x)
         else:
             task = pg.broadcast(tensor_y, 0)
-            assert np.array_equal(broadcast_result, tensor_y)
+            np.testing.assert_array_equal(broadcast_result, tensor_y)
         print("test broadcast api ok")
 
         # test send_recv
@@ -116,11 +116,11 @@ class TestProcessGroupFp32(unittest.TestCase):
             task = pg.send(tensor_x, pg.size() - 1, True)
         elif pg.rank() == pg.size() - 1:
             task = pg.recv(tensor_y_1, 0, True)
-            assert np.array_equal(send_recv_result_1, tensor_y_1)
+            np.testing.assert_array_equal(send_recv_result_1, tensor_y_1)
 
         if pg.rank() == 0:
             task = pg.recv(tensor_x, pg.size() - 1, True)
-            assert np.array_equal(send_recv_result_2, tensor_x)
+            np.testing.assert_array_equal(send_recv_result_2, tensor_x)
         elif pg.rank() == pg.size() - 1:
             task = pg.send(tensor_y_2, 0, True)
         print("test send_recv api ok")
@@ -159,8 +159,8 @@ class TestProcessGroupFp32(unittest.TestCase):
             out_2 = paddle.slice(
                 tensor_out, [0], [out_shape[0] // 2], [out_shape[0]]
             )
-            assert np.array_equal(tensor_x, out_1)
-            assert np.array_equal(tensor_y, out_2)
+            np.testing.assert_array_equal(tensor_x, out_1)
+            np.testing.assert_array_equal(tensor_y, out_2)
             print("test allgather api ok\n")
 
         # test Reduce
@@ -178,7 +178,7 @@ class TestProcessGroupFp32(unittest.TestCase):
             task = pg.reduce(tensor_y, 0)
             task.wait()
         if pg.rank() == 0:
-            assert np.array_equal(tensor_x, sum_result)
+            np.testing.assert_array_equal(tensor_x, sum_result)
         print("test reduce sum api ok\n")
 
         # test Scatter
@@ -199,9 +199,9 @@ class TestProcessGroupFp32(unittest.TestCase):
         out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]])
         out2 = paddle.slice(tensor_x, [0], [self.shape[0]], [self.shape[0] * 2])
         if pg.rank() == 0:
-            assert np.array_equal(tensor_y, out1)
+            np.testing.assert_array_equal(tensor_y, out1)
         else:
-            assert np.array_equal(tensor_y, out2)
+            np.testing.assert_array_equal(tensor_y, out2)
         print("test scatter api ok\n")
 
         # test Gather
@@ -219,7 +219,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         if pg.rank() == root:
             task = pg.gather(tensor_y[root], tensor_x, root, True)
             task.wait()
-            assert np.array_equal(tensor_x, tensor_y)
+            np.testing.assert_array_equal(tensor_x, tensor_y)
         else:
             task = pg.gather(tensor_y[pg.rank()], tensor_x, root, True)
             task.wait()
diff --git a/test/collective/process_group_mpi.py b/test/collective/process_group_mpi.py
index f2fc9c498b4e82b2511f6c607f21bc5ec4f4f728..b5d9fd4b743772bbe5578169d76dabce95fa9c28 100644
--- a/test/collective/process_group_mpi.py
+++ b/test/collective/process_group_mpi.py
@@ -69,10 +69,10 @@ def test_allreduce_sum(pg, shape, dtype):
     sum_result = tensor_x + tensor_y
     if pg.rank() == 0:
         task = dist.all_reduce(tensor_x)
-        assert np.array_equal(tensor_x, sum_result)
+        np.testing.assert_array_equal(tensor_x, sum_result)
     else:
        task = dist.all_reduce(tensor_y)
-        assert np.array_equal(tensor_y, sum_result)
+        np.testing.assert_array_equal(tensor_y, sum_result)
 
     print("test allreduce sum api ok")
 
@@ -91,13 +91,13 @@ def test_allreduce_max(pg, shape, dtype):
             tensor_x, dist.ReduceOp.MAX, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_x, max_result)
+        np.testing.assert_array_equal(tensor_x, max_result)
     else:
         task = dist.all_reduce(
             tensor_y, dist.ReduceOp.MAX, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_y, max_result)
+        np.testing.assert_array_equal(tensor_y, max_result)
 
     print("test allreduce max api ok")
 
@@ -116,13 +116,13 @@ def test_allreduce_min(pg, shape, dtype):
             tensor_x, dist.ReduceOp.MIN, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_x, min_result)
+        np.testing.assert_array_equal(tensor_x, min_result)
     else:
         task = dist.all_reduce(
             tensor_y, dist.ReduceOp.MIN, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_y, min_result)
+        np.testing.assert_array_equal(tensor_y, min_result)
 
     print("test allreduce min api ok")
 
@@ -141,13 +141,13 @@ def test_allreduce_prod(pg, shape, dtype):
             tensor_x, dist.ReduceOp.PROD, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_x, prod_result)
+        np.testing.assert_array_equal(tensor_x, prod_result)
     else:
         task = dist.all_reduce(
             tensor_y, dist.ReduceOp.PROD, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_y, prod_result)
+        np.testing.assert_array_equal(tensor_y, prod_result)
 
     print("test allreduce prod api ok")
 
@@ -164,10 +164,10 @@ def test_broadcast(pg, shape, dtype):
         task = dist.broadcast(tensor_x, 0, use_calc_stream=False)
         task.synchronize()
         assert task.is_completed()
-        assert np.array_equal(broadcast_result, tensor_x)
+        np.testing.assert_array_equal(broadcast_result, tensor_x)
     else:
         task = dist.broadcast(tensor_y, 0)
-        assert np.array_equal(broadcast_result, tensor_y)
+        np.testing.assert_array_equal(broadcast_result, tensor_y)
 
     print("test broadcast api ok")
 
@@ -205,8 +205,8 @@ def test_allgather(pg, shape, dtype):
     tensor_out = paddle.concat(tensor_out_list)
     out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2])
     out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2], [out_shape[0]])
-    assert np.array_equal(tensor_x, out_1)
-    assert np.array_equal(tensor_y, out_2)
+    np.testing.assert_array_equal(tensor_x, out_1)
+    np.testing.assert_array_equal(tensor_y, out_2)
     print("test allgather api ok\n")
 
     if pg.rank() == 0:
@@ -219,8 +219,8 @@ def test_allgather(pg, shape, dtype):
     tensor_out = paddle.concat(tensor_out_list)
     out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2])
     out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2], [out_shape[0]])
-    assert np.array_equal(tensor_x, out_1)
-    assert np.array_equal(tensor_y, out_2)
+    np.testing.assert_array_equal(tensor_x, out_1)
+    np.testing.assert_array_equal(tensor_y, out_2)
     print("test allgather api2 ok\n")
 
 
@@ -249,9 +249,9 @@ def test_all2all(pg, shape, dtype):
     out1_2 = paddle.slice(tensor_out1, [0], [shape[0] // 2], [shape[0]])
     out2_1 = paddle.slice(tensor_out2, [0], [0], [shape[0] // 2])
     if pg.rank() == 0:
-        assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
+        np.testing.assert_array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
     else:
-        assert np.array_equal(out2_1, raw_tensor_x_2)
+        np.testing.assert_array_equal(out2_1, raw_tensor_x_2)
     print("test alltoall api ok\n")
 
     x = np.random.random(shape).astype(dtype)
@@ -277,9 +277,9 @@ def test_all2all(pg, shape, dtype):
     out1_2 = paddle.slice(tensor_out1, [0], [shape[0] // 2], [shape[0]])
     out2_1 = paddle.slice(tensor_out2, [0], [0], [shape[0] // 2])
     if pg.rank() == 0:
-        assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
+        np.testing.assert_array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
     else:
-        assert np.array_equal(out2_1, raw_tensor_x_2)
+        np.testing.assert_array_equal(out2_1, raw_tensor_x_2)
     print("test alltoall api2 ok\n")
 
 
@@ -297,7 +297,7 @@ def test_reduce_sum(pg, shape, dtype):
         task = dist.reduce(tensor_y, 0, use_calc_stream=False)
         task.wait()
     if pg.rank() == 0:
-        assert np.array_equal(tensor_x, sum_result)
+        np.testing.assert_array_equal(tensor_x, sum_result)
     print("test reduce sum api ok\n")
 
 
@@ -316,7 +316,7 @@ def test_reduce_max(pg, shape, dtype):
             tensor_x, 0, dist.ReduceOp.MAX, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_x, max_result)
+        np.testing.assert_array_equal(tensor_x, max_result)
     else:
         task = dist.reduce(
             tensor_y, 0, dist.ReduceOp.MAX, use_calc_stream=False
@@ -340,7 +340,7 @@ def test_reduce_min(pg, shape, dtype):
             tensor_x, 0, dist.ReduceOp.MIN, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_x, min_result)
+        np.testing.assert_array_equal(tensor_x, min_result)
     else:
         task = dist.reduce(
             tensor_y, 0, dist.ReduceOp.MIN, use_calc_stream=False
@@ -364,7 +364,7 @@ def test_reduce_prod(pg, shape, dtype):
             tensor_x, 0, dist.ReduceOp.PROD, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_x, prod_result)
+        np.testing.assert_array_equal(tensor_x, prod_result)
     else:
         task = dist.reduce(
             tensor_y, 0, dist.ReduceOp.PROD, use_calc_stream=False
@@ -391,9 +391,9 @@ def test_scatter(pg, shape, dtype):
     out1 = paddle.slice(tensor_x, [0], [0], [shape[0]])
     out2 = paddle.slice(tensor_x, [0], [shape[0]], [shape[0] * 2])
     if pg.rank() == 0:
-        assert np.array_equal(tensor_y, out1)
+        np.testing.assert_array_equal(tensor_y, out1)
     else:
-        assert np.array_equal(tensor_y, out2)
+        np.testing.assert_array_equal(tensor_y, out2)
     print("test scatter api ok\n")
 
 
@@ -411,7 +411,7 @@ def test_send_recv(pg, sub_group, shape, dtype):
     elif pg.rank() == 1:
         task = dist.recv(tensor_y, 0, group=sub_group, use_calc_stream=False)
         task.wait()
-        assert np.array_equal(tensor_y, tensor_x)
+        np.testing.assert_array_equal(tensor_y, tensor_x)
 
     print("test send api ok")
 
@@ -427,7 +427,7 @@ def test_send_recv(pg, sub_group, shape, dtype):
         task = dist.send(tensor_x, 1, group=sub_group, use_calc_stream=True)
     elif pg.rank() == 1:
         task = dist.recv(tensor_y, 0, group=sub_group, use_calc_stream=True)
-        assert np.array_equal(tensor_y, tensor_x)
+        np.testing.assert_array_equal(tensor_y, tensor_x)
 
     print("test send api ok")
 
diff --git a/test/collective/process_group_nccl.py b/test/collective/process_group_nccl.py
index 713e0a01b4abbf5be4c5306c5c3603376333a933..67815eab2bc8346263b190d4050232598b5777d8 100644
--- a/test/collective/process_group_nccl.py
+++ b/test/collective/process_group_nccl.py
@@ -64,10 +64,10 @@ class TestProcessGroupFp32(unittest.TestCase):
         sum_result = tensor_x + tensor_y
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x)
-            assert np.array_equal(tensor_x, sum_result)
+            np.testing.assert_array_equal(tensor_x, sum_result)
         else:
             task = dist.all_reduce(tensor_y)
-            assert np.array_equal(tensor_y, sum_result)
+            np.testing.assert_array_equal(tensor_y, sum_result)
 
         print("test allreduce sum api ok")
 
@@ -82,10 +82,10 @@ class TestProcessGroupFp32(unittest.TestCase):
         sum_result = tensor_x + tensor_y
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x)
-            assert np.array_equal(tensor_x, sum_result)
+            np.testing.assert_array_equal(tensor_x, sum_result)
         else:
             task = dist.all_reduce(tensor_y)
-            assert np.array_equal(tensor_y, sum_result)
+            np.testing.assert_array_equal(tensor_y, sum_result)
 
         print("test allreduce sum api with = [] ok")
 
@@ -102,11 +102,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, max_result)
+            np.testing.assert_array_equal(tensor_x, max_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, max_result)
+            np.testing.assert_array_equal(tensor_y, max_result)
 
         print("test allreduce max api ok")
 
@@ -123,11 +123,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, max_result)
+            np.testing.assert_array_equal(tensor_x, max_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, max_result)
+            np.testing.assert_array_equal(tensor_y, max_result)
 
         print("test allreduce max api with shape = [] ok")
 
@@ -144,11 +144,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, min_result)
+            np.testing.assert_array_equal(tensor_x, min_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, min_result)
+            np.testing.assert_array_equal(tensor_y, min_result)
 
         print("test allreduce min api ok")
 
@@ -165,11 +165,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, min_result)
+            np.testing.assert_array_equal(tensor_x, min_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, min_result)
+            np.testing.assert_array_equal(tensor_y, min_result)
 
         print("test allreduce min api with shape [] ok")
 
@@ -186,11 +186,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, prod_result)
+            np.testing.assert_array_equal(tensor_x, prod_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, prod_result)
+            np.testing.assert_array_equal(tensor_y, prod_result)
 
         print("test allreduce prod api ok")
 
@@ -207,11 +207,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, prod_result)
+            np.testing.assert_array_equal(tensor_x, prod_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, prod_result)
+            np.testing.assert_array_equal(tensor_y, prod_result)
 
         print("test allreduce prod api with shape = [] ok")
 
@@ -229,11 +229,11 @@ class TestProcessGroupFp32(unittest.TestCase):
             task.synchronize()
             paddle.device.cuda.synchronize()
             assert task.is_completed()
-            assert np.array_equal(broadcast_result, tensor_x)
+            np.testing.assert_array_equal(broadcast_result, tensor_x)
         else:
             task = dist.broadcast(tensor_y, 0)
             paddle.device.cuda.synchronize()
-            assert np.array_equal(broadcast_result, tensor_y)
+            np.testing.assert_array_equal(broadcast_result, tensor_y)
 
         print("test broadcast api ok")
 
@@ -251,11 +251,11 @@ class TestProcessGroupFp32(unittest.TestCase):
             task.synchronize()
             paddle.device.cuda.synchronize()
             assert task.is_completed()
-            assert np.array_equal(broadcast_result, tensor_x)
+            np.testing.assert_array_equal(broadcast_result, tensor_x)
         else:
             task = dist.broadcast(tensor_y, 0)
             paddle.device.cuda.synchronize()
-            assert np.array_equal(broadcast_result, tensor_y)
+            np.testing.assert_array_equal(broadcast_result, tensor_y)
             assert tensor_y.shape == []
         print("test broadcast api with shape=[] ok")
 
@@ -298,8 +298,8 @@ class TestProcessGroupFp32(unittest.TestCase):
             out_2 = paddle.slice(
                 tensor_out, [0], [out_shape[0] // 2], [out_shape[0]]
             )
-            assert np.array_equal(tensor_x, out_1)
-            assert np.array_equal(tensor_y, out_2)
+            np.testing.assert_array_equal(tensor_x, out_1)
+            np.testing.assert_array_equal(tensor_y, out_2)
             print("test allgather api ok\n")
 
         if pg.rank() == 0:
@@ -316,8 +316,8 @@ class TestProcessGroupFp32(unittest.TestCase):
             out_2 = paddle.slice(
                 tensor_out, [0], [out_shape[0] // 2], [out_shape[0]]
             )
-            assert np.array_equal(tensor_x, out_1)
-            assert np.array_equal(tensor_y, out_2)
+            np.testing.assert_array_equal(tensor_x, out_1)
+            np.testing.assert_array_equal(tensor_y, out_2)
             print("test allgather api2 ok\n")
 
         # test allgather with shape = []
@@ -337,8 +337,8 @@ class TestProcessGroupFp32(unittest.TestCase):
             paddle.device.cuda.synchronize()
             out_1 = tensor_out_list[0]
             out_2 = tensor_out_list[1]
-            assert np.array_equal(tensor_x, out_1)
-            assert np.array_equal(tensor_y, out_2)
+            np.testing.assert_array_equal(tensor_x, out_1)
+            np.testing.assert_array_equal(tensor_y, out_2)
             print("test allgather api with shape [] ok\n")
 
         # test alltoall
@@ -371,9 +371,11 @@ class TestProcessGroupFp32(unittest.TestCase):
             )
             out2_1 = paddle.slice(tensor_out2, [0], [0], [self.shape[0] // 2])
             if pg.rank() == 0:
-                assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
+                np.testing.assert_array_equal(
+                    out1_2.numpy(), raw_tensor_y_1.numpy()
+                )
             else:
-                assert np.array_equal(out2_1, raw_tensor_x_2)
+                np.testing.assert_array_equal(out2_1, raw_tensor_x_2)
             print("test alltoall api ok\n")
 
             x = np.random.random(self.shape).astype(self.dtype)
@@ -404,9 +406,11 @@ class TestProcessGroupFp32(unittest.TestCase):
             )
             out2_1 = paddle.slice(tensor_out2, [0], [0], [self.shape[0] // 2])
             if pg.rank() == 0:
-                assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
+                np.testing.assert_array_equal(
+                    out1_2.numpy(), raw_tensor_y_1.numpy()
+                )
             else:
-                assert np.array_equal(out2_1, raw_tensor_x_2)
+                np.testing.assert_array_equal(out2_1, raw_tensor_x_2)
             print("test alltoall api2 ok\n")
 
         # test Reduce
@@ -425,7 +429,7 @@ class TestProcessGroupFp32(unittest.TestCase):
             task.wait()
             paddle.device.cuda.synchronize()
         if pg.rank() == 0:
-            assert np.array_equal(tensor_x, sum_result)
+            np.testing.assert_array_equal(tensor_x, sum_result)
         print("test reduce sum api ok\n")
 
         # test reduce max
@@ -441,7 +445,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         if pg.rank() == 0:
             task = dist.reduce(tensor_x, 0, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, max_result)
+            np.testing.assert_array_equal(tensor_x, max_result)
         else:
             task = dist.reduce(tensor_y, 0, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
@@ -461,7 +465,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         if pg.rank() == 0:
             task = dist.reduce(tensor_x, 0, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, min_result)
+            np.testing.assert_array_equal(tensor_x, min_result)
         else:
             task = dist.reduce(tensor_y, 0, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
@@ -481,7 +485,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         if pg.rank() == 0:
             task = dist.reduce(tensor_x, 0, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, prod_result)
+            np.testing.assert_array_equal(tensor_x, prod_result)
         else:
             task = dist.reduce(tensor_y, 0, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
@@ -511,9 +515,9 @@ class TestProcessGroupFp32(unittest.TestCase):
         out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]])
         out2 = paddle.slice(tensor_x, [0], [self.shape[0]], [self.shape[0] * 2])
         if pg.rank() == 0:
-            assert np.array_equal(tensor_y, out1)
+            np.testing.assert_array_equal(tensor_y, out1)
         else:
-            assert np.array_equal(tensor_y, out2)
+            np.testing.assert_array_equal(tensor_y, out2)
         print("test scatter api ok\n")
 
         # test Scatter with shape=[]
@@ -534,9 +538,9 @@ class TestProcessGroupFp32(unittest.TestCase):
         out1 = paddle.assign(tensor_x)
         out2 = paddle.assign(tensor_x + 1)
         if pg.rank() == 0:
-            assert np.array_equal(tensor_y, out1)
+            np.testing.assert_array_equal(tensor_y, out1)
         else:
-            assert np.array_equal(tensor_y, out2), f"{tensor_y}, {out2}"
+            np.testing.assert_array_equal(tensor_y, out2)
         assert tensor_y.shape == []
         print("test scatter api with shape=[] ok\n")
 
@@ -554,7 +558,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         else:
             task = dist.recv(tensor_y, 0, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, tensor_x)
+            np.testing.assert_array_equal(tensor_y, tensor_x)
 
         print("test send api ok")
 
@@ -570,7 +574,7 @@ class TestProcessGroupFp32(unittest.TestCase):
             task = dist.send(tensor_x, 1, sync_op=True)
         else:
             task = dist.recv(tensor_y, 0, sync_op=True)
-            assert np.array_equal(tensor_y, tensor_x)
+            np.testing.assert_array_equal(tensor_y, tensor_x)
 
         print("test send api ok")
 
diff --git a/test/collective/strategy_group.py b/test/collective/strategy_group.py
index 5197cf632b402a2531bc27eeedb1b7500516a6d5..a70fbdf151822dae6305e7adc2a8aa4949d93127 100644
--- a/test/collective/strategy_group.py
+++ b/test/collective/strategy_group.py
@@ -31,7 +31,7 @@ def _check_using_all_reduce(group):
     data = paddle.to_tensor([1, 2, 3])
     result = paddle.to_tensor([2, 4, 6])
     dist.all_reduce(data, group=group)
-    assert np.array_equal(data, result)
+    np.testing.assert_array_equal(data, result)
 
 
 def _check_using_send(group, dst):
@@ -43,7 +43,7 @@ def _check_using_recv(group, src):
     result = paddle.to_tensor([1, 2, 3])
     data = paddle.to_tensor([0, 0, 0])
     dist.recv(data, src=src, group=group)
-    assert np.array_equal(data, result)
+    np.testing.assert_array_equal(data, result)
 
 
 class TestStrategyGroupAPI(unittest.TestCase):
diff --git a/test/dygraph_to_static/test_cycle_gan.py b/test/dygraph_to_static/test_cycle_gan.py
index 7974371fc0f13ad818f086826f392ab134509ab5..19c14115afc0f8ebda24a218d522abab0a418305 100644
--- a/test/dygraph_to_static/test_cycle_gan.py
+++ b/test/dygraph_to_static/test_cycle_gan.py
@@ -698,17 +698,13 @@ class TestCycleGANModel(unittest.TestCase):
         st_out = self.train(to_static=True)
         dy_out = self.train(to_static=False)
 
-        assert_func = np.allclose
         # Note(Aurelius84): Because we disable BN on GPU,
         # but here we enhance the check on CPU by `np.array_equal`
         # which means the dy_out and st_out shall be exactly same.
         if not fluid.is_compiled_with_cuda():
-            assert_func = np.array_equal
-
-        self.assertTrue(
-            assert_func(dy_out, st_out),
-            msg=f"dy_out:\n {dy_out}\n st_out:\n{st_out}",
-        )
+            np.testing.assert_array_equal(dy_out, st_out)
+        else:
+            np.testing.assert_allclose(dy_out, st_out, rtol=1e-5, atol=1e-8)
 
 
 if __name__ == "__main__":
diff --git a/test/ir/inference/test_inference_predictor_run.py b/test/ir/inference/test_inference_predictor_run.py
index 99ba29956c5dad7d97ac4f8a68018a56bd9b9211..c6a8c5db9f3c1d5fc96f33bdd93fdade71cd8baf 100644
--- a/test/ir/inference/test_inference_predictor_run.py
+++ b/test/ir/inference/test_inference_predictor_run.py
@@ -119,7 +119,7 @@ class TestPredictorRunWithTensor(unittest.TestCase):
         inorder_output = self.get_inorder_output()
         disorder_output = self.get_disorder_output()
 
-        assert np.allclose(
+        np.testing.assert_allclose(
             inorder_output.numpy().flatten(), disorder_output.numpy().flatten()
         )
 
diff --git a/test/legacy_test/test_activation_op.py b/test/legacy_test/test_activation_op.py
index dde1a05cf72e425327e0fc63531fdb55b6e617a8..144b7fdcaa4e5875d25ad195ad90d92e19369dd9 100644
--- a/test/legacy_test/test_activation_op.py
+++ b/test/legacy_test/test_activation_op.py
@@ -3373,9 +3373,15 @@ class TestPow_factor_tensor(TestActivation):
             fetch_list=[out_1, out_2, res, out_6],
         )
 
-        assert np.allclose(res_1, np.power(input, 2))
-        assert np.allclose(res_2, np.power(input, 3))
-        assert np.allclose(res_6, np.power(input, 3))
+        np.testing.assert_allclose(
+            res_1, np.power(input, 2), rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_2, np.power(input, 3), rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_6, np.power(input, 3), rtol=1e-5, atol=1e-8
+        )
 
 
 def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
diff --git a/test/legacy_test/test_adaptive_avg_pool2d.py b/test/legacy_test/test_adaptive_avg_pool2d.py
index a2b0066235f87b9bef491db5dc8e8f9f088f4c17..663ac74781597c655941e5f4fb129911e99ffbf6 100644
--- a/test/legacy_test/test_adaptive_avg_pool2d.py
+++ b/test/legacy_test/test_adaptive_avg_pool2d.py
@@ -148,15 +148,21 @@ class TestAdaptiveAvgPool2DAPI(unittest.TestCase):
             fetch_list=[out_1, out_2, out_3, out_4, out_5],
         )
 
-        assert np.allclose(res_1, self.res_1_np)
-
-        assert np.allclose(res_2, self.res_2_np)
-
-        assert np.allclose(res_3, self.res_3_np)
-
-        assert np.allclose(res_4, self.res_4_np)
-
-        assert np.allclose(res_5, self.res_5_np)
+        np.testing.assert_allclose(
+            res_1, self.res_1_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_2, self.res_2_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_3, self.res_3_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_4, self.res_4_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_5, self.res_5_np, rtol=1e-5, atol=1e-8
+        )
 
     def test_dynamic_graph(self):
         for use_cuda in (
@@ -169,36 +175,38 @@ class TestAdaptiveAvgPool2DAPI(unittest.TestCase):
             out_1 = paddle.nn.functional.adaptive_avg_pool2d(
                 x=x, output_size=[3, 3]
             )
-
             out_2 = paddle.nn.functional.adaptive_avg_pool2d(x=x, output_size=5)
-
             out_3 = paddle.nn.functional.adaptive_avg_pool2d(
                 x=x, output_size=[2, 5]
             )
-
             out_4 = paddle.nn.functional.adaptive_avg_pool2d(
                 x=x, output_size=[3, 3], data_format="NHWC"
             )
-
             out_5 = paddle.nn.functional.adaptive_avg_pool2d(
                 x=x, output_size=[None, 3]
            )
-
             out_6 = paddle.nn.functional.interpolate(
                 x=x, mode="area", size=[2, 5]
             )
 
-            assert np.allclose(out_1.numpy(), self.res_1_np)
-
-            assert np.allclose(out_2.numpy(), self.res_2_np)
-
-            assert np.allclose(out_3.numpy(), self.res_3_np)
-
-            assert np.allclose(out_4.numpy(), self.res_4_np)
-
-            assert np.allclose(out_5.numpy(), self.res_5_np)
-
-            assert np.allclose(out_6.numpy(), self.res_3_np)
+            np.testing.assert_allclose(
+                out_1.numpy(), self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_2.numpy(), self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_3.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_4.numpy(), self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_5.numpy(), self.res_5_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_6.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )
 
 
 class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase):
@@ -260,15 +268,21 @@ class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase):
             fetch_list=[out_1, out_2, out_3, out_4, out_5],
         )
 
-        assert np.allclose(res_1, self.res_1_np)
-
-        assert np.allclose(res_2, self.res_2_np)
-
-        assert np.allclose(res_3, self.res_3_np)
-
-        assert np.allclose(res_4, self.res_4_np)
-
-        assert np.allclose(res_5, self.res_5_np)
+        np.testing.assert_allclose(
+            res_1, self.res_1_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_2, self.res_2_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_3, self.res_3_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_4, self.res_4_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_5, self.res_5_np, rtol=1e-5, atol=1e-8
+        )
 
     def test_dynamic_graph(self):
         for use_cuda in (
@@ -297,15 +311,21 @@ class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase):
             )
             out_5 = adaptive_avg_pool(x=x)
 
-            assert np.allclose(out_1.numpy(), self.res_1_np)
-
-            assert np.allclose(out_2.numpy(), self.res_2_np)
-
-            assert np.allclose(out_3.numpy(), self.res_3_np)
-
-            assert np.allclose(out_4.numpy(), self.res_4_np)
-
-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(
+                out_1.numpy(), self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_2.numpy(), self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_3.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_4.numpy(), self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_5.numpy(), self.res_5_np, rtol=1e-5, atol=1e-8
+            )
 
 
 class TestOutputSizeTensor(UnittestBase):
diff --git a/test/legacy_test/test_adaptive_avg_pool3d.py b/test/legacy_test/test_adaptive_avg_pool3d.py
index 99afe85996ce6dbea25f7905a96f7b384f62098b..d5054ba2107af2081b1ec7c471b90de8f36765a5 100755
--- a/test/legacy_test/test_adaptive_avg_pool3d.py
+++ b/test/legacy_test/test_adaptive_avg_pool3d.py
@@ -169,15 +169,21 @@ class TestAdaptiveAvgPool3DAPI(unittest.TestCase):
             fetch_list=[out_1, out_2, out_3, out_4, out_5],
         )
 
-        assert np.allclose(res_1, self.res_1_np)
-
-        assert np.allclose(res_2, self.res_2_np)
-
-        assert np.allclose(res_3, self.res_3_np)
-
-        assert np.allclose(res_4, self.res_4_np)
-
-        assert np.allclose(res_5, self.res_5_np)
+        np.testing.assert_allclose(
+            res_1, self.res_1_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_2, self.res_2_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_3, self.res_3_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_4, self.res_4_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_5, self.res_5_np, rtol=1e-5, atol=1e-8
+        )
 
     def test_dynamic_graph(self):
         for use_cuda in (
@@ -209,17 +215,24 @@ class TestAdaptiveAvgPool3DAPI(unittest.TestCase):
                 x=x, mode="area", size=[2, 3, 5]
             )
 
-            assert np.allclose(out_1.numpy(), self.res_1_np)
-
-            assert np.allclose(out_2.numpy(), self.res_2_np)
-
-            assert np.allclose(out_3.numpy(), self.res_3_np)
-
-            assert np.allclose(out_4.numpy(), self.res_4_np)
-
-            assert np.allclose(out_5.numpy(), self.res_5_np)
-
-            assert np.allclose(out_6.numpy(), self.res_3_np)
+            np.testing.assert_allclose(
+                out_1.numpy(), self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_2.numpy(), self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_3.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_4.numpy(), self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_5.numpy(), self.res_5_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_6.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )
 
 
 class TestAdaptiveAvgPool3DClassAPI(unittest.TestCase):
@@ -288,15 +301,21 @@ class TestAdaptiveAvgPool3DClassAPI(unittest.TestCase):
             fetch_list=[out_1, out_2, out_3, out_4, out_5],
         )
 
-        assert np.allclose(res_1, self.res_1_np)
-
-        assert np.allclose(res_2, self.res_2_np)
-
-        assert np.allclose(res_3, self.res_3_np)
-
-        assert np.allclose(res_4, self.res_4_np)
-
-        assert np.allclose(res_5, self.res_5_np)
+        np.testing.assert_allclose(
+            res_1, self.res_1_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_2, self.res_2_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_3, self.res_3_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_4, self.res_4_np, rtol=1e-5, atol=1e-8
+        )
+        np.testing.assert_allclose(
+            res_5, self.res_5_np, rtol=1e-5, atol=1e-8
+        )
 
     def test_dynamic_graph(self):
         for use_cuda in (
@@ -329,15 +348,21 @@ class TestAdaptiveAvgPool3DClassAPI(unittest.TestCase):
            )
             out_5 = adaptive_avg_pool(x=x)
 
-            assert np.allclose(out_1.numpy(), self.res_1_np)
-
-            assert np.allclose(out_2.numpy(), self.res_2_np)
-
-            assert np.allclose(out_3.numpy(), self.res_3_np)
-
-            assert np.allclose(out_4.numpy(), self.res_4_np)
-
-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(
+                out_1.numpy(), self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_2.numpy(), self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_3.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_4.numpy(), self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_5.numpy(), self.res_5_np, rtol=1e-5, atol=1e-8
+            )
 
 
 if __name__ == '__main__':
diff --git a/test/legacy_test/test_adaptive_max_pool2d.py b/test/legacy_test/test_adaptive_max_pool2d.py
index 62a4dee7e58a0f9050fd25b5da67ddd83273b0b0..104271b955257ed5a616e7549bda3e1de4223dbb 100644
--- a/test/legacy_test/test_adaptive_max_pool2d.py
+++ b/test/legacy_test/test_adaptive_max_pool2d.py
@@ -149,15 +149,15 @@ class TestAdaptiveMaxPool2DAPI(unittest.TestCase):
             fetch_list=[out_1, out_2, out_3, out_5],
         )
 
-        assert np.allclose(res_1, self.res_1_np)
+        np.testing.assert_allclose(res_1, self.res_1_np)
 
-        assert np.allclose(res_2, self.res_2_np)
+        np.testing.assert_allclose(res_2, self.res_2_np)
 
-        assert np.allclose(res_3, self.res_3_np)
+        np.testing.assert_allclose(res_3, self.res_3_np)
 
-        # assert np.allclose(res_4, self.res_4_np)
+        # np.testing.assert_allclose(res_4, self.res_4_np)
 
-        assert np.allclose(res_5, self.res_5_np)
+        np.testing.assert_allclose(res_5, self.res_5_np)
 
     def test_dynamic_graph(self):
         for use_cuda in (
@@ -184,15 +184,15 @@ class TestAdaptiveMaxPool2DAPI(unittest.TestCase):
                 x=x, output_size=[None, 3]
             )
 
-            assert np.allclose(out_1.numpy(), self.res_1_np)
+            np.testing.assert_allclose(out_1.numpy(), self.res_1_np)
 
-            assert np.allclose(out_2.numpy(), self.res_2_np)
+            np.testing.assert_allclose(out_2.numpy(), self.res_2_np)
 
-            assert np.allclose(out_3.numpy(), self.res_3_np)
+            np.testing.assert_allclose(out_3.numpy(), self.res_3_np)
 
-            # assert np.allclose(out_4.numpy(), self.res_4_np)
+            # np.testing.assert_allclose(out_4.numpy(), self.res_4_np)
 
-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(out_5.numpy(), self.res_5_np)
 
 
 class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase):
@@ -255,15 +255,15 @@ class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase):
             fetch_list=[out_1, out_2, out_3, out_5],
         )
 
-        assert np.allclose(res_1, self.res_1_np)
+        np.testing.assert_allclose(res_1, self.res_1_np)
 
-        assert np.allclose(res_2, self.res_2_np)
+        np.testing.assert_allclose(res_2, self.res_2_np)
 
-        assert np.allclose(res_3, self.res_3_np)
+        np.testing.assert_allclose(res_3, self.res_3_np)
 
-        # assert np.allclose(res_4, self.res_4_np)
+        # np.testing.assert_allclose(res_4, self.res_4_np)
 
-        assert np.allclose(res_5, self.res_5_np)
+        np.testing.assert_allclose(res_5, self.res_5_np)
 
     def test_dynamic_graph(self):
         for use_cuda in (
@@ -291,15 +291,15 @@ class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase):
             )
             out_5 = adaptive_max_pool(x=x)
 
-            assert np.allclose(out_1.numpy(), self.res_1_np)
+            np.testing.assert_allclose(out_1.numpy(), self.res_1_np)
 
-            assert np.allclose(out_2.numpy(), self.res_2_np)
+            np.testing.assert_allclose(out_2.numpy(), self.res_2_np)
 
-            assert np.allclose(out_3.numpy(), self.res_3_np)
+            np.testing.assert_allclose(out_3.numpy(), self.res_3_np)
 
-            # assert np.allclose(out_4.numpy(), self.res_4_np)
+            # np.testing.assert_allclose(out_4.numpy(), self.res_4_np)
 
-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(out_5.numpy(), self.res_5_np)
 
 
 class TestOutDtype(unittest.TestCase):
diff --git a/test/legacy_test/test_adaptive_max_pool3d.py b/test/legacy_test/test_adaptive_max_pool3d.py
index f22196409700102e5be487695dcc2e2dd112eb19..13eed4823d88f30013b8bae186c58e60d38eeb69 100755
--- a/test/legacy_test/test_adaptive_max_pool3d.py
+++ b/test/legacy_test/test_adaptive_max_pool3d.py
@@ -170,15 +170,15 @@ class TestAdaptiveMaxPool3DAPI(unittest.TestCase):
             fetch_list=[out_1, out_2, out_3, out_5],
         )
 
-        assert np.allclose(res_1, self.res_1_np)
+        np.testing.assert_allclose(res_1, self.res_1_np)
 
-        assert np.allclose(res_2, self.res_2_np)
+        np.testing.assert_allclose(res_2, self.res_2_np)
 
-        assert np.allclose(res_3, self.res_3_np)
+        np.testing.assert_allclose(res_3, self.res_3_np)
 
-        # assert np.allclose(res_4, self.res_4_np)
+        # np.testing.assert_allclose(res_4, self.res_4_np)
 
-        assert np.allclose(res_5, self.res_5_np)
+        np.testing.assert_allclose(res_5, self.res_5_np)
 
     def test_dynamic_graph(self):
         for use_cuda in (
@@ -205,15 +205,15 @@ class TestAdaptiveMaxPool3DAPI(unittest.TestCase):
                 x=x, output_size=[None, 3, None]
            )
 
-            assert np.allclose(out_1.numpy(), self.res_1_np)
+            np.testing.assert_allclose(out_1.numpy(), self.res_1_np)
 
-            assert np.allclose(out_2.numpy(), self.res_2_np)
+            np.testing.assert_allclose(out_2.numpy(), self.res_2_np)
 
-            assert np.allclose(out_3.numpy(), self.res_3_np)
+            np.testing.assert_allclose(out_3.numpy(), self.res_3_np)
 
-            # assert np.allclose(out_4.numpy(), self.res_4_np)
+            # np.testing.assert_allclose(out_4.numpy(), self.res_4_np)
 
-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(out_5.numpy(), self.res_5_np)
 
 
 class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase):
@@ -280,15 +280,15 @@ class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase):
             fetch_list=[out_1, out_2, out_3, out_5],
         )
 
-        assert np.allclose(res_1, self.res_1_np)
+        np.testing.assert_allclose(res_1, self.res_1_np)
 
-        assert np.allclose(res_2, self.res_2_np)
+        np.testing.assert_allclose(res_2, self.res_2_np)
 
-        assert np.allclose(res_3, self.res_3_np)
+        np.testing.assert_allclose(res_3, self.res_3_np)
 
         # assert np.allclose(res_4, self.res_4_np)
 
-        assert np.allclose(res_5, self.res_5_np)
+        np.testing.assert_allclose(res_5, self.res_5_np)
 
     def test_dynamic_graph(self):
         for use_cuda in (
@@ -320,15 +320,15 @@ class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase):
             )
             out_5 = adaptive_max_pool(x=x)
 
-            assert np.allclose(out_1.numpy(), self.res_1_np)
+            np.testing.assert_allclose(out_1.numpy(), self.res_1_np)
 
-            assert np.allclose(out_2.numpy(), self.res_2_np)
+            np.testing.assert_allclose(out_2.numpy(), self.res_2_np)
 
-            assert np.allclose(out_3.numpy(), self.res_3_np)
+            np.testing.assert_allclose(out_3.numpy(), self.res_3_np)
 
             # assert np.allclose(out_4.numpy(), self.res_4_np)
 
-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(out_5.numpy(), self.res_5_np)
 
 
 class TestOutDtype(unittest.TestCase):
diff --git a/test/legacy_test/test_addmm_op.py b/test/legacy_test/test_addmm_op.py
index 66a86961e885d8553f2722b52af124946a8de750..1f92270cbeeac9d4b2ec77b18d02e2fff1742d4e 100644
--- a/test/legacy_test/test_addmm_op.py
+++ b/test/legacy_test/test_addmm_op.py
@@ -328,7 +328,9 @@ class TestAddMMOp5(unittest.TestCase):
             x = fluid.dygraph.to_variable(np_x)
             y = fluid.dygraph.to_variable(np_y)
             out = paddle.tensor.addmm(input, x, y)
-            assert np.allclose(np_input + np.dot(np_x, np_y), out.numpy())
+            np.testing.assert_allclose(
+                np_input + np.dot(np_x, np_y), out.numpy(), rtol=1e-5, atol=1e-8
+            )
 
 
 class TestAddMMAPI(unittest.TestCase):
diff --git a/test/legacy_test/test_batch_norm_op_v2.py b/test/legacy_test/test_batch_norm_op_v2.py
index a55c478996678ccc29a5e47061ddc6a24fa5360d..618513a0d044bdfbf9141e755ce54f98bb9a2003 100644
--- a/test/legacy_test/test_batch_norm_op_v2.py
+++ b/test/legacy_test/test_batch_norm_op_v2.py
@@ -173,7 +173,7 @@ class TestBatchNorm(unittest.TestCase):
 
                 bn = paddle.nn.BatchNorm2D(shape[1])
                 eag_y = bn(paddle.to_tensor(x))
-                assert np.allclose(eag_y.numpy(), y.numpy())
+                np.testing.assert_allclose(eag_y.numpy(), y.numpy())
             return y.numpy()
 
         def compute_v3(x, is_test, trainable_statistics):
@@ -351,10 +351,10 @@ class TestBatchNormChannelLast(unittest.TestCase):
         y.backward()
         y2.backward()
 
-        assert np.allclose(
+        np.testing.assert_allclose(
             y.numpy().flatten(), y2.numpy().flatten(), atol=1e-5, rtol=1e-5
         )
-        assert np.allclose(
+        np.testing.assert_allclose(
             bn1d.weight.grad.numpy().flatten(),
             bn2d.weight.grad.numpy().flatten(),
             atol=1e-5,
diff --git a/test/legacy_test/test_broadcast_to_op.py b/test/legacy_test/test_broadcast_to_op.py
index 63d3b9141751203a22a609d206896b24b80943b9..e2da6a11172966d8be50f18d77fd0a10aa07a6ff 100644
--- a/test/legacy_test/test_broadcast_to_op.py
+++ b/test/legacy_test/test_broadcast_to_op.py
@@ -66,9 +66,9 @@ class TestBroadcastToAPI(unittest.TestCase):
            },
             fetch_list=[out_1, out_2, out_3],
         )
-        assert np.array_equal(res_1, np.tile(input, (1, 1)))
-        assert np.array_equal(res_2, np.tile(input, (1, 1)))
-        assert np.array_equal(res_3, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_1, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_2, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_3, np.tile(input, (1, 1)))
 
     def test_api_fp16_gpu(self):
         if paddle.fluid.core.is_compiled_with_cuda():
@@ -101,9 +101,9 @@ class TestBroadcastToAPI(unittest.TestCase):
                },
                 fetch_list=[out_1, out_2, out_3],
             )
-            assert np.array_equal(res_1, np.tile(input, (1, 1)))
-            assert np.array_equal(res_2, np.tile(input, (1, 1)))
-            assert np.array_equal(res_3, np.tile(input, (1, 1)))
+            np.testing.assert_array_equal(res_1, np.tile(input, (1, 1)))
+            np.testing.assert_array_equal(res_2, np.tile(input, (1, 1)))
+            np.testing.assert_array_equal(res_3, np.tile(input, (1, 1)))
 
 
 if __name__ == "__main__":
diff --git a/test/legacy_test/test_channel_shuffle.py b/test/legacy_test/test_channel_shuffle.py
index efecb886a757f8fa948a3067d958893953fff19c..f8b6ef1df9514b5d675b299783672c695d9e6994 100644
--- a/test/legacy_test/test_channel_shuffle.py
+++ b/test/legacy_test/test_channel_shuffle.py
@@ -120,8 +120,8 @@ class TestChannelShuffleAPI(unittest.TestCase):
                 use_prune=True,
             )
 
-        assert np.allclose(res_1, self.out_1_np)
-        assert np.allclose(res_2, self.out_2_np)
+        np.testing.assert_allclose(res_1[0], self.out_1_np)
+        np.testing.assert_allclose(res_2[0], self.out_2_np)
 
     # same test between layer and functional in this op.
     def test_static_graph_layer(self):
@@ -160,8 +160,8 @@ class TestChannelShuffleAPI(unittest.TestCase):
                 use_prune=True,
            )
 
-        assert np.allclose(res_1, out_1_np)
-        assert np.allclose(res_2, out_2_np)
+        np.testing.assert_allclose(res_1[0], out_1_np)
+        np.testing.assert_allclose(res_2[0], out_2_np)
 
     def run_dygraph(self, groups, data_format):
         n, c, h, w = 2, 9, 4, 4
diff --git a/test/legacy_test/test_concat_op.py b/test/legacy_test/test_concat_op.py
index 1176ba32b20d47b68678239298756088d776d4d3..db848d7b5cff43f7e75207f012f945ebbb15057a 100644
--- a/test/legacy_test/test_concat_op.py
+++ b/test/legacy_test/test_concat_op.py
@@ -472,9 +472,15 @@ class TestConcatAPI(unittest.TestCase):
             feed={"x_1": input_2, "x_2": input_2, "x_3": input_3},
             fetch_list=[out_1, out_2, out_3],
         )
-        assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
-        assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
-        assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
+        np.testing.assert_array_equal(
+            res_1, np.concatenate((input_2, input_3), axis=1)
+        )
+        np.testing.assert_array_equal(
+            res_2, np.concatenate((input_2, input_3), axis=1)
+        )
+        np.testing.assert_array_equal(
+            res_3, np.concatenate((input_2, input_3), axis=1)
+        )
 
     def test_api(self):
         paddle.enable_static()
@@ -501,10 +507,18 @@ class TestConcatAPI(unittest.TestCase):
             feed={"x_1": input_2, "x_2": input_2, "x_3": input_3},
             fetch_list=[out_1, out_2, out_3, out_4],
         )
-        assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
-        assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
-        assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
-        assert np.array_equal(res_4, np.concatenate((input_2, input_3), axis=1))
+        np.testing.assert_array_equal(
+            res_1, np.concatenate((input_2, input_3), axis=1)
+        )
+        np.testing.assert_array_equal(
+            res_2, np.concatenate((input_2, input_3), axis=1)
+        )
+        np.testing.assert_array_equal(
+            res_3, np.concatenate((input_2, input_3), axis=1)
+        )
+        np.testing.assert_array_equal(
+            res_4, np.concatenate((input_2, input_3), axis=1)
+        )
 
     def test_imperative(self):
         in1 = np.array([[1, 2, 3], [4, 5, 6]])
diff --git a/test/legacy_test/test_cuda_graph_partial_graph_static_run.py b/test/legacy_test/test_cuda_graph_partial_graph_static_run.py
index 822bef54b5a63bfb3a7cfd1453aa870059042bfd..2e301bdbd94da56e59fa60b7cc98221923934555 100644
--- a/test/legacy_test/test_cuda_graph_partial_graph_static_run.py
+++ b/test/legacy_test/test_cuda_graph_partial_graph_static_run.py
@@ -124,7 +124,7 @@ class TestCudaGraphAttrAll(unittest.TestCase):
         x_data = np.random.random((3, 10)).astype('float32')
         cuda_graph_rst = self.run_with_cuda_graph(x_data)
         normal_run_rst = self.normal_run(x_data)
-        assert np.array_equal(cuda_graph_rst, normal_run_rst)
+        np.testing.assert_array_equal(cuda_graph_rst, normal_run_rst)
 
 
 if __name__ == "__main__":
diff --git a/test/legacy_test/test_detach.py b/test/legacy_test/test_detach.py
index a17178ac28716e56d05650bd3587d4abcb6e0159..4b3d25a1cde7352d9f5d494f346dd2bde2941521 100644
--- a/test/legacy_test/test_detach.py
+++ b/test/legacy_test/test_detach.py
@@ -172,7 +172,9 @@ class Test_Detach(unittest.TestCase):
     def test_NoDetachSingle_DetachMulti(self):
         array_no_detach_single = self.no_detach_single()
         array_detach_multi = self.detach_multi()
-        assert np.array_equal(array_no_detach_single, array_detach_multi)
+        np.testing.assert_array_equal(
+            array_no_detach_single, array_detach_multi
+        )
 
 
 class TestInplace(unittest.TestCase):
diff --git a/test/legacy_test/test_egr_python_api.py b/test/legacy_test/test_egr_python_api.py
index 8e7d07f8d2f41899e031778399cbff33ab852cd5..10f86571af1019636353f172c91c06d3f34d21a0 100644
--- a/test/legacy_test/test_egr_python_api.py
+++ b/test/legacy_test/test_egr_python_api.py
@@ -844,6 +844,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
 
         ori_place = egr_tensor.place
 
         new_arr = np.random.rand(4, 16, 16, 32).astype('float32')
+        self.assertFalse(np.array_equal(egr_tensor.numpy(), new_arr))
         egr_tensor.set_value(new_arr)
 
@@ -964,6 +965,7 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
         linear = paddle.nn.Linear(1, 3)
         ori_place = linear.weight.place
 
         new_weight = np.ones([1, 3]).astype('float32')
+        self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight))
         linear.weight.set_value(new_weight)
 
diff --git a/test/legacy_test/test_einsum_v2.py b/test/legacy_test/test_einsum_v2.py
index 434c59b5b804eceaa54caca3d723bd59f83b022a..6b4bf6958b946944c044bdb5689f4291a7e200ed 100644
--- a/test/legacy_test/test_einsum_v2.py
+++ b/test/legacy_test/test_einsum_v2.py
@@ -581,7 +581,7 @@ class TestSimpleUndiagonal(unittest.TestCase):
         A = paddle.to_tensor(np.array([1.0, 2.0]))
         A_expect = paddle.to_tensor([[1.0, 0.0], [0.0, 2.0]])
         A_actual = paddle.einsum('i->ii', A)
-        assert np.array_equal(A_expect.numpy(), A_actual.numpy())
+        np.testing.assert_array_equal(A_expect.numpy(), A_actual.numpy())
 
 
 class TestSimpleUndiagonal2(unittest.TestCase):
@@ -595,7 +595,7 @@ class TestSimpleUndiagonal2(unittest.TestCase):
         B = paddle.to_tensor(np.array([1.0, 1.0]))
         A_expect = paddle.to_tensor([[2.0, 0.0], [0.0, 4.0]])
         A_actual = paddle.einsum('i,j->ii', A, B)
-        assert np.array_equal(A_expect.numpy(), A_actual.numpy())
+        np.testing.assert_array_equal(A_expect.numpy(), A_actual.numpy())
 
 
 class TestSimpleComplexGrad(unittest.TestCase):
diff --git a/test/legacy_test/test_expand_as_v2_op.py b/test/legacy_test/test_expand_as_v2_op.py
index db866144eaf96190c82241886b576a0be14814f7..68c9801acb4d8a56867166b9d5b0db7beefb5529 100755
--- a/test/legacy_test/test_expand_as_v2_op.py
+++ b/test/legacy_test/test_expand_as_v2_op.py
@@ -280,7 +280,7 @@ class TestExpandAsV2API(unittest.TestCase):
             feed={"x": input1, "target_tensor": input2},
             fetch_list=[out_1],
         )
-        assert np.array_equal(res_1[0], np.tile(input1, (2, 1, 1)))
+        np.testing.assert_array_equal(res_1[0], np.tile(input1, (2, 1, 1)))
 
 
 if __name__ == "__main__":
diff --git a/test/legacy_test/test_expand_v2_op.py b/test/legacy_test/test_expand_v2_op.py
index 128bdda6da0198594934647e6d8f5d4063f2337c..9b9f6b631a2bf5e63988c3f4ef59c597a23aee55 100644
--- a/test/legacy_test/test_expand_v2_op.py
+++ b/test/legacy_test/test_expand_v2_op.py
@@ -318,9 +318,9 @@ class TestExpandV2API(unittest.TestCase):
             },
             fetch_list=[out_1, out_2, out_3],
         )
-        assert np.array_equal(res_1, np.tile(input, (1, 1)))
-        assert np.array_equal(res_2, np.tile(input, (1, 1)))
-        assert np.array_equal(res_3, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_1, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_2, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_3, np.tile(input, (1, 1)))
 
 
 class TestExpandInferShape(unittest.TestCase):
diff --git a/test/legacy_test/test_fill_constant_op.py b/test/legacy_test/test_fill_constant_op.py
index e74b24c6e5c197a43242e8d8663f4617ff6277e7..614cd29668d88c1edd3e2bfe97ef20710e411ade 100644
--- a/test/legacy_test/test_fill_constant_op.py
+++ b/test/legacy_test/test_fill_constant_op.py
@@ -338,14 +338,30 @@ class TestFillConstantAPI(unittest.TestCase):
             fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8],
         )
 
-        assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_3, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_4, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_5, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_6, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_7, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_8, np.full([1, 2], 1.1, dtype="float32"))
+        np.testing.assert_array_equal(
+            res_1, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_2, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_3, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_4, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_5, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_6, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_7, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_8, np.full([1, 2], 1.1, dtype="float32")
+        )
 
 
 class TestFillConstantImperative(unittest.TestCase):
@@ -369,16 +385,16 @@ class TestFillConstantImperative(unittest.TestCase):
             res4 = paddle.tensor.fill_constant(
                 shape=shape, dtype='int32', value=value
             )
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 res1.numpy(), np.full([1, 2], 1.1, dtype="float32")
             )
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 res2.numpy(), np.full([1, 2], 1.1, dtype="float32")
            )
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 res3.numpy(), np.full([1, 2], 1.1, dtype="float32")
             )
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 res4.numpy(), np.full([1, 2], 88, dtype="int32")
             )
 
diff --git a/test/legacy_test/test_flatten_op.py b/test/legacy_test/test_flatten_op.py
index 76f8ef4a1a462db6aff545d2bfa7c1de7a1491b5..0803db60c73c02dc4ad373bf444c8964a485f51e 100644
--- a/test/legacy_test/test_flatten_op.py
+++ b/test/legacy_test/test_flatten_op.py
@@ -90,7 +90,7 @@ class TestFlattenOpFP16(unittest.TestCase):
                 fetch_list=[y],
             )
 
-            assert np.array_equal(res[0].shape, [12 * 14])
+            np.testing.assert_array_equal(res[0].shape, [12 * 14])
 
 
 if __name__ == "__main__":
diff --git a/test/legacy_test/test_full_op.py b/test/legacy_test/test_full_op.py
index 473289447410ad615d30248005af1db79800cac3..9a5c95044927f5c6caba3148a97c1f932ba5bfea 100644
--- a/test/legacy_test/test_full_op.py
+++ b/test/legacy_test/test_full_op.py
@@ -74,13 +74,27 @@ class TestFullAPI(unittest.TestCase):
             fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7],
         )
 
-        assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_3, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_4, np.full([1, 2], 1.2, dtype="float32"))
-        assert np.array_equal(res_5, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_6, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_7, np.full([1, 2], 1.1, dtype="float32"))
+        np.testing.assert_array_equal(
+            res_1, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_2, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_3, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_4, np.full([1, 2], 1.2, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_5, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_6, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_7, np.full([1, 2], 1.1, dtype="float32")
+        )
 
     def test_api_eager(self):
         with fluid.dygraph.base.guard():
@@ -134,18 +148,36 @@ class TestFullAPI(unittest.TestCase):
                 out_7, dtype=np.float32, fill_value=np.abs(1.1)
             )
 
-            assert np.array_equal(out_1, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_2, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_3, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_4, np.full([1, 2], 1.2, dtype="float32"))
-            assert np.array_equal(out_5, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_6, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_7, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_8, np.full([2], 1.1, dtype="float32"))
-            assert np.array_equal(
+            np.testing.assert_array_equal(
+                out_1, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_2, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_3, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_4, np.full([1, 2], 1.2, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_5, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_6, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_7, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_8, np.full([2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
                 out_9, np.full([2, 2, 4], 1.1, dtype="float32")
             )
-            assert np.array_equal(out_10, np.full([1, 2], 1.1, dtype="float32"))
+            np.testing.assert_array_equal(
+                out_10, np.full([1, 2], 1.1, dtype="float32")
+            )
 
 
 class TestFullOpError(unittest.TestCase):
diff --git a/test/legacy_test/test_fused_attention_pass.py b/test/legacy_test/test_fused_attention_pass.py
index 0f64bf3458a121d50672bca2a87980a221bbe609..263ff746c710f48b913141bb3a454c3cf89969c0 100644
--- a/test/legacy_test/test_fused_attention_pass.py
+++ b/test/legacy_test/test_fused_attention_pass.py
@@ -185,7 +185,9 @@ class TestFusedAttentionPass(unittest.TestCase):
     def test_pass(self):
         fused_rst = self.get_rst(use_pass=True)
         non_fused_rst = self.get_rst()
-        assert np.allclose(fused_rst, non_fused_rst)
+        np.testing.assert_allclose(
+            fused_rst, non_fused_rst, rtol=1e-5, atol=1e-8
+        )
 
 
 if __name__ == "__main__":
diff --git a/test/legacy_test/test_fused_feedforward_pass.py b/test/legacy_test/test_fused_feedforward_pass.py
index e72de143b37bfc9ca3edfda0bbaf148a981219c1..1abbfec2201bdbf2da2b115a8e2b5d259bbc598b 100644
--- a/test/legacy_test/test_fused_feedforward_pass.py
+++ b/test/legacy_test/test_fused_feedforward_pass.py
@@ -165,7 +165,9 @@ class TestFusedFeedforwadPass(unittest.TestCase):
         self.use_dropout_2 = use_dropout_2
         ret_loss = self.get_value()
         ret_loss_fused = self.get_value(use_pass=True)
-        assert np.allclose(ret_loss, ret_loss_fused)
+        np.testing.assert_allclose(
+            ret_loss, ret_loss_fused, rtol=1e-5, atol=1e-8
+        )
 
 
 if __name__ == "__main__":
diff --git a/test/legacy_test/test_imperative_layer_children.py b/test/legacy_test/test_imperative_layer_children.py
index b0fb822f48c0ec9f59f575c15bcc0a1fbdfc90a7..bf440d511c5668451a8e41cb363ecc62e2c2d0fe 100644
--- a/test/legacy_test/test_imperative_layer_children.py
+++ b/test/legacy_test/test_imperative_layer_children.py
@@ -60,8 +60,8 @@ class TestLayerChildren(unittest.TestCase):
             self.ori_y1, self.ori_y2 = self.func_apply_init_weight()
 
         # compare ori dygraph and new egr
-        assert np.array_equal(self.ori_y1.numpy(), self.new_y1.numpy())
-        assert np.array_equal(self.ori_y2.numpy(), self.new_y2.numpy())
+        np.testing.assert_array_equal(self.ori_y1.numpy(), self.new_y1.numpy())
+        np.testing.assert_array_equal(self.ori_y2.numpy(), self.new_y2.numpy())
 
 
 if __name__ == '__main__':
diff --git a/test/legacy_test/test_imperative_numpy_bridge.py b/test/legacy_test/test_imperative_numpy_bridge.py
index 58059a295539dfb04a30428abe7016ea1ee0acc2..0adb69adcf878a4f6d8fe5817d9a161cd1d4cf05 100644
--- a/test/legacy_test/test_imperative_numpy_bridge.py
+++ b/test/legacy_test/test_imperative_numpy_bridge.py
@@ -44,6 +44,7 @@ class TestImperativeNumpyBridge(unittest.TestCase):
 
             data_np[0][0] = -1
             self.assertEqual(data_np[0][0], -1)
             self.assertNotEqual(var2[0][0].numpy(), -1)
+            self.assertFalse(np.array_equal(var2.numpy(), data_np))
 
diff --git a/test/legacy_test/test_layers.py b/test/legacy_test/test_layers.py
index 44986f0b122ab739c84d66fde28864fcdcfa045c..e345be328b505e3b7e05f4ba7873eab19d561430 100644
--- a/test/legacy_test/test_layers.py
+++ b/test/legacy_test/test_layers.py
@@ -637,8 +637,8 @@ class TestLayer(LayerTest):
             dy_rlt = emb2(base.to_variable(inp_word))
             dy_rlt_value = dy_rlt.numpy()
 
-        self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
+        np.testing.assert_allclose(static_rlt2[0], static_rlt)
+        np.testing.assert_allclose(dy_rlt_value[0], static_rlt)
 
         with self.dynamic_graph():
             custom_weight = np.random.randn(dict_size, 32).astype("float32")
diff --git a/test/legacy_test/test_linspace.py b/test/legacy_test/test_linspace.py
index f36a5e7c8cb1bf7e6b52cc99cc1e1aa55648a650..6468ad08c8fb559cd53cfa875929da00342b46bd 100644
--- a/test/legacy_test/test_linspace.py
+++ b/test/legacy_test/test_linspace.py
@@ -169,7 +169,7 @@ class
TestLinspaceAPI(unittest.TestCase): res_1, res_2, res_3 = exe.run( fluid.default_main_program(), fetch_list=[out_1, out_2, out_3] ) - assert np.array_equal(res_1, res_2) + np.testing.assert_array_equal(res_1, res_2) def test_name(self): with paddle_static_guard(): diff --git a/test/legacy_test/test_logspace.py b/test/legacy_test/test_logspace.py index e68dba46fefc6b632060c0b18ec77f29498cd905..0587846bc4841f6005507aa0e77c16744b6b5cec 100644 --- a/test/legacy_test/test_logspace.py +++ b/test/legacy_test/test_logspace.py @@ -179,7 +179,7 @@ class TestLogspaceAPI(unittest.TestCase): exe = paddle.static.Executor() res_1, res_2 = exe.run(prog, fetch_list=[out_1, out_2]) - assert np.array_equal(res_1, res_2) + np.testing.assert_array_equal(res_1, res_2) paddle.disable_static() def test_name(self): diff --git a/test/legacy_test/test_lrn_op.py b/test/legacy_test/test_lrn_op.py index ff087fa44823e363f808724cda63ba6094c08e0f..df9b1ebccf481888e5e57caced65727f80486870 100644 --- a/test/legacy_test/test_lrn_op.py +++ b/test/legacy_test/test_lrn_op.py @@ -371,7 +371,7 @@ class TestLocalResponseNormCAPI(unittest.TestCase): fetch_list=[y], ) - assert np.array_equal(res[0].shape, input.shape) + np.testing.assert_array_equal(res[0].shape, input.shape) if __name__ == "__main__": diff --git a/test/legacy_test/test_meshgrid_op.py b/test/legacy_test/test_meshgrid_op.py index 377699e3855ecdcfb439a6213ee621368894cdca..d2f7b0c2eca89197dce90967586bb63dc3fc0643 100644 --- a/test/legacy_test/test_meshgrid_op.py +++ b/test/legacy_test/test_meshgrid_op.py @@ -162,8 +162,8 @@ class TestMeshgridOp3(unittest.TestCase): feed={'x': input_1, 'y': input_2}, fetch_list=[grid_x, grid_y], ) - assert np.array_equal(res_1, out_1) - assert np.array_equal(res_2, out_2) + np.testing.assert_array_equal(res_1, out_1) + np.testing.assert_array_equal(res_2, out_2) class TestMeshgridOp4(unittest.TestCase): @@ -199,8 +199,8 @@ class TestMeshgridOp4(unittest.TestCase): fetch_list=[grid_x, grid_y], ) - assert np.array_equal(res_1, out_1) - assert np.array_equal(res_2, out_2) + np.testing.assert_array_equal(res_1, out_1) + np.testing.assert_array_equal(res_2, out_2) class TestMeshgridOp5(unittest.TestCase): @@ -236,8 +236,8 @@ class TestMeshgridOp5(unittest.TestCase): fetch_list=[grid_x, grid_y], ) - assert np.array_equal(res_1, out_1) - assert np.array_equal(res_2, out_2) + np.testing.assert_array_equal(res_1, out_1) + np.testing.assert_array_equal(res_2, out_2) class TestMeshgridOp6(unittest.TestCase): @@ -262,8 +262,8 @@ class TestMeshgridOp6(unittest.TestCase): tensor_4 = fluid.dygraph.to_variable(input_4) res_3, res_4 = paddle.tensor.meshgrid(tensor_3, tensor_4) - assert np.array_equal(res_3.shape, [100, 200]) - assert np.array_equal(res_4.shape, [100, 200]) + np.testing.assert_array_equal(res_3.shape, [100, 200]) + np.testing.assert_array_equal(res_4.shape, [100, 200]) class TestMeshgridOp7(unittest.TestCase): @@ -288,8 +288,8 @@ class TestMeshgridOp7(unittest.TestCase): tensor_4 = fluid.dygraph.to_variable(input_4) res_3, res_4 = paddle.tensor.meshgrid([tensor_3, tensor_4]) - assert np.array_equal(res_3.shape, [100, 200]) - assert np.array_equal(res_4.shape, [100, 200]) + np.testing.assert_array_equal(res_3.shape, [100, 200]) + np.testing.assert_array_equal(res_4.shape, [100, 200]) class TestMeshgridOp8(unittest.TestCase): @@ -314,8 +314,8 @@ class TestMeshgridOp8(unittest.TestCase): tensor_4 = fluid.dygraph.to_variable(input_4) res_3, res_4 = paddle.tensor.meshgrid((tensor_3, tensor_4)) - assert np.array_equal(res_3.shape, [100, 200]) 
- assert np.array_equal(res_4.shape, [100, 200]) + np.testing.assert_array_equal(res_3.shape, [100, 200]) + np.testing.assert_array_equal(res_4.shape, [100, 200]) class TestMeshGrid_ZeroDim(TestMeshgridOp): diff --git a/test/legacy_test/test_multiprocess_dataloader_dataset.py b/test/legacy_test/test_multiprocess_dataloader_dataset.py index 9e2b89b12860c8bd7e3b2aa0144d305105843ad0..7eb99ef7006e75d3b59aeedcc950e7d4d74db61b 100755 --- a/test/legacy_test/test_multiprocess_dataloader_dataset.py +++ b/test/legacy_test/test_multiprocess_dataloader_dataset.py @@ -84,8 +84,8 @@ class TestTensorDataset(unittest.TestCase): assert label.shape == [1, 1] assert isinstance(input, fluid.core.eager.Tensor) assert isinstance(label, fluid.core.eager.Tensor) - assert np.allclose(input.numpy(), input_np[i]) - assert np.allclose(label.numpy(), label_np[i]) + np.testing.assert_allclose(input.numpy(), input_np[i]) + np.testing.assert_allclose(label.numpy(), label_np[i]) def test_main(self): places = [paddle.CPUPlace()] @@ -109,10 +109,10 @@ class TestComposeDataset(unittest.TestCase): input1, label1, input2, label2 = dataset[i] input1_t, label1_t = dataset1[i] input2_t, label2_t = dataset2[i] - assert np.allclose(input1, input1_t) - assert np.allclose(label1, label1_t) - assert np.allclose(input2, input2_t) - assert np.allclose(label2, label2_t) + np.testing.assert_allclose(input1, input1_t) + np.testing.assert_allclose(label1, label1_t) + np.testing.assert_allclose(input2, input2_t) + np.testing.assert_allclose(label2, label2_t) class TestRandomSplitApi(unittest.TestCase): @@ -226,12 +226,12 @@ class TestChainDataset(unittest.TestCase): idx = 0 for image, label in iter(dataset1): - assert np.allclose(image, samples[idx][0]) - assert np.allclose(label, samples[idx][1]) + np.testing.assert_allclose(image, samples[idx][0]) + np.testing.assert_allclose(label, samples[idx][1]) idx += 1 for image, label in iter(dataset2): - assert np.allclose(image, samples[idx][0]) - assert np.allclose(label, samples[idx][1]) + np.testing.assert_allclose(image, samples[idx][0]) + np.testing.assert_allclose(label, samples[idx][1]) idx += 1 def test_main(self): diff --git a/test/legacy_test/test_nan_to_num_op.py b/test/legacy_test/test_nan_to_num_op.py index 2aad0ff5bdac324b6d0fa0da6a1d17ad4efb07e7..a5e0bbe62f4e23db9a7b5c1f134358dad8cdc9e7 100644 --- a/test/legacy_test/test_nan_to_num_op.py +++ b/test/legacy_test/test_nan_to_num_op.py @@ -79,10 +79,10 @@ class TestNanToNum(unittest.TestCase): exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': x_np}, fetch_list=[out1, out2, out3, out4]) - self.assertTrue(np.allclose(out1_np, res[0])) - self.assertTrue(np.allclose(out2_np, res[1])) - self.assertTrue(np.allclose(out3_np, res[2])) - self.assertTrue(np.allclose(out4_np, res[3])) + np.testing.assert_allclose(out1_np, res[0]) + np.testing.assert_allclose(out2_np, res[1]) + np.testing.assert_allclose(out3_np, res[2]) + np.testing.assert_allclose(out4_np, res[3]) def test_dygraph(self): paddle.disable_static(place=self.place) @@ -97,23 +97,23 @@ class TestNanToNum(unittest.TestCase): out_tensor = paddle.nan_to_num(x_tensor) out_np = np_nan_to_num(x_np) - self.assertTrue(np.allclose(out_tensor.numpy(), out_np)) + np.testing.assert_allclose(out_tensor.numpy(), out_np) out_tensor = paddle.nan_to_num(x_tensor, 1.0, None, None) out_np = np_nan_to_num(x_np, 1, None, None) - self.assertTrue(np.allclose(out_tensor.numpy(), out_np)) + np.testing.assert_allclose(out_tensor.numpy(), out_np) out_tensor = paddle.nan_to_num(x_tensor, 1.0, 2.0, 
None) out_np = np_nan_to_num(x_np, 1, 2, None) - self.assertTrue(np.allclose(out_tensor.numpy(), out_np)) + np.testing.assert_allclose(out_tensor.numpy(), out_np) out_tensor = paddle.nan_to_num(x_tensor, 1.0, None, -10.0) out_np = np_nan_to_num(x_np, 1, None, -10) - self.assertTrue(np.allclose(out_tensor.numpy(), out_np)) + np.testing.assert_allclose(out_tensor.numpy(), out_np) out_tensor = paddle.nan_to_num(x_tensor, 1.0, 100.0, -10.0) out_np = np_nan_to_num(x_np, 1, 100, -10) - self.assertTrue(np.allclose(out_tensor.numpy(), out_np)) + np.testing.assert_allclose(out_tensor.numpy(), out_np) paddle.enable_static() @@ -128,7 +128,7 @@ class TestNanToNum(unittest.TestCase): dx = paddle.grad(y, x_tensor)[0].numpy() np_grad = np_nan_to_num_grad(x_np, np.ones_like(x_np)) - self.assertTrue(np.allclose(np_grad, dx)) + np.testing.assert_allclose(np_grad, dx) paddle.enable_static() diff --git a/test/legacy_test/test_number_count_op.py b/test/legacy_test/test_number_count_op.py index d2a1dcbf5282430cc3653ec726df7eecbf3c7689..07185a8dfeefc19c5dd0fadf05803715c3b6e235 100644 --- a/test/legacy_test/test_number_count_op.py +++ b/test/legacy_test/test_number_count_op.py @@ -71,7 +71,7 @@ class TestNumberCountAPI(unittest.TestCase): paddle.disable_static() x = paddle.to_tensor(self.x) out = utils._number_count(x, self.upper_num) - assert np.allclose(out.numpy(), self.out) + np.testing.assert_allclose(out.numpy(), self.out) if __name__ == '__main__': diff --git a/test/legacy_test/test_numel_op.py b/test/legacy_test/test_numel_op.py index 9d87d242a87f8ea7af1bb982c02fc5492c72fc6b..b4b18ccbe07f773739f5b6763f8226ef5da5dce6 100644 --- a/test/legacy_test/test_numel_op.py +++ b/test/legacy_test/test_numel_op.py @@ -120,10 +120,10 @@ class TestNumelAPI(unittest.TestCase): }, fetch_list=[out_1, out_2], ) - assert np.array_equal( + np.testing.assert_array_equal( res_1, np.array(np.size(input_1)).astype("int64") ) - assert np.array_equal( + np.testing.assert_array_equal( res_2, np.array(np.size(input_2)).astype("int64") ) @@ -135,8 +135,8 @@ class TestNumelAPI(unittest.TestCase): x_2 = paddle.to_tensor(input_2) out_1 = paddle.numel(x_1) out_2 = paddle.numel(x_2) - assert np.array_equal(out_1.numpy().item(0), np.size(input_1)) - assert np.array_equal(out_2.numpy().item(0), np.size(input_2)) + np.testing.assert_array_equal(out_1.numpy().item(0), np.size(input_1)) + np.testing.assert_array_equal(out_2.numpy().item(0), np.size(input_2)) paddle.enable_static() def test_error(self): diff --git a/test/legacy_test/test_pixel_shuffle_op.py b/test/legacy_test/test_pixel_shuffle_op.py index c20ba8678d80684015c06de41f3fee868c69bcb5..7c6f18479fd11db8bb15f42f83228ceb25a0bbbe 100644 --- a/test/legacy_test/test_pixel_shuffle_op.py +++ b/test/legacy_test/test_pixel_shuffle_op.py @@ -186,17 +186,17 @@ class TestPixelShuffleAPI(unittest.TestCase): feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, - ) + )[0] res_2 = exe.run( fluid.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, - ) + )[0] - assert np.allclose(res_1, self.out_1_np) - assert np.allclose(res_2, self.out_2_np) + np.testing.assert_allclose(res_1, self.out_1_np) + np.testing.assert_allclose(res_2, self.out_2_np) def test_api_fp16(self): paddle.enable_static() @@ -226,15 +226,15 @@ class TestPixelShuffleAPI(unittest.TestCase): feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, - ) + )[0] res_2 = exe.run( fluid.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, - ) - assert np.allclose(res_1, 
out_1_np) - assert np.allclose(res_2, out_2_np) + )[0] + np.testing.assert_allclose(res_1, out_1_np) + np.testing.assert_allclose(res_2, out_2_np) # same test between layer and functional in this op. def test_static_graph_layer(self): @@ -264,17 +264,17 @@ class TestPixelShuffleAPI(unittest.TestCase): feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, - ) + )[0] res_2 = exe.run( fluid.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, - ) + )[0] - assert np.allclose(res_1, out_1_np) - assert np.allclose(res_2, out_2_np) + np.testing.assert_allclose(res_1, out_1_np, rtol=1e-5, atol=1e-8) + np.testing.assert_allclose(res_2, out_2_np, rtol=1e-5, atol=1e-8) def run_dygraph(self, up_factor, data_format): n, c, h, w = 2, 9, 4, 4 diff --git a/test/legacy_test/test_pixel_unshuffle.py b/test/legacy_test/test_pixel_unshuffle.py index b2cfd457603c429f85faca1b8bb6e4a2004d50d0..2353ca0192c7e243c4475e98b237e9d4b833e6fa 100644 --- a/test/legacy_test/test_pixel_unshuffle.py +++ b/test/legacy_test/test_pixel_unshuffle.py @@ -225,17 +225,17 @@ class TestPixelUnshuffleAPI(unittest.TestCase): feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, - ) + )[0] res_2 = exe.run( fluid.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, - ) + )[0] - assert np.allclose(res_1, self.out_1_np) - assert np.allclose(res_2, self.out_2_np) + np.testing.assert_allclose(res_1, self.out_1_np) + np.testing.assert_allclose(res_2, self.out_2_np) # same test between layer and functional in this op. def test_static_graph_layer(self): @@ -267,17 +267,17 @@ class TestPixelUnshuffleAPI(unittest.TestCase): feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, - ) + )[0] res_2 = exe.run( fluid.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, - ) + )[0] - assert np.allclose(res_1, out_1_np) - assert np.allclose(res_2, out_2_np) + np.testing.assert_allclose(res_1, out_1_np) + np.testing.assert_allclose(res_2, out_2_np) def run_dygraph(self, down_factor, data_format): '''run_dygraph''' diff --git a/test/legacy_test/test_pool3d_api.py b/test/legacy_test/test_pool3d_api.py index 80f22f1467e27fd15d6e9536659051729fdc8470..a9e849fb91d41862cb1ef425681a6c0518799f81 100644 --- a/test/legacy_test/test_pool3d_api.py +++ b/test/legacy_test/test_pool3d_api.py @@ -391,7 +391,7 @@ class TestPool3D_API(unittest.TestCase): fetch_list=[y], ) - assert np.array_equal(res[0].shape, [1, 2, 1, 16, 16]) + np.testing.assert_array_equal(res[0].shape, [1, 2, 1, 16, 16]) def test_static_bf16_gpu(self): paddle.enable_static() @@ -421,7 +421,7 @@ class TestPool3D_API(unittest.TestCase): fetch_list=[y], ) - assert np.array_equal(res[0].shape, [1, 2, 1, 16, 16]) + np.testing.assert_array_equal(res[0].shape, [1, 2, 1, 16, 16]) class TestPool3DError_API(unittest.TestCase): diff --git a/test/legacy_test/test_prune_gate_by_capacity_op.py b/test/legacy_test/test_prune_gate_by_capacity_op.py index e52d67185e374a2567b4729307076f09886cdd4b..d6103be8d13d5a3d627e5f32e5dea4c970e59f01 100644 --- a/test/legacy_test/test_prune_gate_by_capacity_op.py +++ b/test/legacy_test/test_prune_gate_by_capacity_op.py @@ -62,7 +62,7 @@ def prune_gate_by_capacity(gate_idx, expert_count, n_expert, n_worker): def assert_allclose(output, expected, n_expert): c1 = count(output, n_expert) c2 = count(expected, n_expert) - assert np.allclose(c1, c2) + np.testing.assert_allclose(c1, c2) @unittest.skipIf( diff --git a/test/legacy_test/test_random_routing_op.py 
b/test/legacy_test/test_random_routing_op.py index e7f0412eecfef0b5042a247a6959937d3f64f603..8977e300110447d7385af03e4737589b3e47b2d2 100644 --- a/test/legacy_test/test_random_routing_op.py +++ b/test/legacy_test/test_random_routing_op.py @@ -59,7 +59,7 @@ class TestNumberCountAPIFp32(unittest.TestCase): value = paddle.to_tensor(self.topk_value) prob = paddle.to_tensor(self.prob) out = utils._random_routing(x, value, prob) - assert np.allclose(out.numpy(), self.out) + np.testing.assert_allclose(out.numpy(), self.out) @unittest.skipIf( diff --git a/test/legacy_test/test_reshape_op.py b/test/legacy_test/test_reshape_op.py index 2feecb5005b14db2bf648ba1f14e1f6c90176a1a..dc85f407aceab244290ad8131f341afc2b8d7ee3 100755 --- a/test/legacy_test/test_reshape_op.py +++ b/test/legacy_test/test_reshape_op.py @@ -400,10 +400,10 @@ class TestReshapeAPI(unittest.TestCase): fetch_list=[out_1, out_2, out_3, out_4], ) - assert np.array_equal(res_1, input.reshape(shape)) - assert np.array_equal(res_2, input.reshape(shape)) - assert np.array_equal(res_3, input.reshape([5, 10])) - assert np.array_equal(res_4, input.reshape(shape)) + np.testing.assert_array_equal(res_1, input.reshape(shape)) + np.testing.assert_array_equal(res_2, input.reshape(shape)) + np.testing.assert_array_equal(res_3, input.reshape([5, 10])) + np.testing.assert_array_equal(res_4, input.reshape(shape)) def test_paddle_api(self): self._set_paddle_api() @@ -424,9 +424,9 @@ class TestReshapeAPI(unittest.TestCase): shape_tensor = self.to_tensor(np.array([2, 5, 5]).astype("int32")) out_3 = self.reshape(x, shape=shape_tensor) - assert np.array_equal(out_1.numpy(), input.reshape(shape)) - assert np.array_equal(out_2.numpy(), input.reshape([5, 10])) - assert np.array_equal(out_3.numpy(), input.reshape(shape)) + np.testing.assert_array_equal(out_1.numpy(), input.reshape(shape)) + np.testing.assert_array_equal(out_2.numpy(), input.reshape([5, 10])) + np.testing.assert_array_equal(out_3.numpy(), input.reshape(shape)) class TestStaticReshape_(TestReshapeAPI): @@ -448,9 +448,9 @@ class TestStaticReshape_(TestReshapeAPI): shape_tensor = self.to_tensor(np.array([2, 5, 5]).astype("int32")) out_3 = self.reshape(x, shape=shape_tensor) - assert np.array_equal(out_1.numpy(), input.reshape(shape)) - assert np.array_equal(out_2.numpy(), input.reshape(shape)) - assert np.array_equal(out_3.numpy(), input.reshape(shape)) + np.testing.assert_array_equal(out_1.numpy(), input.reshape(shape)) + np.testing.assert_array_equal(out_2.numpy(), input.reshape(shape)) + np.testing.assert_array_equal(out_3.numpy(), input.reshape(shape)) # Test Input Error diff --git a/test/legacy_test/test_size_op.py b/test/legacy_test/test_size_op.py index dfff90b742fcae869ce843551e201b972c9bab63..0bb3ac64bce75e926197188909f996151569ea90 100644 --- a/test/legacy_test/test_size_op.py +++ b/test/legacy_test/test_size_op.py @@ -83,10 +83,10 @@ class TestSizeAPI(unittest.TestCase): }, fetch_list=[out_1, out_2], ) - assert np.array_equal( + np.testing.assert_array_equal( res_1, np.array(np.size(input_1)).astype("int64") ) - assert np.array_equal( + np.testing.assert_array_equal( res_2, np.array(np.size(input_2)).astype("int64") ) @@ -98,8 +98,8 @@ class TestSizeAPI(unittest.TestCase): x_2 = paddle.to_tensor(input_2) out_1 = paddle.numel(x_1) out_2 = paddle.numel(x_2) - assert np.array_equal(out_1.numpy().item(0), np.size(input_1)) - assert np.array_equal(out_2.numpy().item(0), np.size(input_2)) + np.testing.assert_array_equal(out_1.numpy().item(0), np.size(input_1)) + 
np.testing.assert_array_equal(out_2.numpy().item(0), np.size(input_2)) paddle.enable_static() def test_error(self): diff --git a/test/legacy_test/test_slice_op.py b/test/legacy_test/test_slice_op.py index 5be9e8df440b0e0cce39ba23e7642395b24018f2..271824bd2fd5ee5cb9937cf468c5bed4309959e5 100644 --- a/test/legacy_test/test_slice_op.py +++ b/test/legacy_test/test_slice_op.py @@ -631,13 +631,13 @@ class TestSliceAPI(unittest.TestCase): fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7], ) - assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) + np.testing.assert_array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) + np.testing.assert_array_equal(res_5, input[-3:3, 0:100, 2:-1, :]) + np.testing.assert_array_equal(res_6, input[-3:3, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_7, input[-1, 0:100, :, 2:-1]) class TestSliceApiWithTensor(unittest.TestCase): diff --git a/test/legacy_test/test_sparse_conv_op.py b/test/legacy_test/test_sparse_conv_op.py index 7ca55d58a6a4bbcc9668a7d8988c36c416fc5467..2d2af3c11fcb6c0cf3ddc91971cd4dcaa098cecd 100644 --- a/test/legacy_test/test_sparse_conv_op.py +++ b/test/legacy_test/test_sparse_conv_op.py @@ -94,7 +94,7 @@ class TestSparseConv(unittest.TestCase): ) out.backward(out) out = paddle.sparse.coalesce(out) - assert np.array_equal(correct_out_values, out.values().numpy()) + np.testing.assert_array_equal(correct_out_values, out.values().numpy()) def test_subm_conv2d(self): indices = [[0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]] @@ -126,7 +126,9 @@ class TestSparseConv(unittest.TestCase): y = paddle.sparse.nn.functional.subm_conv3d( sparse_x, weight, key='subm_conv' ) - assert np.array_equal(sparse_x.indices().numpy(), y.indices().numpy()) + np.testing.assert_array_equal( + sparse_x.indices().numpy(), y.indices().numpy() + ) def test_Conv2D(self): # (3, non_zero_num), 3-D:(N, H, W) @@ -223,7 +225,7 @@ class TestSparseConv(unittest.TestCase): sparse_out = subm_conv3d(sparse_input) # the output shape of subm_conv is same as input shape - assert np.array_equal(indices, sparse_out.indices().numpy()) + np.testing.assert_array_equal(indices, sparse_out.indices().numpy()) # test errors with self.assertRaises(ValueError): @@ -294,14 +296,16 @@ class TestSparseConv(unittest.TestCase): dense_out = sp_out.to_dense() sp_loss = dense_out.mean() sp_loss.backward() - assert np.allclose(out.numpy(), dense_out.numpy(), atol=1e-3, rtol=1e-3) - assert np.allclose( + np.testing.assert_allclose( + out.numpy(), dense_out.numpy(), atol=1e-3, rtol=1e-3 + ) + np.testing.assert_allclose( conv3d.weight.grad.numpy().transpose(2, 3, 4, 1, 0), sp_conv3d.weight.grad.numpy(), atol=1e-3, rtol=1e-3, ) - assert np.allclose( + np.testing.assert_allclose( conv3d.bias.grad.numpy(), sp_conv3d.bias.grad.numpy(), atol=1e-5, diff --git a/test/legacy_test/test_sparse_copy_op.py b/test/legacy_test/test_sparse_copy_op.py index 237b9806506768cfb7a6a7b1a8a0ab29e7b85086..a97148c703fc5e202abb77c0ce3575f7aa3c6f03 100644 --- 
a/test/legacy_test/test_sparse_copy_op.py +++ b/test/legacy_test/test_sparse_copy_op.py @@ -30,7 +30,7 @@ class TestSparseCopy(unittest.TestCase): dense_x_2 = paddle.to_tensor(np_x_2, dtype='float32') coo_x_2 = dense_x_2.to_sparse_coo(2) coo_x_2.copy_(coo_x, True) - assert np.array_equal(np_values, coo_x_2.values().numpy()) + np.testing.assert_array_equal(np_values, coo_x_2.values().numpy()) def test_copy_sparse_csr(self): np_x = [[0, 1.0, 0], [2.0, 0, 0], [0, 3.0, 0]] @@ -42,4 +42,4 @@ class TestSparseCopy(unittest.TestCase): dense_x_2 = paddle.to_tensor(np_x_2, dtype='float32') csr_x_2 = dense_x_2.to_sparse_csr() csr_x_2.copy_(csr_x, True) - assert np.array_equal(np_values, csr_x_2.values().numpy()) + np.testing.assert_array_equal(np_values, csr_x_2.values().numpy()) diff --git a/test/legacy_test/test_sparse_model.py b/test/legacy_test/test_sparse_model.py index 2b7c646b3ab497a8c0ab3d29e5b4628973b050a9..9e71757f90342394e0eef6ff17aaea2c9b2257f6 100644 --- a/test/legacy_test/test_sparse_model.py +++ b/test/legacy_test/test_sparse_model.py @@ -54,13 +54,19 @@ class TestGradientAdd(unittest.TestCase): sparse_loss = sparse_out.values().mean() sparse_loss.backward(retain_graph=True) - assert np.allclose(dense_out.numpy(), sparse_out.to_dense().numpy()) - assert np.allclose(x.grad.numpy(), sparse_x.grad.to_dense().numpy()) + np.testing.assert_allclose( + dense_out.numpy(), sparse_out.to_dense().numpy() + ) + np.testing.assert_allclose( + x.grad.numpy(), sparse_x.grad.to_dense().numpy() + ) loss.backward() sparse_loss.backward() - assert np.allclose(x.grad.numpy(), sparse_x.grad.to_dense().numpy()) + np.testing.assert_allclose( + x.grad.numpy(), sparse_x.grad.to_dense().numpy() + ) if __name__ == "__main__": diff --git a/test/legacy_test/test_sparse_norm_op.py b/test/legacy_test/test_sparse_norm_op.py index 25a253d9787b44d9935f38ca3d0feb0413d888ba..7d745ada2da7fc69578fd6724e8f6bcf0c8a96f3 100644 --- a/test/legacy_test/test_sparse_norm_op.py +++ b/test/legacy_test/test_sparse_norm_op.py @@ -48,7 +48,7 @@ class TestSparseBatchNorm(unittest.TestCase): sparse_y = sparse_batch_norm(sparse_x) # compare the result with dense batch_norm - assert np.allclose( + np.testing.assert_allclose( dense_y.flatten().numpy(), sparse_y.values().flatten().numpy(), atol=1e-5, @@ -57,7 +57,7 @@ class TestSparseBatchNorm(unittest.TestCase): # test backward sparse_y.backward(sparse_y) - assert np.allclose( + np.testing.assert_allclose( dense_x.grad.flatten().numpy(), sparse_x.grad.values().flatten().numpy(), atol=1e-5, @@ -85,7 +85,9 @@ class TestSparseBatchNorm(unittest.TestCase): dense_bn = paddle.nn.BatchNorm1D(channels) dense_x = dense_x.reshape((-1, dense_x.shape[-1])) dense_out = dense_bn(dense_x) - assert np.allclose(dense_out.numpy(), batch_norm_out.values().numpy()) + np.testing.assert_allclose( + dense_out.numpy(), batch_norm_out.values().numpy() + ) # [1, 6, 6, 6, 3] def check(self, shape): @@ -141,7 +143,7 @@ class TestSyncBatchNorm(unittest.TestCase): dense_sync_bn = paddle.nn.SyncBatchNorm(2) x = x.reshape((-1, x.shape[-1])) dense_hidden = dense_sync_bn(x) - assert np.allclose( + np.testing.assert_allclose( sparse_hidden.values().numpy(), dense_hidden.numpy() ) diff --git a/test/legacy_test/test_sparse_pooling_op.py b/test/legacy_test/test_sparse_pooling_op.py index 1a031329fa584143608f07804bd2a4aa757e632d..f8d0cabd304ff0da16c6633a5dccf7078490c122 100644 --- a/test/legacy_test/test_sparse_pooling_op.py +++ b/test/legacy_test/test_sparse_pooling_op.py @@ -64,8 +64,10 @@ class 
TestMaxPool3DFunc(unittest.TestCase): dense_out.backward(dense_out) # compare with dense - assert np.allclose(dense_out.numpy(), out.numpy()) - assert np.allclose(dense_x.grad.numpy(), self.dense_x.grad.numpy()) + np.testing.assert_allclose(dense_out.numpy(), out.numpy()) + np.testing.assert_allclose( + dense_x.grad.numpy(), self.dense_x.grad.numpy() + ) class TestStride(TestMaxPool3DFunc): @@ -111,7 +113,7 @@ class TestMaxPool3DAPI(unittest.TestCase): dense_out = paddle.nn.functional.max_pool3d( dense_x, 3, data_format='NDHWC' ) - assert np.allclose(dense_out.numpy(), out.numpy()) + np.testing.assert_allclose(dense_out.numpy(), out.numpy()) if __name__ == "__main__": diff --git a/test/legacy_test/test_sparse_utils_op.py b/test/legacy_test/test_sparse_utils_op.py index 60cf3a7a5208e5c05f5df3b843429b17bff5a46e..2b7583db921755a4c8dfdade1e4e4e670aa99401 100644 --- a/test/legacy_test/test_sparse_utils_op.py +++ b/test/legacy_test/test_sparse_utils_op.py @@ -33,17 +33,17 @@ class TestSparseCreate(unittest.TestCase): dense_indices, dense_elements, dense_shape, stop_gradient=False ) # test the to_string.py - assert np.array_equal(indices, coo.indices().numpy()) - assert np.array_equal(values, coo.values().numpy()) + np.testing.assert_array_equal(indices, coo.indices().numpy()) + np.testing.assert_array_equal(values, coo.values().numpy()) def test_create_coo_by_np(self): indices = [[0, 1, 2], [1, 2, 0]] values = [1.0, 2.0, 3.0] dense_shape = [3, 3] coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape) - assert np.array_equal(3, coo.nnz()) - assert np.array_equal(indices, coo.indices().numpy()) - assert np.array_equal(values, coo.values().numpy()) + np.testing.assert_array_equal(3, coo.nnz()) + np.testing.assert_array_equal(indices, coo.indices().numpy()) + np.testing.assert_array_equal(values, coo.values().numpy()) def test_create_csr_by_tensor(self): crows = [0, 2, 3, 5] @@ -69,10 +69,10 @@ class TestSparseCreate(unittest.TestCase): dense_shape = [3, 4] csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape) # test the to_string.py - assert np.array_equal(5, csr.nnz()) - assert np.array_equal(crows, csr.crows().numpy()) - assert np.array_equal(cols, csr.cols().numpy()) - assert np.array_equal(values, csr.values().numpy()) + np.testing.assert_array_equal(5, csr.nnz()) + np.testing.assert_array_equal(crows, csr.crows().numpy()) + np.testing.assert_array_equal(cols, csr.cols().numpy()) + np.testing.assert_array_equal(values, csr.values().numpy()) def test_place(self): place = core.CPUPlace() @@ -132,8 +132,8 @@ class TestSparseConvert(unittest.TestCase): values = [1.0, 2.0, 3.0, 4.0, 5.0] dense_x = paddle.to_tensor(x, dtype='float32', stop_gradient=False) out = dense_x.to_sparse_coo(2) - assert np.array_equal(out.indices().numpy(), indices) - assert np.array_equal(out.values().numpy(), values) + np.testing.assert_array_equal(out.indices().numpy(), indices) + np.testing.assert_array_equal(out.values().numpy(), values) # test to_sparse_coo_grad backward out_grad_indices = [[0, 1], [0, 1]] out_grad_values = [2.0, 3.0] @@ -144,7 +144,9 @@ class TestSparseConvert(unittest.TestCase): stop_gradient=True, ) out.backward(out_grad) - assert np.array_equal(dense_x.grad.numpy(), out_grad.to_dense().numpy()) + np.testing.assert_array_equal( + dense_x.grad.numpy(), out_grad.to_dense().numpy() + ) def test_coo_to_dense(self): indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]] @@ -168,7 +170,7 @@ class TestSparseConvert(unittest.TestCase): dense_tensor.backward(paddle.to_tensor(out_grad)) # 
mask the out_grad by sparse_x.indices() correct_x_grad = [2.0, 4.0, 7.0, 9.0, 10.0] - assert np.array_equal( + np.testing.assert_array_equal( correct_x_grad, sparse_x.grad.values().numpy() ) @@ -182,7 +184,7 @@ class TestSparseConvert(unittest.TestCase): sparse_x_cpu.retain_grads() dense_tensor_cpu = sparse_x_cpu.to_dense() dense_tensor_cpu.backward(paddle.to_tensor(out_grad)) - assert np.array_equal( + np.testing.assert_array_equal( correct_x_grad, sparse_x_cpu.grad.values().numpy() ) @@ -193,12 +195,12 @@ class TestSparseConvert(unittest.TestCase): values = [1, 2, 3, 4, 5] dense_x = paddle.to_tensor(x) out = dense_x.to_sparse_csr() - assert np.array_equal(out.crows().numpy(), crows) - assert np.array_equal(out.cols().numpy(), cols) - assert np.array_equal(out.values().numpy(), values) + np.testing.assert_array_equal(out.crows().numpy(), crows) + np.testing.assert_array_equal(out.cols().numpy(), cols) + np.testing.assert_array_equal(out.values().numpy(), values) dense_tensor = out.to_dense() - assert np.array_equal(dense_tensor.numpy(), x) + np.testing.assert_array_equal(dense_tensor.numpy(), x) def test_coo_values_grad(self): indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]] @@ -214,7 +216,7 @@ class TestSparseConvert(unittest.TestCase): out_grad = [2.0, 3.0, 5.0, 8.0, 9.0] # test coo_values_grad values_tensor.backward(paddle.to_tensor(out_grad)) - assert np.array_equal(out_grad, sparse_x.grad.values().numpy()) + np.testing.assert_array_equal(out_grad, sparse_x.grad.values().numpy()) indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]] values = [ [1.0, 1.0], @@ -240,7 +242,7 @@ class TestSparseConvert(unittest.TestCase): ] # test coo_values_grad values_tensor.backward(paddle.to_tensor(out_grad)) - assert np.array_equal(out_grad, sparse_x.grad.values().numpy()) + np.testing.assert_array_equal(out_grad, sparse_x.grad.values().numpy()) def test_sparse_coo_tensor_grad(self): for device in devices: @@ -266,7 +268,9 @@ class TestSparseConvert(unittest.TestCase): ) sparse_x.backward(sparse_out_grad) correct_values_grad = [0, 3] - assert np.array_equal(correct_values_grad, values.grad.numpy()) + np.testing.assert_array_equal( + correct_values_grad, values.grad.numpy() + ) # test the non-zero values is a vector values = [[1, 1], [2, 2]] @@ -283,7 +287,9 @@ class TestSparseConvert(unittest.TestCase): ) sparse_x.backward(sparse_out_grad) correct_values_grad = [[0, 0], [3, 3]] - assert np.array_equal(correct_values_grad, values.grad.numpy()) + np.testing.assert_array_equal( + correct_values_grad, values.grad.numpy() + ) def test_sparse_coo_tensor_sorted(self): for device in devices: @@ -300,10 +306,12 @@ class TestSparseConvert(unittest.TestCase): sparse_x = paddle.sparse.coalesce(sparse_x) indices_sorted = [[0, 1], [1, 0]] values_sorted = [5.0, 1.0] - assert np.array_equal( + np.testing.assert_array_equal( indices_sorted, sparse_x.indices().numpy() ) - assert np.array_equal(values_sorted, sparse_x.values().numpy()) + np.testing.assert_array_equal( + values_sorted, sparse_x.values().numpy() + ) # test the non-zero values is a vector values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]] @@ -311,16 +319,18 @@ class TestSparseConvert(unittest.TestCase): sparse_x = paddle.sparse.sparse_coo_tensor(indices, values) sparse_x = paddle.sparse.coalesce(sparse_x) values_sorted = [[5.0, 5.0], [1.0, 1.0]] - assert np.array_equal( + np.testing.assert_array_equal( indices_sorted, sparse_x.indices().numpy() ) - assert np.array_equal(values_sorted, sparse_x.values().numpy()) + np.testing.assert_array_equal( + values_sorted, 
sparse_x.values().numpy() + ) def test_batch_csr(self): def verify(dense_x): sparse_x = dense_x.to_sparse_csr() out = sparse_x.to_dense() - assert np.allclose(out.numpy(), dense_x.numpy()) + np.testing.assert_allclose(out.numpy(), dense_x.numpy()) shape = np.random.randint(low=1, high=10, size=3) shape = list(shape) diff --git a/test/legacy_test/test_split_op.py b/test/legacy_test/test_split_op.py index b6d45b4e455d85dcf02fd52f95f7c3252348fece..8f7781f925ceae0a3fcb79b6beb01376ed220feb 100644 --- a/test/legacy_test/test_split_op.py +++ b/test/legacy_test/test_split_op.py @@ -323,12 +323,12 @@ class TestSplitAPI(unittest.TestCase): ) out = np.split(input_1, [2, 3], 1) - assert np.array_equal(res_0, out[0]) - assert np.array_equal(res_1, out[1]) - assert np.array_equal(res_2, out[2]) - assert np.array_equal(res_3, out[0]) - assert np.array_equal(res_4, out[1]) - assert np.array_equal(res_5, out[2]) + np.testing.assert_array_equal(res_0, out[0]) + np.testing.assert_array_equal(res_1, out[1]) + np.testing.assert_array_equal(res_2, out[2]) + np.testing.assert_array_equal(res_3, out[0]) + np.testing.assert_array_equal(res_4, out[1]) + np.testing.assert_array_equal(res_5, out[2]) class TestSplitOpError(unittest.TestCase): diff --git a/test/legacy_test/test_strided_slice_op.py b/test/legacy_test/test_strided_slice_op.py index 3181083e6968f47d47d3e4a11888af6af82b48c6..dde0bbf4e5c2522d31584b3770b57be204cf68cf 100644 --- a/test/legacy_test/test_strided_slice_op.py +++ b/test/legacy_test/test_strided_slice_op.py @@ -605,13 +605,13 @@ class TestStridedSliceAPI(unittest.TestCase): }, fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7], ) - assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_5, input[-3:3, 0:100:2, -1:2:-1, :]) - assert np.array_equal(res_6, input[-3:3, 0:100:2, :, -1:2:-1]) - assert np.array_equal(res_7, input[-1, 0:100:2, :, -1:2:-1]) + np.testing.assert_array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) + np.testing.assert_array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) + np.testing.assert_array_equal(res_5, input[-3:3, 0:100:2, -1:2:-1, :]) + np.testing.assert_array_equal(res_6, input[-3:3, 0:100:2, :, -1:2:-1]) + np.testing.assert_array_equal(res_7, input[-1, 0:100:2, :, -1:2:-1]) def test_dygraph_op(self): x = paddle.zeros(shape=[3, 4, 5, 6], dtype="float32") diff --git a/test/legacy_test/test_tile_op.py b/test/legacy_test/test_tile_op.py index ab8d289aeae033c5816cbd46c1bc6211b8571edf..282a0a52e86e86a19a15e52be90d5e635da5fd63 100644 --- a/test/legacy_test/test_tile_op.py +++ b/test/legacy_test/test_tile_op.py @@ -387,9 +387,9 @@ class TestTileAPI(unittest.TestCase): out_2 = paddle.tile(x, repeat_times=[positive_2, 3]) out_3 = paddle.tile(x, repeat_times=repeat_times) - assert np.array_equal(out_1.numpy(), np.tile(np_x, (2, 3))) - assert np.array_equal(out_2.numpy(), np.tile(np_x, (2, 3))) - assert np.array_equal(out_3.numpy(), np.tile(np_x, (2, 3))) + np.testing.assert_array_equal(out_1.numpy(), np.tile(np_x, (2, 3))) + np.testing.assert_array_equal(out_2.numpy(), np.tile(np_x, (2, 3))) + np.testing.assert_array_equal(out_3.numpy(), np.tile(np_x, (2, 3))) class TestTileDoubleGradCheck(unittest.TestCase): diff --git 
a/test/legacy_test/test_unbind_op.py b/test/legacy_test/test_unbind_op.py index 80c3db774a7bd641435506d535cf70f58eb71dbb..670433a84c8b552640ada4aa43b1f6983cdebfef 100644 --- a/test/legacy_test/test_unbind_op.py +++ b/test/legacy_test/test_unbind_op.py @@ -38,8 +38,8 @@ class TestUnbind(unittest.TestCase): fetch_list=[out_0, out_1], ) - assert np.array_equal(res_1, input_1[0, 0:100]) - assert np.array_equal(res_2, input_1[1, 0:100]) + np.testing.assert_array_equal(res_1, input_1[0, 0:100]) + np.testing.assert_array_equal(res_2, input_1[1, 0:100]) def test_unbind_static_fp16_gpu(self): if paddle.fluid.core.is_compiled_with_cuda(): @@ -61,8 +61,8 @@ class TestUnbind(unittest.TestCase): fetch_list=[y], ) - assert np.array_equal(res[0], input[0, :]) - assert np.array_equal(res[1], input[1, :]) + np.testing.assert_array_equal(res[0], input[0, :]) + np.testing.assert_array_equal(res[1], input[1, :]) def test_unbind_dygraph(self): with fluid.dygraph.guard(): @@ -96,8 +96,8 @@ class TestLayersUnbind(unittest.TestCase): fetch_list=[out_0, out_1], ) - assert np.array_equal(res_1, input_1[0, 0:100]) - assert np.array_equal(res_2, input_1[1, 0:100]) + np.testing.assert_array_equal(res_1, input_1[0, 0:100]) + np.testing.assert_array_equal(res_2, input_1[1, 0:100]) class TestUnbindOp(OpTest): diff --git a/test/legacy_test/test_unsqueeze2_op.py b/test/legacy_test/test_unsqueeze2_op.py index 2ba8d1204b90b7d44ea17519fc4955c50040f94e..df4115eb0c57a069aaca40e4b0df07d1daf2ff09 100755 --- a/test/legacy_test/test_unsqueeze2_op.py +++ b/test/legacy_test/test_unsqueeze2_op.py @@ -275,11 +275,11 @@ class TestUnsqueezeAPI(unittest.TestCase): fetch_list=[out_1, out_2, out_3, out_4, out_5], ) - assert np.array_equal(res_1, input.reshape([3, 1, 1, 2, 5, 1])) - assert np.array_equal(res_2, input.reshape([3, 1, 1, 2, 5, 1])) - assert np.array_equal(res_3, input.reshape([3, 1, 1, 2, 5, 1])) - assert np.array_equal(res_4, input.reshape([3, 2, 5, 1])) - assert np.array_equal(res_5, input.reshape([3, 1, 1, 2, 5, 1])) + np.testing.assert_array_equal(res_1, input.reshape([3, 1, 1, 2, 5, 1])) + np.testing.assert_array_equal(res_2, input.reshape([3, 1, 1, 2, 5, 1])) + np.testing.assert_array_equal(res_3, input.reshape([3, 1, 1, 2, 5, 1])) + np.testing.assert_array_equal(res_4, input.reshape([3, 2, 5, 1])) + np.testing.assert_array_equal(res_5, input.reshape([3, 1, 1, 2, 5, 1])) def test_error(self): def test_axes_type(): diff --git a/test/legacy_test/test_update_loss_scaling_op.py b/test/legacy_test/test_update_loss_scaling_op.py index 6060236c5c5c9379ac1369e89226413e1188bf89..56ffc0499699a099a09528066c4b32899e8df067 100644 --- a/test/legacy_test/test_update_loss_scaling_op.py +++ b/test/legacy_test/test_update_loss_scaling_op.py @@ -274,14 +274,20 @@ class TestUpdateLossScalingLayer(unittest.TestCase): ], ) - assert np.array_equal(result_v[0], a_v) - assert np.array_equal(result_v[1], b_v) - assert np.array_equal(result_v[0], result_v[2]) - assert np.array_equal(result_v[1], result_v[3]) - assert np.array_equal(result_v[4], found_inf_v) - assert np.array_equal(result_v[5], prev_loss_scaling_v * incr_ratio) - assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v)) - assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v)) + np.testing.assert_array_equal(result_v[0], a_v) + np.testing.assert_array_equal(result_v[1], b_v) + np.testing.assert_array_equal(result_v[0], result_v[2]) + np.testing.assert_array_equal(result_v[1], result_v[3]) + np.testing.assert_array_equal(result_v[4], found_inf_v) + 
np.testing.assert_array_equal( + result_v[5], prev_loss_scaling_v * incr_ratio + ) + np.testing.assert_array_equal( + result_v[6], np.zeros_like(num_good_steps_v) + ) + np.testing.assert_array_equal( + result_v[7], np.zeros_like(num_bad_steps_v) + ) def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): with paddle_static_guard(): @@ -353,14 +359,20 @@ class TestUpdateLossScalingLayer(unittest.TestCase): num_bad_steps, ], ) - assert np.array_equal(result_v[0], np.zeros_like(a_v)) - assert np.array_equal(result_v[1], np.zeros_like(b_v)) - assert np.array_equal(result_v[2], np.zeros_like(a_v)) - assert np.array_equal(result_v[3], np.zeros_like(b_v)) - assert np.array_equal(result_v[4], found_inf_v) - assert np.array_equal(result_v[5], prev_loss_scaling_v * decr_ratio) - assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v)) - assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v)) + np.testing.assert_array_equal(result_v[0], np.zeros_like(a_v)) + np.testing.assert_array_equal(result_v[1], np.zeros_like(b_v)) + np.testing.assert_array_equal(result_v[2], np.zeros_like(a_v)) + np.testing.assert_array_equal(result_v[3], np.zeros_like(b_v)) + np.testing.assert_array_equal(result_v[4], found_inf_v) + np.testing.assert_array_equal( + result_v[5], prev_loss_scaling_v * decr_ratio + ) + np.testing.assert_array_equal( + result_v[6], np.zeros_like(num_good_steps_v) + ) + np.testing.assert_array_equal( + result_v[7], np.zeros_like(num_bad_steps_v) + ) def test_loss_scaling_cpu(self): with paddle_static_guard(): diff --git a/test/legacy_test/test_where_op.py b/test/legacy_test/test_where_op.py index b68afe65c3a08b2ffb874d5ad8ba01850704f3d3..aa03f7276c1b3642d39f258a7663ff114112df1e 100644 --- a/test/legacy_test/test_where_op.py +++ b/test/legacy_test/test_where_op.py @@ -166,17 +166,17 @@ class TestWhereAPI(unittest.TestCase): feed={'cond': self.cond, 'x': self.x, 'y': self.y}, fetch_list=fetch_list, ) - assert np.array_equal(out[0], self.out) + np.testing.assert_array_equal(out[0], self.out) if x_stop_gradient is False: - assert np.array_equal( + np.testing.assert_array_equal( out[2], self.ref_x_backward(out[1]) ) if y.stop_gradient is False: - assert np.array_equal( + np.testing.assert_array_equal( out[3], self.ref_y_backward(out[1]) ) elif y.stop_gradient is False: - assert np.array_equal( + np.testing.assert_array_equal( out[2], self.ref_y_backward(out[1]) ) @@ -202,7 +202,9 @@ class TestWhereAPI(unittest.TestCase): feed={'x': x_i, 'y': y_i}, fetch_list=[result], ) - assert np.array_equal(out[0], np.where((x_i > 1), x_i, y_i)) + np.testing.assert_array_equal( + out[0], np.where((x_i > 1), x_i, y_i) + ) def test_scalar(self): paddle.enable_static() @@ -228,7 +230,7 @@ class TestWhereAPI(unittest.TestCase): fetch_list=[result], ) expect = np.where(cond_data, x_data, y_data) - assert np.array_equal(out[0], expect) + np.testing.assert_array_equal(out[0], expect) def __test_where_with_broadcast_static(self, cond_shape, x_shape, y_shape): paddle.enable_static() @@ -262,7 +264,7 @@ class TestWhereAPI(unittest.TestCase): fetch_list=[result], ) expect = np.where(cond_data, x_data, y_data) - assert np.array_equal(out[0], expect) + np.testing.assert_array_equal(out[0], expect) def test_static_api_broadcast_1(self): cond_shape = [2, 4] @@ -323,7 +325,9 @@ class TestWhereDygraphAPI(unittest.TestCase): y = fluid.dygraph.to_variable(y_i) cond = fluid.dygraph.to_variable(cond_i) out = paddle.where(cond, x, y) - assert np.array_equal(out.numpy(), np.where(cond_i, x_i, y_i)) + 
np.testing.assert_array_equal( + out.numpy(), np.where(cond_i, x_i, y_i) + ) def test_scalar(self): with fluid.dygraph.guard(): @@ -332,7 +336,7 @@ class TestWhereDygraphAPI(unittest.TestCase): y = 2.0 cond = fluid.dygraph.to_variable(cond_i) out = paddle.where(cond, x, y) - assert np.array_equal(out.numpy(), np.where(cond_i, x, y)) + np.testing.assert_array_equal(out.numpy(), np.where(cond_i, x, y)) def __test_where_with_broadcast_dygraph(self, cond_shape, a_shape, b_shape): with fluid.dygraph.guard(): diff --git a/test/mkldnn/check_flags_mkldnn_ops_on_off.py b/test/mkldnn/check_flags_mkldnn_ops_on_off.py index 188f70bca1215b30a933c025149e26ada93a9d97..f00f3967225caf3f5dc7cb67e8d7a852f0da4f02 100644 --- a/test/mkldnn/check_flags_mkldnn_ops_on_off.py +++ b/test/mkldnn/check_flags_mkldnn_ops_on_off.py @@ -54,7 +54,7 @@ def check(): np_res = np.add(a_np, b_np) np_res = np.matmul(np_res, np.transpose(b_np, (0, 2, 1))) np_res = np.maximum(np_res, 0) - assert np.allclose(res1.numpy(), np_res, atol=1e-3) + np.testing.assert_allclose(res1.numpy(), np_res, atol=1e-3) if __name__ == '__main__': diff --git a/test/mkldnn/check_flags_use_mkldnn.py b/test/mkldnn/check_flags_use_mkldnn.py index 4c9dd32e544437b08f8ef07d009f446b3bd9c1d6..07b4829743cd6219711a83b13cc9a2b85793dd63 100644 --- a/test/mkldnn/check_flags_use_mkldnn.py +++ b/test/mkldnn/check_flags_use_mkldnn.py @@ -38,7 +38,7 @@ def check(): a = fluid.dygraph.to_variable(a_np) res1 = func(a) res2 = np.maximum(a_np, 0) - assert np.array_equal(res1.numpy(), res2) + np.testing.assert_array_equal(res1.numpy(), res2) if __name__ == '__main__': diff --git a/test/quantization/test_quant2_int8_mkldnn_pass.py b/test/quantization/test_quant2_int8_mkldnn_pass.py index 61c700d23b7f4ecfb8c6732c1efc3d33946c670f..e51da1db81ba86b7ec7c4cd9e67865dbf513d824 100644 --- a/test/quantization/test_quant2_int8_mkldnn_pass.py +++ b/test/quantization/test_quant2_int8_mkldnn_pass.py @@ -92,7 +92,7 @@ class TestQuant2Int8MkldnnPassMul(unittest.TestCase): param.set(self.variables_mul["mul_weights"], self.place) qpass._dequantize_op_weights(graph, op_node, "Y", "Out") - assert np.allclose( + np.testing.assert_allclose( self.scope.find_var("mul_weights").get_tensor(), [ [ diff --git a/test/xpu/process_group_bkcl.py b/test/xpu/process_group_bkcl.py index 9c9b88862feab1050f31d1bdc2630eeb9d8b4512..e78b0bd3d98da11aecc6cb43cd83380a22f9587a 100644 --- a/test/xpu/process_group_bkcl.py +++ b/test/xpu/process_group_bkcl.py @@ -86,11 +86,11 @@ class TestProcessGroupFp32(unittest.TestCase): # XPU don't support event query by now, so just use sync op here task = dist.broadcast(tensor_x, 0) paddle.device.xpu.synchronize() - assert np.array_equal(broadcast_result, tensor_x) + np.testing.assert_array_equal(broadcast_result, tensor_x) else: task = dist.broadcast(tensor_y, 0) paddle.device.xpu.synchronize() - assert np.array_equal(broadcast_result, tensor_y) + np.testing.assert_array_equal(broadcast_result, tensor_y) sys.stdout.write(f"rank {pg.rank()}: test broadcast api ok\n") @@ -132,8 +132,8 @@ class TestProcessGroupFp32(unittest.TestCase): out_2 = paddle.slice( tensor_out, [0], [out_shape[0] // 2], [out_shape[0]] ) - assert np.array_equal(tensor_x, out_1) - assert np.array_equal(tensor_y, out_2) + np.testing.assert_array_equal(tensor_x, out_1) + np.testing.assert_array_equal(tensor_y, out_2) sys.stdout.write(f"rank {pg.rank()}: test allgather api ok\n") if pg.rank() == 0: @@ -150,8 +150,8 @@ class TestProcessGroupFp32(unittest.TestCase): out_2 = paddle.slice( tensor_out, [0], 
[out_shape[0] // 2], [out_shape[0]] ) - assert np.array_equal(tensor_x, out_1) - assert np.array_equal(tensor_y, out_2) + np.testing.assert_array_equal(tensor_x, out_1) + np.testing.assert_array_equal(tensor_y, out_2) sys.stdout.write(f"rank {pg.rank()}: test allgather api2 ok\n") # test Reduce @@ -171,8 +171,8 @@ class TestProcessGroupFp32(unittest.TestCase): task.wait() paddle.device.xpu.synchronize() if pg.rank() == 0: - assert np.array_equal(tensor_x, sum_result) - assert np.array_equal(tensor_y, old_tensor_y) + np.testing.assert_array_equal(tensor_x, sum_result) + np.testing.assert_array_equal(tensor_y, old_tensor_y) sys.stdout.write(f"rank {pg.rank()}: test reduce sum api ok\n") # test reduce_scatter @@ -196,9 +196,9 @@ class TestProcessGroupFp32(unittest.TestCase): task.wait() paddle.device.xpu.synchronize() if pg.rank() == 0: - assert np.array_equal(need_result0, tensor_out) + np.testing.assert_array_equal(need_result0, tensor_out) else: - assert np.array_equal(need_result1, tensor_out) + np.testing.assert_array_equal(need_result1, tensor_out) sys.stdout.write(f"rank {pg.rank()}: test reduce_scatter sum api ok\n") # test send async api @@ -215,7 +215,7 @@ class TestProcessGroupFp32(unittest.TestCase): else: task = dist.recv(tensor_y, 0, sync_op=False) task.wait() - assert np.array_equal(tensor_y, tensor_x) + np.testing.assert_array_equal(tensor_y, tensor_x) # test send sync api # rank 0 @@ -229,7 +229,7 @@ class TestProcessGroupFp32(unittest.TestCase): task = dist.send(tensor_x, 1, sync_op=True) else: task = dist.recv(tensor_y, 0, sync_op=True) - assert np.array_equal(tensor_y, tensor_x) + np.testing.assert_array_equal(tensor_y, tensor_x) # test send 0-d tensor # rank 0 diff --git a/test/xpu/test_expand_as_v2_op_xpu.py b/test/xpu/test_expand_as_v2_op_xpu.py index 1843748e8ae0cc01d94c0b8ff0829da2e8479c8d..41f345091054c58e67f48bc5d76f54bdeacc4331 100644 --- a/test/xpu/test_expand_as_v2_op_xpu.py +++ b/test/xpu/test_expand_as_v2_op_xpu.py @@ -147,7 +147,7 @@ class TestExpandAsV2API(unittest.TestCase): feed={"x": x_np, "target_tensor": y_np}, fetch_list=[out_1], ) - assert np.array_equal(res_1[0], np.tile(x_np, (2, 1, 1))) + np.testing.assert_array_equal(res_1[0], np.tile(x_np, (2, 1, 1))) support_types = get_xpu_op_support_types('expand_as_v2') diff --git a/test/xpu/test_expand_v2_op_xpu.py b/test/xpu/test_expand_v2_op_xpu.py index 9d869d14b32e2ba379b7d1354a3e9dce7cdbdaab..ad5397ff3bcb255d9dc0502593a7bdf5beded457 100644 --- a/test/xpu/test_expand_v2_op_xpu.py +++ b/test/xpu/test_expand_v2_op_xpu.py @@ -226,9 +226,9 @@ class TestExpandV2API(unittest.TestCase): fetch_list=[out_1, out_2, out_3], ) - assert np.array_equal(res_1, np.tile(input, (1, 1))) - assert np.array_equal(res_2, np.tile(input, (1, 1))) - assert np.array_equal(res_3, np.tile(input, (1, 1))) + np.testing.assert_array_equal(res_1, np.tile(input, (1, 1))) + np.testing.assert_array_equal(res_2, np.tile(input, (1, 1))) + np.testing.assert_array_equal(res_3, np.tile(input, (1, 1))) support_types = get_xpu_op_support_types('expand_v2') diff --git a/test/xpu/test_masked_select_op_xpu.py b/test/xpu/test_masked_select_op_xpu.py index 3c29714a3d3b8bf1c76bf2a781be3f54d72cdfc7..f2ed82cd1e8d76ab1011b669fcb2bf5073d890b2 100644 --- a/test/xpu/test_masked_select_op_xpu.py +++ b/test/xpu/test_masked_select_op_xpu.py @@ -86,7 +86,7 @@ class TestMaskedSelectAPI(unittest.TestCase): mask = paddle.to_tensor(np_mask) out = paddle.masked_select(x, mask) np_out = np_masked_select(np_x, np_mask) - self.assertEqual(np.allclose(out.numpy(), 
np_out), True)
+        np.testing.assert_allclose(out.numpy(), np_out)
         paddle.enable_static()
 
     def test_static_mode(self):
diff --git a/test/xpu/test_sparse_utils_op_xpu.py b/test/xpu/test_sparse_utils_op_xpu.py
index 37e0d39130ffad1f436830daa02890b2c5cefe44..5a282563447a684ad066e5a914e55e451368ecc7 100644
--- a/test/xpu/test_sparse_utils_op_xpu.py
+++ b/test/xpu/test_sparse_utils_op_xpu.py
@@ -29,17 +29,17 @@ class TestSparseCreate(unittest.TestCase):
         coo = paddle.sparse.sparse_coo_tensor(
             dense_indices, dense_elements, dense_shape, stop_gradient=False
         )
-        assert np.array_equal(indices, coo.indices().numpy())
-        assert np.array_equal(values, coo.values().numpy())
+        np.testing.assert_array_equal(indices, coo.indices().numpy())
+        np.testing.assert_array_equal(values, coo.values().numpy())
 
     def test_create_coo_by_np(self):
         indices = [[0, 1, 2], [1, 2, 0]]
         values = [1.0, 2.0, 3.0]
         dense_shape = [3, 3]
         coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
-        assert np.array_equal(3, coo.nnz())
-        assert np.array_equal(indices, coo.indices().numpy())
-        assert np.array_equal(values, coo.values().numpy())
+        np.testing.assert_array_equal(3, coo.nnz())
+        np.testing.assert_array_equal(indices, coo.indices().numpy())
+        np.testing.assert_array_equal(values, coo.values().numpy())
 
     def test_place(self):
         indices = [[0, 1], [0, 1]]
diff --git a/test/xpu/test_tile_op_xpu.py b/test/xpu/test_tile_op_xpu.py
index 2e661199a0928dfbfd4af7bb48129dc0626a3633..ae2e1b2f0d7dd76d4bd0683b091498da52a4fe19 100644
--- a/test/xpu/test_tile_op_xpu.py
+++ b/test/xpu/test_tile_op_xpu.py
@@ -219,9 +219,9 @@ class TestTileAPI(unittest.TestCase):
         out_2 = paddle.tile(x, repeat_times=[positive_2, 3])
         out_3 = paddle.tile(x, repeat_times=repeat_times)
 
-        assert np.array_equal(out_1.numpy(), np.tile(np_x, (2, 3)))
-        assert np.array_equal(out_2.numpy(), np.tile(np_x, (2, 3)))
-        assert np.array_equal(out_3.numpy(), np.tile(np_x, (2, 3)))
+        np.testing.assert_array_equal(out_1.numpy(), np.tile(np_x, (2, 3)))
+        np.testing.assert_array_equal(out_2.numpy(), np.tile(np_x, (2, 3)))
+        np.testing.assert_array_equal(out_3.numpy(), np.tile(np_x, (2, 3)))
 
 
 class TestTileAPI_ZeroDim(unittest.TestCase):
diff --git a/test/xpu/test_unbind_op_xpu.py b/test/xpu/test_unbind_op_xpu.py
index fa77e80fb68061d8ee851f3d007d1f8c25b87f13..3ec10511a7e909ada5fbe091ad904c8005f110c4 100644
--- a/test/xpu/test_unbind_op_xpu.py
+++ b/test/xpu/test_unbind_op_xpu.py
@@ -50,8 +50,8 @@ class XPUTestUnbindOP(XPUOpTestWrapper):
                 fetch_list=[out_0, out_1],
             )
 
-            assert np.array_equal(res_1, input_1[0, 0:100])
-            assert np.array_equal(res_2, input_1[1, 0:100])
+            np.testing.assert_array_equal(res_1, input_1[0, 0:100])
+            np.testing.assert_array_equal(res_2, input_1[1, 0:100])
 
         def test_unbind_dygraph(self):
             with fluid.dygraph.guard():
@@ -89,8 +89,8 @@ class XPUTestUnbindOP(XPUOpTestWrapper):
                 fetch_list=[out_0, out_1],
             )
 
-            assert np.array_equal(res_1, input_1[0, 0:100])
-            assert np.array_equal(res_2, input_1[1, 0:100])
+            np.testing.assert_array_equal(res_1, input_1[0, 0:100])
+            np.testing.assert_array_equal(res_2, input_1[1, 0:100])
 
     class TestUnbindOp(XPUOpTest):
         def initParameters(self):
diff --git a/test/xpu/test_update_loss_scaling_op_xpu.py b/test/xpu/test_update_loss_scaling_op_xpu.py
index 86e6aac6badb51e2619272989bb72b0aae188b08..c8e398a3d7782c1fad3b6d983ccbf6eaa80a56dc 100644
--- a/test/xpu/test_update_loss_scaling_op_xpu.py
+++ b/test/xpu/test_update_loss_scaling_op_xpu.py
@@ -174,14 +174,20 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper):
                     num_bad_steps,
                 ],
             )
-            assert np.array_equal(result_v[0], a_v)
-            assert np.array_equal(result_v[1], b_v)
-            assert np.array_equal(result_v[0], result_v[2])
-            assert np.array_equal(result_v[1], result_v[3])
-            assert np.array_equal(result_v[4], found_inf_v)
-            assert np.array_equal(result_v[5], prev_loss_scaling_v * incr_ratio)
-            assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
-            assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
+            np.testing.assert_array_equal(result_v[0], a_v)
+            np.testing.assert_array_equal(result_v[1], b_v)
+            np.testing.assert_array_equal(result_v[0], result_v[2])
+            np.testing.assert_array_equal(result_v[1], result_v[3])
+            np.testing.assert_array_equal(result_v[4], found_inf_v)
+            np.testing.assert_array_equal(
+                result_v[5], prev_loss_scaling_v * incr_ratio
+            )
+            np.testing.assert_array_equal(
+                result_v[6], np.zeros_like(num_good_steps_v)
+            )
+            np.testing.assert_array_equal(
+                result_v[7], np.zeros_like(num_bad_steps_v)
+            )
 
         def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()):
             a = paddle.static.data(
@@ -252,14 +258,20 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper):
                     num_bad_steps,
                 ],
             )
-            assert np.array_equal(result_v[0], np.zeros_like(a_v))
-            assert np.array_equal(result_v[1], np.zeros_like(b_v))
-            assert np.array_equal(result_v[2], np.zeros_like(a_v))
-            assert np.array_equal(result_v[3], np.zeros_like(b_v))
-            assert np.array_equal(result_v[4], found_inf_v)
-            assert np.array_equal(result_v[5], prev_loss_scaling_v * decr_ratio)
-            assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
-            assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
+            np.testing.assert_array_equal(result_v[0], np.zeros_like(a_v))
+            np.testing.assert_array_equal(result_v[1], np.zeros_like(b_v))
+            np.testing.assert_array_equal(result_v[2], np.zeros_like(a_v))
+            np.testing.assert_array_equal(result_v[3], np.zeros_like(b_v))
+            np.testing.assert_array_equal(result_v[4], found_inf_v)
+            np.testing.assert_array_equal(
+                result_v[5], prev_loss_scaling_v * decr_ratio
+            )
+            np.testing.assert_array_equal(
+                result_v[6], np.zeros_like(num_good_steps_v)
+            )
+            np.testing.assert_array_equal(
+                result_v[7], np.zeros_like(num_bad_steps_v)
+            )
 
         def test_loss_scaling(self):
             main = fluid.Program()
diff --git a/test/xpu/test_where_op_xpu.py b/test/xpu/test_where_op_xpu.py
index 8dd7500517aed59b636c0d53855cae28ca35c753..13ec8c8c446a74ce9f74e13cee6f7bdadc772fd9 100644
--- a/test/xpu/test_where_op_xpu.py
+++ b/test/xpu/test_where_op_xpu.py
@@ -132,18 +132,18 @@ class TestXPUWhereAPI(unittest.TestCase):
                         feed={'cond': self.cond, 'x': self.x, 'y': self.y},
                         fetch_list=fetch_list,
                     )
-                    assert np.array_equal(out[0], self.out)
+                    np.testing.assert_array_equal(out[0], self.out)
 
                     if x_stop_gradient is False:
-                        assert np.array_equal(
+                        np.testing.assert_array_equal(
                             out[2], self.ref_x_backward(out[1])
                         )
                         if y.stop_gradient is False:
-                            assert np.array_equal(
+                            np.testing.assert_array_equal(
                                 out[3], self.ref_y_backward(out[1])
                             )
                     elif y.stop_gradient is False:
-                        assert np.array_equal(
+                        np.testing.assert_array_equal(
                             out[2], self.ref_y_backward(out[1])
                         )
 
@@ -165,7 +165,7 @@ class TestXPUWhereAPI(unittest.TestCase):
             out = exe.run(
                 train_prog, feed={'x': x_i, 'y': y_i}, fetch_list=[result]
             )
-            assert np.array_equal(out[0], np.where(x_i > 1, x_i, y_i))
+            np.testing.assert_array_equal(out[0], np.where(x_i > 1, x_i, y_i))
 
 
 class TestWhereDygraphAPI(unittest.TestCase):
@@ -178,7 +178,9 @@ class TestWhereDygraphAPI(unittest.TestCase):
             y = fluid.dygraph.to_variable(y_i)
             cond = fluid.dygraph.to_variable(cond_i)
             out = paddle.where(cond, x, y)
-            assert np.array_equal(out.numpy(), np.where(cond_i, x_i, y_i))
+            np.testing.assert_array_equal(
+                out.numpy(), np.where(cond_i, x_i, y_i)
+            )
 
 
 if __name__ == '__main__':