diff --git a/paddle/fluid/operators/detail/grpc_server_test.cc b/paddle/fluid/operators/detail/grpc_server_test.cc
index cb2b8dd538176461652ff4fcd96bfdb906eedb14..b8db0ad987cdfaec1fc9236c3f26e88891376dce 100644
--- a/paddle/fluid/operators/detail/grpc_server_test.cc
+++ b/paddle/fluid/operators/detail/grpc_server_test.cc
@@ -108,10 +108,7 @@ void StartServer(const std::string& endpoint) {
   rpc_service_->RunSyncUpdate();
 }
 
-// NOTE(yuyang18) : This test is buggy.
-// 1. We should not use port 8889 before check.
-// 2. We should not use sleep(2) to sync threads.
-TEST(PREFETCH, DISABLED_CPU) {
+TEST(PREFETCH, CPU) {
   // start up a server instance backend
   std::thread server_thread(StartServer, "127.0.0.1:8889");
   sleep(2);
diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc
index 2bb46cecb484fa2b413ccd577be6bf6edee42f99..d5303eaf50722234d205264e56892b1723104d53 100644
--- a/paddle/fluid/operators/send_recv_op_test.cc
+++ b/paddle/fluid/operators/send_recv_op_test.cc
@@ -151,7 +151,7 @@ void StartServerNet(bool is_sparse, std::atomic<bool> *initialized) {
   LOG(INFO) << "server exit";
 }
 
-TEST(SendRecvOp, DISABLED_CPUDense) {
+TEST(SendRecvOp, CPUDense) {
   std::atomic<bool> initialized{false};
   std::thread server_thread(StartServerNet, false, &initialized);
   while (!initialized) {
@@ -197,7 +197,7 @@ TEST(SendRecvOp, DISABLED_CPUDense) {
   paddle::operators::ListenAndServOp::ResetPort();
 }
 
-TEST(SendRecvOp, DISABLED_CPUSparse) {
+TEST(SendRecvOp, CPUSparse) {
   std::atomic<bool> initialized;
   initialized = false;
   std::thread server_thread(StartServerNet, true, &initialized);
diff --git a/paddle/fluid/operators/test_send_nccl_id.cc b/paddle/fluid/operators/test_send_nccl_id.cc
index 719f039a0f5fcd7445bf1589a683f122e6d62ba0..bbae1d54aa3524fd45cb8ab13c86df8d54b8e643 100644
--- a/paddle/fluid/operators/test_send_nccl_id.cc
+++ b/paddle/fluid/operators/test_send_nccl_id.cc
@@ -63,7 +63,7 @@ void StartServer(std::atomic<bool>* initialized) {
   server_thread.join();
 }
 
-TEST(SendNcclId, DISABLED_Normal) {
+TEST(SendNcclId, Normal) {
   std::atomic<bool> initialized{false};
   std::thread server_thread(StartServer, &initialized);
   while (!initialized) {
diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 70b7bee04fa6ca6688d8746fcf833155784abbea..0e274f769962a2b43959ff49cfba65094396672f 100644
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -41,8 +41,10 @@ function(py_test_modules TARGET_NAME)
 endfunction()
 list(REMOVE_ITEM TEST_OPS test_warpctc_op)
 list(REMOVE_ITEM TEST_OPS test_dist_train)
+list(REMOVE_ITEM TEST_OPS test_parallel_executor_crf)
 foreach(TEST_OP ${TEST_OPS})
     py_test_modules(${TEST_OP} MODULES ${TEST_OP})
 endforeach(TEST_OP)
 py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR} SERIAL)
 py_test_modules(test_dist_train MODULES test_dist_train SERIAL)
+py_test_modules(test_parallel_executor_crf MODULES test_parallel_executor_crf SERIAL)
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py
index fc79bf3b275ccabc9622b2ef3ab64e289651d8f9..66e138b03f3b170aca4fb2207438eb9af1783c33 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py
@@ -168,28 +168,24 @@ class TestCRFModel(unittest.TestCase):
                           pe.run(feed=feeder.feed(cur_batch),
                                  fetch_list=[avg_cost.name]))[0]
 
-    @unittest.skip("Hang when parallel execute")
     def test_update_sparse_parameter_all_reduce(self):
         build_strategy = fluid.BuildStrategy()
         build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce
         self.check_network_convergence(
             is_sparse=True, build_strategy=build_strategy)
 
-    @unittest.skip("Hang when parallel execute")
     def test_update_dense_parameter_all_reduce(self):
         build_strategy = fluid.BuildStrategy()
         build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce
         self.check_network_convergence(
             is_sparse=False, build_strategy=build_strategy)
 
-    @unittest.skip("Hang when parallel execute")
     def test_update_sparse_parameter_reduce(self):
         build_strategy = fluid.BuildStrategy()
         build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
         self.check_network_convergence(
             is_sparse=True, build_strategy=build_strategy)
 
-    @unittest.skip("Hang wen parallel execute")
     def test_update_dense_parameter_reduce(self):
         build_strategy = fluid.BuildStrategy()
         build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
diff --git a/python/paddle/fluid/tests/unittests/test_warpctc_op.py b/python/paddle/fluid/tests/unittests/test_warpctc_op.py
index 65afbd3876e5710e27cdc4f04b3483cf0dce043d..ac638f7836f8205f80e31cfd5eb8892b2c7aee08 100644
--- a/python/paddle/fluid/tests/unittests/test_warpctc_op.py
+++ b/python/paddle/fluid/tests/unittests/test_warpctc_op.py
@@ -210,11 +210,9 @@ class TestWarpCTCOp(OpTest):
         self.outputs = {"Loss": loss}
         self.attrs = {"blank": self.blank, "norm_by_times": self.norm_by_times}
 
-    @unittest.skip("This unittest could be hang")
     def test_check_output(self):
         self.check_output()
 
-    @unittest.skip("This unittest could be hang")
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
         self.check_grad(["Logits"], "Loss", max_relative_error=0.007)