diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc
index 040a68f67264d0068fc484b6629828bd2bdd222a..85977366e61c676fc5d2d3c5d22dd2f606543684 100644
--- a/paddle/fluid/platform/profiler.cc
+++ b/paddle/fluid/platform/profiler.cc
@@ -186,7 +186,6 @@ RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx)
 
 RecordEvent::~RecordEvent() {
   if (g_state == ProfilerState::kDisabled || !is_enabled_) return;
-  VLOG(5) << "call ~RecordEvent";
   std::lock_guard<std::mutex> l(profiler_mu);
   DeviceTracer* tracer = GetDeviceTracer();
   if (tracer) {
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
index 0ff7b73123b59a2f9254c88662fa7610b2d7b7e8..63bc1de208d198c5d55b034ed6ce2af184449bc9 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
@@ -138,6 +138,13 @@ class TestMNIST(TestParallelExecutorBase):
                        "label": label},
             use_cuda=use_cuda,
             use_parallel_executor=False)
+        parallel_first_loss, parallel_last_loss = self.check_network_convergence(
+            method=simple_fc_net,
+            seed=1,
+            feed_dict={"image": img,
+                       "label": label},
+            use_cuda=use_cuda,
+            use_parallel_executor=True)
 
         self.assertAlmostEquals(
             np.mean(parallel_first_loss),
@@ -149,8 +156,6 @@ class TestMNIST(TestParallelExecutorBase):
     def test_simple_fc_parallel_accuracy(self):
         if core.is_compiled_with_cuda():
             self.check_simple_fc_parallel_accuracy(True)
-            self.check_simple_fc_parallel_accuracy(True)
-        # FIXME(Yancey1989): ParallelGraph executor type support CPU mode
         self.check_simple_fc_parallel_accuracy(False)
 
     def check_batchnorm_fc_convergence(self, use_cuda, use_fast_executor):
@@ -171,9 +176,6 @@ class TestMNIST(TestParallelExecutorBase):
             for use_fast_executor in (False, True):
                 self.check_batchnorm_fc_convergence(use_cuda, use_fast_executor)
 
-        self.check_batchnorm_fc_convergence(
-            use_cuda=True, use_fast_executor=False)
-
     def test_batchnorm_fc_with_new_strategy(self):
         # FIXME(zcd): close this test temporally.
         # self._compare_reduce_and_allreduce(fc_with_batchnorm, True)
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py
index 4f1d902f5c380fb5141deffda8f431c1458eff0f..e7a56bb6386a812e43e5c1b5c08cd0682aa9223a 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py
@@ -313,9 +313,7 @@ class TestResnet(TestParallelExecutorBase):
             np.mean(parallel_last_loss), single_last_loss[0], delta=delta2)
 
     def test_seresnext_with_learning_rate_decay(self):
-        if core.is_compiled_with_cuda():
-            self._check_resnet_convergence(
-                model=SE_ResNeXt50Small, use_cuda=True)
+        self._check_resnet_convergence(model=SE_ResNeXt50Small, use_cuda=True)
         self._check_resnet_convergence(
             model=SE_ResNeXt50Small, use_cuda=False, iter=2, delta2=1e-3)
 