From 82b42e31f063ddf4210e43e8daba044878aa8d58 Mon Sep 17 00:00:00 2001
From: Yancey1989
Date: Fri, 28 Dec 2018 18:32:31 +0800
Subject: [PATCH] polish unittest test=develop

---
 paddle/fluid/platform/profiler.cc                    |  1 -
 .../tests/unittests/test_parallel_executor_mnist.py  | 12 +++++++-----
 .../unittests/test_parallel_executor_seresnext.py    |  4 +---
 3 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc
index 040a68f6726..85977366e61 100644
--- a/paddle/fluid/platform/profiler.cc
+++ b/paddle/fluid/platform/profiler.cc
@@ -186,7 +186,6 @@ RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx)
 
 RecordEvent::~RecordEvent() {
   if (g_state == ProfilerState::kDisabled || !is_enabled_) return;
-  VLOG(5) << "call ~RecordEvent";
   std::lock_guard<std::mutex> l(profiler_mu);
   DeviceTracer* tracer = GetDeviceTracer();
   if (tracer) {
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
index 0ff7b73123b..63bc1de208d 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
@@ -138,6 +138,13 @@ class TestMNIST(TestParallelExecutorBase):
                        "label": label},
             use_cuda=use_cuda,
             use_parallel_executor=False)
+        parallel_first_loss, parallel_last_loss = self.check_network_convergence(
+            method=simple_fc_net,
+            seed=1,
+            feed_dict={"image": img,
+                       "label": label},
+            use_cuda=use_cuda,
+            use_parallel_executor=True)
 
         self.assertAlmostEquals(
             np.mean(parallel_first_loss),
@@ -149,8 +156,6 @@ class TestMNIST(TestParallelExecutorBase):
     def test_simple_fc_parallel_accuracy(self):
         if core.is_compiled_with_cuda():
             self.check_simple_fc_parallel_accuracy(True)
-            self.check_simple_fc_parallel_accuracy(True)
-        # FIXME(Yancey1989): ParallelGraph executor type support CPU mode
         self.check_simple_fc_parallel_accuracy(False)
 
     def check_batchnorm_fc_convergence(self, use_cuda, use_fast_executor):
@@ -171,9 +176,6 @@ class TestMNIST(TestParallelExecutorBase):
             for use_fast_executor in (False, True):
                 self.check_batchnorm_fc_convergence(use_cuda, use_fast_executor)
 
-        self.check_batchnorm_fc_convergence(
-            use_cuda=True, use_fast_executor=False)
-
     def test_batchnorm_fc_with_new_strategy(self):
         # FIXME(zcd): close this test temporally.
         # self._compare_reduce_and_allreduce(fc_with_batchnorm, True)
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py
index 4f1d902f5c3..e7a56bb6386 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py
@@ -313,9 +313,7 @@ class TestResnet(TestParallelExecutorBase):
             np.mean(parallel_last_loss), single_last_loss[0], delta=delta2)
 
     def test_seresnext_with_learning_rate_decay(self):
-        if core.is_compiled_with_cuda():
-            self._check_resnet_convergence(
-                model=SE_ResNeXt50Small, use_cuda=True)
+        self._check_resnet_convergence(model=SE_ResNeXt50Small, use_cuda=True)
         self._check_resnet_convergence(
             model=SE_ResNeXt50Small, use_cuda=False, iter=2, delta2=1e-3)
 
-- 
GitLab