Commit a7d6b1f9 authored by: Yancey1989

code cleanup test=develop

Parent a760a550
@@ -17,7 +17,6 @@
 #include <string>
 #include <vector>
-#include "paddle/fluid/framework/details/container_cast.h"
 #include "paddle/fluid/framework/details/op_handle_base.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
......
@@ -134,6 +134,7 @@ static const char kParams[] = "params";
 static const char kLocalScopes[] = "local_scopes";
 static const char kStrategy[] = "strategy";
 static const char kNumTrainers[] = "num_trainers";
+static const char kNumLossScaled[] = "num_loss_scaled";
 
 void MultiDevSSAGraphBuilder::Init() const {
   all_vars_.clear();
......
@@ -41,10 +41,12 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run(
     Scope &local_scope = scope->NewScope();
     *scope->Var(details::kLocalExecScopeName)->GetMutable<Scope *>() =
         &local_scope;
+
     for (auto &info : var_infos_) {
       if (scope->FindVar(info.name_) != nullptr) {
         continue;
       }
+
       if (info.persistable_) {  // Persistable
         InitializeVariable(scope->Var(info.name_), info.type_);
       } else {
......
@@ -24,7 +24,6 @@
 #include <functional>
 #include "ThreadPool.h" // ThreadPool in thrird party
 #include "paddle/fluid/framework/blocking_queue.h"
-#include "paddle/fluid/framework/details/computation_op_handle.h"
 #include "paddle/fluid/framework/details/exception_holder.h"
 #include "paddle/fluid/framework/details/execution_strategy.h"
 #include "paddle/fluid/framework/details/fetch_op_handle.h"
......
@@ -20,7 +20,7 @@ namespace details {
 VarHandleBase::~VarHandleBase() {}
 
-VarHandle::~VarHandle() { VLOG(5) << "deleting var handle " << DebugString(); }
+VarHandle::~VarHandle() { VLOG(4) << "deleting var handle " << DebugString(); }
 
 std::string VarHandle::DebugString() const {
   std::stringstream ss;
......
@@ -49,6 +49,7 @@ class Node {
  public:
   virtual ~Node() {
     if (!wrapper_.empty()) {
+      VLOG(4) << "ir::Node deleting a wrapper node " << Name();
       wrapper_deleter_();
     }
   }
......
@@ -17,8 +17,6 @@ import unittest
 import logging
 import six
 
-ExecutorType = fluid.ExecutionStrategy().ExecutorType
-
 
 class TestBase(unittest.TestCase):
     def main(self,
@@ -26,7 +24,7 @@ class TestBase(unittest.TestCase):
              iter=10,
              iter_per_pe=10,
              use_gpu=True,
-             exec_type=ExecutorType.Default):
+             use_experimental_executor=False):
         if use_gpu and not fluid.core.is_compiled_with_cuda():
             logging.warning(
                 "Paddle is not compiled with CUDA, skip GPU unittests")
@@ -45,7 +43,7 @@ class TestBase(unittest.TestCase):
         for _ in six.moves.xrange(iter):
             exe_strategy = fluid.ExecutionStrategy()
             exe_strategy._dry_run = True
-            exe_strategy.executor_type = exec_type
+            exe_strategy.use_experimental_executor = use_experimental_executor
             pe = fluid.ParallelExecutor(
                 use_cuda=use_gpu,
                 loss_name=loss.name,
@@ -58,11 +56,11 @@ class TestBase(unittest.TestCase):
 class TestMNISTDryRun(TestBase):
     def test_mnist_dry_run(self):
         for use_gpu in (False, True):
-            for exec_type in (ExecutorType.Default, ExecutorType.Experimental):
+            for use_experimental_executor in (False, True):
                 self.main(
                     network_func=TestMNISTDryRun.network_func,
                     use_gpu=use_gpu,
-                    exec_type=exec_type)
+                    use_experimental_executor=use_experimental_executor)
 
     @staticmethod
     def network_func():
......
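
For reference, the dry-run test above switches back from the ExecutorType enum to the boolean use_experimental_executor flag on ExecutionStrategy. Below is a minimal standalone sketch of that usage; the tiny network and its layer names are made up for illustration and are not part of this patch, only the ExecutionStrategy/ParallelExecutor calls mirror the test.

import paddle.fluid as fluid

# Build a small network so the sketch is self-contained (hypothetical layers,
# not taken from the patch).
img = fluid.layers.data(name='img', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = fluid.layers.fc(input=img, size=200, act='relu')
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=label))
fluid.optimizer.Adam().minimize(loss)

fluid.Executor(fluid.CPUPlace()).run(fluid.default_startup_program())

exe_strategy = fluid.ExecutionStrategy()
exe_strategy._dry_run = True                   # exercise scheduling without real computation
exe_strategy.use_experimental_executor = True  # pick the fast threaded executor

pe = fluid.ParallelExecutor(
    use_cuda=False, loss_name=loss.name, exec_strategy=exe_strategy)
for _ in range(10):
    pe.run(fetch_list=[])                      # dry-run iterations fetch nothing
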
@@ -79,26 +79,25 @@ class TestMNIST(TestParallelExecutorBase):
             return
         img, label = self._init_data()
 
-        """
         all_reduce_first_loss, all_reduce_last_loss = self.check_network_convergence(
             model,
             feed_dict={"image": img,
                        "label": label},
             use_cuda=use_cuda,
             use_reduce=False)
-        """
         reduce_first_loss, reduce_last_loss = self.check_network_convergence(
             model,
             feed_dict={"image": img,
                        "label": label},
             use_cuda=use_cuda,
             use_reduce=True)
-        """
         for loss in zip(all_reduce_first_loss, reduce_first_loss):
             self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)
         for loss in zip(all_reduce_last_loss, reduce_last_loss):
             self.assertAlmostEqual(loss[0], loss[1], delta=1e-4)
-        """
 
     # simple_fc
     def check_simple_fc_convergence(self,
@@ -118,7 +117,7 @@ class TestMNIST(TestParallelExecutorBase):
             use_reduce=use_reduce,
             use_parallel_graph=use_parallel_graph)
 
-    def notest_simple_fc(self):
+    def test_simple_fc(self):
         # use_cuda
         if core.is_compiled_with_cuda():
             self.check_simple_fc_convergence(True)
@@ -126,7 +125,7 @@ class TestMNIST(TestParallelExecutorBase):
                 True, use_reduce=False, use_parallel_graph=True)
         self.check_simple_fc_convergence(False)
 
-    def notest_simple_fc_with_new_strategy(self):
+    def test_simple_fc_with_new_strategy(self):
         # use_cuda, use_reduce
         self._compare_reduce_and_allreduce(simple_fc_net, True)
         self._compare_reduce_and_allreduce(simple_fc_net, False)
@@ -163,7 +162,7 @@ class TestMNIST(TestParallelExecutorBase):
         self.assertAlmostEquals(
             np.mean(parallel_last_loss), single_last_loss, delta=1e-6)
 
-    def notest_simple_fc_parallel_accuracy(self):
+    def test_simple_fc_parallel_accuracy(self):
         if core.is_compiled_with_cuda():
             self.check_simple_fc_parallel_accuracy(True)
             self.check_simple_fc_parallel_accuracy(
@@ -192,7 +191,9 @@ class TestMNIST(TestParallelExecutorBase):
         for use_cuda in (False, True):
             for use_fast_executor in (False, True):
                 self.check_batchnorm_fc_convergence(use_cuda, use_fast_executor)
-        self.check_batchnorm_fc_convergence(use_cuda, False, True)
+        self.check_batchnorm_fc_convergence(
+            use_cuda=True, use_fast_executor=False, use_parallel_graph=True)
 
     def test_batchnorm_fc_with_new_strategy(self):
         # FIXME(zcd): close this test temporally.
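
The mnist changes above re-enable the comparison of the Reduce and AllReduce build strategies driven by the use_reduce flag of check_network_convergence. As a small, hedged illustration, the snippet below shows the ReduceStrategy selection that the use_reduce flag is assumed to map to in the test base class; this mapping is not shown in this patch.

import paddle.fluid as fluid

# Assumption: use_reduce=True corresponds to ReduceStrategy.Reduce and
# use_reduce=False to the default ReduceStrategy.AllReduce.
build_strategy = fluid.BuildStrategy()
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
print(build_strategy.reduce_strategy)

The strategy object would then be passed to ParallelExecutor through its build_strategy argument, as in the convergence checks exercised by the tests above.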