未验证 提交 364376e5 编写于 作者: L Leo Chen 提交者: GitHub

[new-exec] enable sequential run for debug (#37835)

* enable sequential_run for standalone_executor

* add ut

* fix ut
上级 dc0ec667
...@@ -20,9 +20,26 @@ ...@@ -20,9 +20,26 @@
#include "paddle/fluid/operators/controlflow/recurrent_op_helper.h" #include "paddle/fluid/operators/controlflow/recurrent_op_helper.h"
#include "paddle/fluid/operators/controlflow/while_op_helper.h" #include "paddle/fluid/operators/controlflow/while_op_helper.h"
// Debug aid for the standalone (new) executor: when this flag is on,
// AddTask routes every op to one fixed queue so ops execute one at a time
// instead of in parallel. Off by default; intended for debugging only.
PADDLE_DEFINE_EXPORTED_bool(
    new_executor_sequential_run, false,
    "Enable sequential execution for standalone executor, used for debug");
namespace paddle { namespace paddle {
namespace framework { namespace framework {
namespace interpreter { namespace interpreter {
// Dispatch `fn` to a work queue. Normally the queue is chosen by the op's
// func type; when FLAGS_new_executor_sequential_run is set, every task is
// forced onto the kQueueAsync queue (single-threaded) so that ops run
// strictly one after another — a debugging aid.
void AsyncWorkQueue::AddTask(const OpFuncType& op_func_type,
                             std::function<void()> fn) {
  size_t queue_idx = static_cast<size_t>(op_func_type);
  if (FLAGS_new_executor_sequential_run) {
    VLOG(4) << "FLAGS_new_executor_sequential_run:"
            << FLAGS_new_executor_sequential_run;
    queue_idx = static_cast<size_t>(OpFuncType::kQueueAsync);
  }
  queue_group_->AddTask(queue_idx, std::move(fn));
}
using VariableIdMap = std::map<std::string, std::vector<int>>; using VariableIdMap = std::map<std::string, std::vector<int>>;
AtomicVectorSizeT& AsyncWorkQueue::PrepareAtomicDeps( AtomicVectorSizeT& AsyncWorkQueue::PrepareAtomicDeps(
......
...@@ -77,9 +77,7 @@ class AsyncWorkQueue { ...@@ -77,9 +77,7 @@ class AsyncWorkQueue {
// void WaitEmpty() { queue_group_->WaitQueueGroupEmpty(); } // void WaitEmpty() { queue_group_->WaitQueueGroupEmpty(); }
void AddTask(const OpFuncType& op_func_type, std::function<void()> fn) { void AddTask(const OpFuncType& op_func_type, std::function<void()> fn);
queue_group_->AddTask(static_cast<size_t>(op_func_type), std::move(fn));
}
void Cancel() { queue_group_->Cancel(); } void Cancel() { queue_group_->Cancel(); }
......
...@@ -130,6 +130,10 @@ class MultiStreamModelTestCase(unittest.TestCase): ...@@ -130,6 +130,10 @@ class MultiStreamModelTestCase(unittest.TestCase):
for gt, out in zip(ground_truths, res): for gt, out in zip(ground_truths, res):
self.assertEqual(gt[0], out[0]) self.assertEqual(gt[0], out[0])
res_sequential = self.run_new_executor_sequential()
for gt, out in zip(ground_truths, res_sequential):
self.assertEqual(gt[0], out[0])
def run_raw_executor(self): def run_raw_executor(self):
paddle.seed(2020) paddle.seed(2020)
main_program, startup_program, fetch_list = build_program() main_program, startup_program, fetch_list = build_program()
...@@ -158,6 +162,12 @@ class MultiStreamModelTestCase(unittest.TestCase): ...@@ -158,6 +162,12 @@ class MultiStreamModelTestCase(unittest.TestCase):
np.array(inter_core.run({}, fetch_list)._move_to_list()[0])) np.array(inter_core.run({}, fetch_list)._move_to_list()[0]))
return outs return outs
def run_new_executor_sequential(self):
    """Run the program on the new executor with sequential execution
    forced via the FLAGS_new_executor_sequential_run environment flag.

    Returns:
        Whatever self.run_new_executor() returns (the fetched outputs).
    """
    os.environ['FLAGS_new_executor_sequential_run'] = '1'
    try:
        return self.run_new_executor()
    finally:
        # Always unset the flag — even if the run raises — so later tests
        # in this process are not silently serialized.
        del os.environ['FLAGS_new_executor_sequential_run']
class SwitchExecutorInterfaceTestCase(MultiStreamModelTestCase): class SwitchExecutorInterfaceTestCase(MultiStreamModelTestCase):
def run_new_executor(self): def run_new_executor(self):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册