Commit 06936a2f authored by Yancey1989

fix 1gpu test=develop

Parent d3a4da5c
@@ -51,7 +51,8 @@ void AllReduceOpHandle::RunImpl() {
 // FIXME(typhoonzero): If scope0(global scope) have NCCL_ID_VAR,
 // this is a distributed or inter-process call, find a better way.
 #ifdef PADDLE_WITH_CUDA
-  // Find NCCL ID from the global scope.
+  // All-reduce op_handle can run on the sub-scope, find the nccl id from
+  // the global scope.
   if (NoDummyInputSize() == 1 &&
       local_scopes_[0]->FindVar(NCCL_ID_VARNAME) == nullptr) {
 #else
......
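Context for the first hunk: Paddle scopes form a hierarchy, and a FindVar lookup falls through to the parent scope, so an all-reduce op_handle executing on a per-device sub-scope still sees the NCCL ID variable created in the global scope; when no such variable exists (the single-GPU case) and there is only one input, the handle can skip NCCL entirely. Below is a minimal sketch of that lookup pattern using toy Scope/Variable types, not Paddle's real classes; the literal "NCCLID" is only a placeholder for whatever NCCL_ID_VARNAME expands to.

```cpp
// Toy model (not Paddle's implementation): hierarchical scopes where FindVar
// falls back to the parent, mirroring how a sub-scope lookup can still reach
// a variable declared in the global scope.
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

struct Variable {};  // stand-in for a framework variable

class Scope {
 public:
  explicit Scope(const Scope* parent = nullptr) : parent_(parent) {}

  Variable* DeclareVar(const std::string& name) {
    return (vars_[name] = std::make_unique<Variable>()).get();
  }

  // Search this scope, then walk up toward the global scope.
  Variable* FindVar(const std::string& name) const {
    auto it = vars_.find(name);
    if (it != vars_.end()) return it->second.get();
    return parent_ ? parent_->FindVar(name) : nullptr;
  }

 private:
  const Scope* parent_;
  std::unordered_map<std::string, std::unique_ptr<Variable>> vars_;
};

int main() {
  const std::string kNcclId = "NCCLID";  // placeholder for NCCL_ID_VARNAME

  Scope global;
  Scope sub(&global);          // per-device execution scope
  global.DeclareVar(kNcclId);  // multi-GPU: ID created in the global scope

  // The sub-scope lookup succeeds by falling through to the global scope.
  std::cout << std::boolalpha << (sub.FindVar(kNcclId) != nullptr) << "\n";

  // Single-GPU: no ID anywhere, so an all-reduce with one input can return
  // early instead of touching NCCL, which is what the hunk above checks.
  Scope global1;
  Scope sub1(&global1);
  std::cout << (sub1.FindVar(kNcclId) != nullptr) << "\n";
}
```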
@@ -59,7 +59,7 @@ FeedFetchList ParallelSSAGraphExecutor::Run(
     if (pool_) {
       run_futures.emplace_back(pool_->enqueue(std::move(call)));
     } else {
-      call();
+      fetch_datas.emplace_back(std::move(call()));
     }
   }
......
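The second hunk fixes the single-device path of ParallelSSAGraphExecutor::Run: without a thread pool, call() was invoked but its result was discarded, so fetch data was silently lost on one GPU. A simplified, self-contained sketch of the collect-either-way pattern follows; std::async stands in for pool_->enqueue, and the types are assumptions rather than the real executor's.

```cpp
// Sketch (assumed simplified types, not the real executor): every per-device
// task's result must be collected, whether it runs through the thread pool
// or synchronously on the caller's thread.
#include <future>
#include <iostream>
#include <vector>

using FetchResult = std::vector<float>;  // stand-in for FeedFetchList

int main() {
  const bool use_pool = false;  // the 1-GPU path runs without a pool
  std::vector<std::future<FetchResult>> run_futures;
  std::vector<FetchResult> fetch_datas;

  for (int dev = 0; dev < 2; ++dev) {
    auto call = [dev]() -> FetchResult { return {static_cast<float>(dev)}; };
    if (use_pool) {
      // Pool path: results are gathered later from the futures.
      run_futures.emplace_back(std::async(std::launch::async, call));
    } else {
      // Synchronous path: before the fix, a bare `call();` dropped the
      // result; collecting it keeps both paths equivalent.
      fetch_datas.emplace_back(call());
    }
  }
  for (auto& f : run_futures) fetch_datas.emplace_back(f.get());
  std::cout << "collected " << fetch_datas.size() << " results\n";
}
```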
@@ -231,7 +231,7 @@ ParallelExecutor::ParallelExecutor(
 #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
   auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
   ncclUniqueId *nccl_id = nullptr;
-  if (build_strategy.enable_parallel_graph_) {
+  if (build_strategy.enable_parallel_graph_ && places.size() > 1) {
     // parallel graph mode should initialize nccl by ncclCommInitRank since
     // it call nccl operator per device per thread.
     if (nccl_id_var == nullptr) {
......
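The third hunk guards the parallel-graph NCCL setup on places.size() > 1: with a single device there is nothing to all-reduce, so generating an NCCL ID and initializing per-device communicators is wasted work (this is what broke the 1-GPU test). For reference, here is a standalone sketch of the one-communicator-per-device-thread ncclCommInitRank pattern the in-code comment describes, written against plain NCCL plus the CUDA runtime rather than Paddle code, with the same single-device early exit; error checks are omitted for brevity.

```cpp
// Sketch of per-device-thread NCCL init (not Paddle code): each thread owns
// one device and calls ncclCommInitRank with a shared ncclUniqueId.
#include <cuda_runtime.h>
#include <nccl.h>

#include <cstdio>
#include <thread>
#include <vector>

int main() {
  int ndev = 0;
  cudaGetDeviceCount(&ndev);
  if (ndev < 2) {  // mirrors `places.size() > 1`: skip NCCL setup on 1 GPU
    std::printf("single device: no NCCL id needed\n");
    return 0;
  }

  ncclUniqueId id;
  ncclGetUniqueId(&id);  // shared by every rank (here: every thread)

  std::vector<ncclComm_t> comms(ndev);
  std::vector<std::thread> threads;
  for (int rank = 0; rank < ndev; ++rank) {
    threads.emplace_back([&, rank] {
      cudaSetDevice(rank);
      // One communicator per device, initialized rank-by-rank; threads may
      // call ncclCommInitRank concurrently with the same id.
      ncclCommInitRank(&comms[rank], ndev, id, rank);
    });
  }
  for (auto& t : threads) t.join();
  for (auto& c : comms) ncclCommDestroy(c);
  std::printf("initialized %d communicators\n", ndev);
}
```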