Commit 3238ce06 authored by Yu Yang

Add wait

Parent 8a9de67e
@@ -813,6 +813,16 @@ void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
  fetch_ops.clear();
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<LoDTensorArray>() =
      fetched_data->tensors_;

  // FIXME:
  // It could be optimized by using multiple events in an operator.
  // Manually sync computation during iter.
  for (auto &s : member_->communication_streams_) {
    s.second.ctx_->Wait();
  }
  for (auto &p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
}
void ParallelExecutor::RunOp(
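The FIXME above notes that the blanket host-side Wait() calls could be replaced by per-stream events. As a rough illustration of that idea, here is a minimal CUDA sketch (not PaddlePaddle code; the function name SyncCommIntoCompute and its stream arguments are hypothetical): each communication stream records an event and the compute stream waits on it on the device, so the host thread never has to block.

// Minimal CUDA sketch of the event-based alternative hinted at in the FIXME.
// Assumed helper, not part of ParallelExecutor.
#include <cuda_runtime.h>
#include <vector>

void SyncCommIntoCompute(const std::vector<cudaStream_t> &comm_streams,
                         cudaStream_t compute_stream) {
  for (cudaStream_t s : comm_streams) {
    cudaEvent_t ev;
    cudaEventCreateWithFlags(&ev, cudaEventDisableTiming);
    // Mark the point where all work enqueued so far on the communication stream finishes.
    cudaEventRecord(ev, s);
    // Make the compute stream wait for that point on the device, without stalling the host.
    cudaStreamWaitEvent(compute_stream, ev, 0);
    // Safe to destroy here: the wait is already enqueued, and resources are
    // released once the event completes.
    cudaEventDestroy(ev);
  }
}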