提交 9c2d7df8 编写于 作者: Q qiaolongfei

optimize code

上级 570be391
...@@ -48,6 +48,13 @@ static void ParallelExecuteBlocks( ...@@ -48,6 +48,13 @@ static void ParallelExecuteBlocks(
for (size_t i = 0; i < fs.size(); ++i) fs[i].wait(); for (size_t i = 0; i < fs.size(); ++i) fs[i].wait();
} }
static void SavePort(std::shared_ptr<detail::AsyncGRPCServer> rpc_service) {
  // Persist the port the gRPC server actually bound to, so that external
  // processes (e.g. launcher scripts or tests) can discover it.
  // NOTE(review): the path is hard-coded; presumably only one server per
  // host writes this file — confirm against callers.
  std::ofstream port_file("/tmp/paddle.selected_port");
  port_file << rpc_service->GetSelectedPort();
  // The stream is flushed and closed automatically by its destructor (RAII).
}
ListenAndServOp::ListenAndServOp(const std::string &type, ListenAndServOp::ListenAndServOp(const std::string &type,
const framework::VariableNameMap &inputs, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs, const framework::VariableNameMap &outputs,
...@@ -66,9 +73,8 @@ void ListenAndServOp::Stop() { ...@@ -66,9 +73,8 @@ void ListenAndServOp::Stop() {
void ListenAndServOp::PreparePrefetchCtx( void ListenAndServOp::PreparePrefetchCtx(
framework::Executor *executor, framework::BlockDesc *prefetch_block, framework::Executor *executor, framework::BlockDesc *prefetch_block,
framework::ProgramDesc *program) const { framework::ProgramDesc *program) const {
// TODO(qiao) set proper fields for table lookup and update
rpc_service_->SetExecutor(executor);
VLOG(3) << "prefetch block id is " << prefetch_block->ID(); VLOG(3) << "prefetch block id is " << prefetch_block->ID();
rpc_service_->SetExecutor(executor);
auto prefetch_prepared = executor->Prepare(*program, prefetch_block->ID()); auto prefetch_prepared = executor->Prepare(*program, prefetch_block->ID());
rpc_service_->SetPrefetchBlkdId(prefetch_block->ID()); rpc_service_->SetPrefetchBlkdId(prefetch_block->ID());
rpc_service_->SetPrefetchPreparedCtx(prefetch_prepared.get()); rpc_service_->SetPrefetchPreparedCtx(prefetch_prepared.get());
...@@ -134,7 +140,7 @@ void ListenAndServOp::RunSyncUpdate( ...@@ -134,7 +140,7 @@ void ListenAndServOp::RunSyncUpdate(
break; break;
} }
// NOTE: if is_gpu_place, CUDA kernels are launch by multiple threads // NOTE: if is_gpu_place, CUDA kernels are launched by multiple threads
// and this will still work. // and this will still work.
// The optimize blocks which have the same parent ID would run parallel // The optimize blocks which have the same parent ID would run parallel
...@@ -173,13 +179,6 @@ void ListenAndServOp::RunSyncUpdate( ...@@ -173,13 +179,6 @@ void ListenAndServOp::RunSyncUpdate(
} // while(true) } // while(true)
} }
// Write the port the gRPC server selected at bind time to a well-known
// file, so other processes on the host can discover where to connect.
static void SavePort(std::shared_ptr<detail::AsyncGRPCServer> rpc_service) {
  std::ofstream port_file;
  // NOTE(review): path is hard-coded; assumes a single server instance per
  // host — confirm with callers. Open/write errors are silently ignored.
  port_file.open("/tmp/paddle.selected_port");
  port_file << rpc_service->GetSelectedPort();
  port_file.close();
}
void ListenAndServOp::RunImpl(const framework::Scope &scope, void ListenAndServOp::RunImpl(const framework::Scope &scope,
const platform::Place &dev_place) const { const platform::Place &dev_place) const {
platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册