diff --git a/paddle/fluid/framework/async_executor.cc b/paddle/fluid/framework/async_executor.cc
index b5c689d13d9f2b1c661c35768edc6599c24ca09e..7adfc40eb7d5b9039ecf7b51eaa3b99f167d82db 100644
--- a/paddle/fluid/framework/async_executor.cc
+++ b/paddle/fluid/framework/async_executor.cc
@@ -397,25 +397,25 @@ void ExecutorThreadWorker::SetMaxTrainingEpoch(int max_epoch) {
   max_epoch_ = max_epoch;
 }
 
-MultiExecutor::MultiExecutor(const platform::Place& place) : place_(place) {}
+AsyncExecutor::AsyncExecutor(const platform::Place& place) : place_(place) {}
 
-void MultiExecutor::InitRootScope(Scope* scope) {
+void AsyncExecutor::InitRootScope(Scope* scope) {
   root_scope_ = scope;
 }
 
-void MultiExecutor::SetMaxTrainingEpoch(int max_epoch) {
+void AsyncExecutor::SetMaxTrainingEpoch(int max_epoch) {
   max_epoch_ = max_epoch;
 }
 
-void MultiExecutor::SetDataFeedName(const char* feedname) {
+void AsyncExecutor::SetDataFeedName(const char* feedname) {
   feed_name_ = std::string(feedname);
 }
 
-void MultiExecutor::SetModelPrefix(const std::string& model_prefix) {
+void AsyncExecutor::SetModelPrefix(const std::string& model_prefix) {
   model_prefix_ = model_prefix;
 }
 
-void MultiExecutor::RunStartupProgram(const ProgramDesc& program,
+void AsyncExecutor::RunStartupProgram(const ProgramDesc& program,
                                       Scope* scope) {
   auto& block = program.Block(0);
   for (auto& var : block.AllVars()) {
@@ -456,7 +456,7 @@ void MultiExecutor::RunStartupProgram(const ProgramDesc& program,
   // LOGERR("run startup program done.");
 }
 
-std::unique_ptr<ProgramDesc> MultiExecutor::LoadDescFromFile(
+std::unique_ptr<ProgramDesc> AsyncExecutor::LoadDescFromFile(
     const std::string& f) {
   std::string program_desc_str;
   read_binary_file(f, &program_desc_str);
@@ -464,7 +464,7 @@ std::unique_ptr<ProgramDesc> MultiExecutor::LoadDescFromFile(
   return program;
 }
 
-void MultiExecutor::SetDenseCommTensor(
+void AsyncExecutor::SetDenseCommTensor(
     const std::vector<std::string>& dense_comm_tensor) {
   dense_comm_tensor_.resize(dense_comm_tensor.size());
   for (unsigned int i = 0; i < dense_comm_tensor.size(); ++i) {
@@ -472,7 +472,7 @@ void MultiExecutor::SetDenseCommTensor(
   }
 }
 
-void MultiExecutor::SetSparseCommTensor(
+void AsyncExecutor::SetSparseCommTensor(
     const std::vector<std::string>& sparse_comm_tensor) {
   sparse_comm_tensor_.resize(sparse_comm_tensor.size());
   for (unsigned int i = 0; i < sparse_comm_tensor.size(); ++i) {
@@ -480,13 +480,13 @@ void MultiExecutor::SetSparseCommTensor(
   }
 }
 
-void MultiExecutor::SetSparseCommData(
+void AsyncExecutor::SetSparseCommData(
    const std::map& sparse_comm_data) {
   sparse_comm_data_ = sparse_comm_data;
   LOG(INFO) << "Sparse comm data: " << sparse_comm_data_.size();
 }
 
-void MultiExecutor::SetFileList(const char* filelist) {
+void AsyncExecutor::SetFileList(const char* filelist) {
   filelist_.clear();
   std::ifstream fin(filelist);
   std::string filename;
@@ -497,25 +497,25 @@ void MultiExecutor::SetFileList(const char* filelist) {
   fin.close();
 }
 
-void MultiExecutor::SetFileList(std::vector<std::string> tfiles) {
+void AsyncExecutor::SetFileList(std::vector<std::string> tfiles) {
   filelist_.clear();
   filelist_.insert(filelist_.end(), tfiles.begin(), tfiles.end());
   return;
 }
 
-void MultiExecutor::SetInspectVarName(const std::string& inspect_var_name) {
+void AsyncExecutor::SetInspectVarName(const std::string& inspect_var_name) {
   inspect_var_name_ = inspect_var_name;
 }
 
-void MultiExecutor::SetParamNames(const std::vector<std::string>& param_names) {
+void AsyncExecutor::SetParamNames(const std::vector<std::string>& param_names) {
   model_param_names_ = param_names;
 }
 
-void MultiExecutor::SetThreadNum(const int thread_num) {
+void AsyncExecutor::SetThreadNum(const int thread_num) {
   thread_num_ = thread_num;
 }
 
-void MultiExecutor::PrepareThreads(const ProgramDesc& host_program) {
+void AsyncExecutor::PrepareThreads(const ProgramDesc& host_program) {
   workers_.resize(thread_num_);
   for (unsigned i = 0; i < thread_num_; ++i) {
     workers_[i].reset(new ExecutorThreadWorker);
@@ -551,7 +551,7 @@ void MultiExecutor::PrepareThreads(const ProgramDesc& host_program) {
   }
 }
 
-void MultiExecutor::RunMultiExecutor(const ProgramDesc& host_program) {
+void AsyncExecutor::RunAsyncExecutor(const ProgramDesc& host_program) {
   // thread binding here?
   PrepareThreads(host_program);
   for (unsigned i = 0; i < thread_num_; ++i) {
diff --git a/paddle/fluid/framework/async_executor.h b/paddle/fluid/framework/async_executor.h
index 855525475077aec71e2c7cea5ea3cc5cf547382e..775b845979ce5522ab163645637665609a2a17a3 100644
--- a/paddle/fluid/framework/async_executor.h
+++ b/paddle/fluid/framework/async_executor.h
@@ -106,10 +106,10 @@ class ExecutorThreadWorker {
   Scope* thread_scope_;
 };
 
-class MultiExecutor {
+class AsyncExecutor {
  public:
-  explicit MultiExecutor(const platform::Place& place);
-  virtual ~MultiExecutor() {}
+  explicit AsyncExecutor(const platform::Place& place);
+  virtual ~AsyncExecutor() {}
   static std::unique_ptr<ProgramDesc> LoadDescFromFile(
       const std::string& filename);
   void InitRootScope(Scope* scope);
@@ -139,7 +139,7 @@ class MultiExecutor {
   virtual void PrepareThreads(const framework::ProgramDesc& host_program);
   void RunStartupProgram(const framework::ProgramDesc& program,
                          framework::Scope* scope);
-  void RunMultiExecutor(const ProgramDesc& host_program);
+  void RunAsyncExecutor(const ProgramDesc& host_program);
 
  public:
   unsigned int thread_num_;
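
For reference, below is a minimal usage sketch (not part of this patch) of how the renamed class could be driven end to end. It uses only the methods visible in the diff above; the place type, file names, feed name, model prefix, thread count, and epoch count are illustrative placeholders rather than values taken from the patch.

// Hypothetical caller of the renamed AsyncExecutor API; paths and parameters
// below are assumptions for illustration only.
#include "paddle/fluid/framework/async_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/platform/place.h"

void TrainWithAsyncExecutor() {
  paddle::platform::CPUPlace place;
  paddle::framework::AsyncExecutor exe(place);

  // The executor runs against a caller-owned root scope.
  paddle::framework::Scope scope;
  exe.InitRootScope(&scope);

  // Load serialized ProgramDescs; "startup.desc" / "main.desc" are placeholders.
  auto startup_program =
      paddle::framework::AsyncExecutor::LoadDescFromFile("startup.desc");
  auto main_program =
      paddle::framework::AsyncExecutor::LoadDescFromFile("main.desc");

  // Initialize parameters once before multi-threaded training.
  exe.RunStartupProgram(*startup_program, &scope);

  exe.SetModelPrefix("model");             // placeholder model prefix
  exe.SetDataFeedName("data_feed");        // placeholder data feed name
  exe.SetFileList("train_filelist.txt");   // text file, one data file per line
  exe.SetThreadNum(4);
  exe.SetMaxTrainingEpoch(1);

  // PrepareThreads() is invoked internally by RunAsyncExecutor(), so the
  // caller only triggers the run itself.
  exe.RunAsyncExecutor(*main_program);
}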