diff --git a/paddle/fluid/framework/async_executor.cc b/paddle/fluid/framework/async_executor.cc
index 7754c84d5f034181fac882298719d4e44189823a..bfdb5848338cb9fc9c45ca7bf5119b0e50704931 100644
--- a/paddle/fluid/framework/async_executor.cc
+++ b/paddle/fluid/framework/async_executor.cc
@@ -104,6 +104,7 @@ void AsyncExecutor::SaveModel(const std::string& path) {
 }
 
 void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
+<<<<<<< HEAD
                                 const std::string& data_feed_desc_str,
                                 const std::vector<std::string>& filelist,
                                 const int thread_num,
@@ -192,6 +193,25 @@ void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
     _pull_dense_thread->stop();
   }
 #endif
+=======
+                                const std::string& trainer_desc_str,
+                                const bool debug) {
+  TrainerDesc trainer_desc;
+  google::protobuf::TextFormat::ParseFromString(trainer_desc_str,
+                                                &trainer_desc);
+  std::shared_ptr<TrainerBase> trainer;
+  trainer = TrainerFactory::CreateTrainer(trainer_desc.class_name());
+  // initialize trainer
+  trainer->Initialize(trainer_desc);
+  // trainer->SetRootScope(root_scope_);
+  trainer->SetDebug(debug);
+  // prepare training environment and helper environment
+  trainer->InitTrainerEnv(main_program, place_);
+  trainer->InitOtherEnv(main_program);
+  // training and finalize training
+  trainer->Run();
+  trainer->Finalize();
+>>>>>>> add dist_multi_trainer for distributed training, add trainer_factory and device_worker_factory so that we can easily extend new training mode, add pull dense worker which is a singleton for parameter fetching
   root_scope_->DropKids();
 
   return;
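
Note: below is a minimal caller-side sketch of the new trainer-based path, assuming an AsyncExecutor constructed from a root scope and a place (as suggested by root_scope_ and place_ in the diff). The "DistMultiTrainer" class name, the header paths, and the CPUPlace choice are illustrative assumptions; only the RunFromFile(main_program, trainer_desc_str, debug) signature and TrainerDesc::class_name are taken from the diff itself.

// Caller-side sketch, not a confirmed public API.
#include <string>

#include "paddle/fluid/framework/async_executor.h"   // assumed header path
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/platform/place.h"

void RunWithTrainerDesc(const paddle::framework::ProgramDesc& main_program) {
  // Assumption: AsyncExecutor is bound to a root scope and a place,
  // matching the root_scope_ / place_ members used in the diff.
  paddle::framework::Scope scope;
  paddle::platform::CPUPlace place;
  paddle::framework::AsyncExecutor exe(&scope, place);

  // Text-format TrainerDesc; class_name selects the trainer created by
  // TrainerFactory::CreateTrainer (e.g. the new distributed multi-trainer).
  std::string trainer_desc_str = "class_name: \"DistMultiTrainer\"\n";

  // RunFromFile parses the desc, builds the trainer, prepares the training
  // environment, runs training, and finalizes, as shown in the diff above.
  exe.RunFromFile(main_program, trainer_desc_str, /*debug=*/false);
}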