Commit 9b449ede authored by dongdaxiang

remove some flags and unused code

Parent 21d2a05c
@@ -168,13 +168,9 @@ void ExecutorThreadWorker::CreateThreadScope(const ProgramDesc& program) {
     if (var->Persistable()) {
       auto* ptr = root_scope_->Var(var->Name());
       CreateTensor(ptr, var->GetType());
-      // LOGERR("create Persistable var[%s] finished",
-      // var->Name().c_str());
     } else {
       auto* ptr = thread_scope_->Var(var->Name());
       CreateTensor(ptr, var->GetType());
-      // LOGERR("create unpersistable var[%s] finished",
-      // var->Name().c_str());
     }
   }
 }
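
The hunk above keeps the core of `CreateThreadScope`: persistable variables (typically model parameters) are created once in the shared `root_scope_`, while every other variable goes into the worker's private `thread_scope_`. For readers skimming the diff, here is a minimal, self-contained sketch of that pattern; `Scope` and `VarDesc` below are hypothetical stand-ins, not PaddlePaddle's actual classes:

```cpp
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical stand-in for framework::Scope: just a named-variable table.
struct Scope {
  std::unordered_map<std::string, int> vars;  // value type irrelevant for the sketch
  void Var(const std::string& name) { vars.emplace(name, 0); }
};

// Hypothetical stand-in for a variable description from the ProgramDesc.
struct VarDesc {
  std::string name;
  bool persistable;
};

int main() {
  Scope root_scope;                     // shared by all worker threads
  std::vector<Scope> thread_scopes(4);  // one private scope per worker

  std::vector<VarDesc> program_vars = {{"fc_0.w", true},      // parameter
                                       {"fc_0.tmp", false}};  // temporary

  for (auto& scope : thread_scopes) {
    for (const auto& var : program_vars) {
      if (var.persistable) {
        root_scope.Var(var.name);  // parameters: created once, shared
      } else {
        scope.Var(var.name);       // temporaries: one copy per thread
      }
    }
  }
  std::cout << "root vars: " << root_scope.vars.size() << "\n";           // 1
  std::cout << "thread 0 vars: " << thread_scopes[0].vars.size() << "\n"; // 1
  return 0;
}
```

Keeping the persistable variables in one root scope is what lets the worker threads read and update a single copy of the parameters while their intermediate tensors stay isolated.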
@@ -530,7 +526,7 @@ void AsyncExecutor::PrepareThreads(const ProgramDesc& host_program) {
     workers_[i]->CreateThreadScope(host_program);
     workers_[i]->SetInspectVarName(inspect_var_name_);
     workers_[i]->SetModelParamNames(model_param_names_);
-    workers_[i]->SetSparseCommData(sparse_comm_data_);
+    workers_[i]->SetSparseCommData(sparse_comm_data_);
     workers_[i]->SetMainProgram(host_program);
     workers_[i]->SetModelPrefix(model_prefix_);
   }
@@ -540,9 +536,7 @@ void AsyncExecutor::PrepareThreads(const ProgramDesc& host_program) {
     // filelist is static so that we only add filelist once
     workers_[0]->AddTrainFile(filelist_[i]);
   }
-  // mpi_wrapper::ModelParam model_param(true);
-  // workers_[0]->register_parallel_training_param(model_param);
  for (unsigned i = 0; i < thread_num_; ++i) {
     // new a datafeed here
     std::shared_ptr<DataFeed> local_feed = CreateDataFeed(feed_name_.c_str());
......
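
The second `for` loop above creates a separate `DataFeed` for every worker thread via `CreateDataFeed(feed_name_.c_str())`. A hedged sketch of that per-thread-reader pattern, using hypothetical `DataFeedStub`/`CreateDataFeedStub` names rather than the real API:

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Hypothetical stand-in for framework::DataFeed.
struct DataFeedStub {
  explicit DataFeedStub(std::string name) : name_(std::move(name)) {}
  std::string name_;
};

// Hypothetical factory: returns a fresh instance on every call.
std::shared_ptr<DataFeedStub> CreateDataFeedStub(const char* name) {
  return std::make_shared<DataFeedStub>(name);
}

int main() {
  const unsigned thread_num = 4;
  std::vector<std::shared_ptr<DataFeedStub>> feeds;
  for (unsigned i = 0; i < thread_num; ++i) {
    feeds.push_back(CreateDataFeedStub("multi_slot"));  // one feed per worker
  }
  std::cout << feeds.size() << " independent feeds created\n";
  return 0;
}
```

Giving each worker its own feed object keeps file-parsing state out of the shared scope, so readers never have to synchronize with one another.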
@@ -60,11 +60,7 @@ void BindAsyncExecutor(py::module* m) {
   for (int i = 0; i < base_param.model_param_names_size(); ++i) {
     param_names.push_back(base_param.model_param_names(i));
   }
-#ifdef FORK_V1
-  paddle::framework::InitDevices();
-#else
   paddle::framework::InitDevices(false);
-#endif
   self.InitRootScope(scope);
   self.SetThreadNum(base_param.thread_num());
   self.SetMaxTrainingEpoch(base_param.max_epoch());
......
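
The last hunk drops the `FORK_V1` conditional, so the binding always calls `paddle::framework::InitDevices(false)` before the executor is configured. Below is a hedged pybind11 sketch of that shape of binding; `MyExecutor`, `InitDevicesStub`, and the module name are hypothetical, and the meaning of the `false` argument (commonly, skipping GPU peer-to-peer setup) is an assumption, not something this diff states:

```cpp
#include <pybind11/pybind11.h>

namespace py = pybind11;

// Hypothetical stand-in for AsyncExecutor.
struct MyExecutor {
  int thread_num = 0;
  void SetThreadNum(int n) { thread_num = n; }
};

// Stand-in for paddle::framework::InitDevices(false): the real call would
// enumerate and initialize the available devices once per process.
void InitDevicesStub(bool init_p2p) { (void)init_p2p; }

PYBIND11_MODULE(example, m) {
  py::class_<MyExecutor>(m, "MyExecutor")
      .def(py::init<>())
      .def("init", [](MyExecutor& self, int threads) {
        InitDevicesStub(false);      // one-time device setup, as in the diff
        self.SetThreadNum(threads);  // then configure the executor
      });
}
```

Removing the `#ifdef` leaves a single code path to build and test, which matches the commit message's intent of dropping unused flags.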