/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#if defined(PADDLE_WITH_PSCORE)
#include "paddle/fluid/distributed/ps/wrapper/fleet.h"
#endif

#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/device_worker_factory.h"
#include "paddle/fluid/framework/trainer.h"

namespace paddle {
namespace framework {

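// Configures the trainer from the TrainerDesc: reads the thread count, MPI
// rank/size and dump settings, then creates one DeviceWorker per dataset
// reader and wires each worker to its reader and dump configuration.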
void DistMultiTrainer::Initialize(const TrainerDesc &trainer_desc,
                                  Dataset *dataset) {
  thread_num_ = trainer_desc.thread_num();
  SetDataset(dataset);

  ParseDumpConfig(trainer_desc);
  mpi_rank_ = trainer_desc.mpi_rank();
  mpi_size_ = trainer_desc.mpi_size();
  dump_file_num_ = trainer_desc.dump_file_num();
  user_define_dump_filename_ = trainer_desc.user_define_dump_filename();
  const std::vector<paddle::framework::DataFeed *> readers =
      dataset->GetReaders();
  RegisterHeterCallback();
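  // Note: the thread_num_ read from the config is overridden below; each
  // worker is paired with exactly one dataset reader, so the reader count
  // wins.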
  thread_num_ = readers.size();
  workers_.resize(thread_num_);
  for (int i = 0; i < trainer_desc.downpour_param().stat_var_names_size();
       i++) {
    need_merge_var_names_.push_back(
        trainer_desc.downpour_param().stat_var_names(i));
  }

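  // Create one device worker per thread and propagate the shared dump
  // configuration parsed above into each worker.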
  for (int i = 0; i < thread_num_; ++i) {
    workers_[i] = DeviceWorkerFactory::CreateDeviceWorker(
        trainer_desc.device_worker_name());
    workers_[i]->SetDeviceIndex(i);
    workers_[i]->SetDataFeed(readers[i]);
    workers_[i]->SetNeedDumpField(need_dump_field_);
    workers_[i]->SetNeedDumpParam(need_dump_param_);
    workers_[i]->SetDumpFieldVector(dump_fields_);
    workers_[i]->SetDumpParamVector(dump_param_);
    workers_[i]->InitRandomDumpConfig(trainer_desc);
    workers_[i]->Initialize(trainer_desc);
    workers_[i]->SetWorkerNum(thread_num_);
  }

  VLOG(3) << "going to initialize pull dense worker";
  pull_dense_worker_ = PullDenseWorker::GetInstance();
  pull_dense_worker_->Initialize(trainer_desc);
  VLOG(3) << "initialize pull dense worker";
  SetDebug(trainer_desc.debug());
}

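// Registers a callback with the fleet wrapper so that heterogeneous
// training tasks can be scheduled back onto the worker that owns them.
// The PSCORE build uses paddle::distributed::FleetWrapper; other builds
// fall back to the framework-level FleetWrapper singleton.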
void DistMultiTrainer::RegisterHeterCallback() {
#ifdef PADDLE_WITH_PSCORE
  auto fleet_ptr = paddle::distributed::FleetWrapper::GetInstance();
#else
  auto fleet_ptr = FleetWrapper::GetInstance();
#endif
  fleet_ptr->RegisterHeterCallback(
      [this](int worker, int taskid) { workers_[worker]->Schedule(taskid); });
}

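// Creates the channel that workers write dump records into and spawns the
// background threads that drain it into dump files.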
void DistMultiTrainer::InitDumpEnv() {
  queue_ = paddle::framework::MakeChannel<std::string>();
  for (int i = 0; i < thread_num_; ++i) {
    workers_[i]->SetChannelWriter(queue_.get());
  }
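  // Every rank runs at least one dump thread; when there are more dump
  // files than MPI ranks, the files are spread evenly, and the first
  // (dump_file_num_ % mpi_size_) ranks take one extra thread.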
  dump_thread_num_ = 1;
  if (dump_file_num_ > mpi_size_) {
    dump_thread_num_ = dump_file_num_ / mpi_size_;
    if (dump_file_num_ % mpi_size_ > mpi_rank_) {
      dump_thread_num_ += 1;
    }
  }
  for (int i = 0; i < dump_thread_num_; i++) {
    dump_thread_.push_back(
        std::thread(std::bind(&TrainerBase::DumpWork, this, i)));
  }
}

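// Binds every worker to the target place and the root scope, creates
// per-device resources from the main program, and registers each thread
// scope with the pull-dense worker.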
void DistMultiTrainer::InitTrainerEnv(const ProgramDesc &main_program,
                                      const platform::Place &place) {
  for (int i = 0; i < thread_num_; ++i) {
    workers_[i]->SetPlace(place);
    workers_[i]->SetReaderPlace(place);
    workers_[i]->SetRootScope(root_scope_);
    workers_[i]->CreateDeviceResource(main_program);  // Program
    workers_[i]->BindingDataFeedMemory();
#if defined(PADDLE_WITH_PSLIB) || defined(PADDLE_WITH_PSCORE)
    workers_[i]->CacheProgram(main_program);
#endif
  }
  // Map each thread's scope to its thread id; the push_dense op uses this
  // mapping to locate the right scope.
  for (int i = 0; i < thread_num_; ++i) {
    Scope *thread_scope = workers_[i]->GetThreadScope();
    pull_dense_worker_->SetThreadIdByScope(thread_scope, i);
  }
}

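// Brings up the remaining machinery: the dump environment (only when field
// or parameter dumping is enabled) and the background pull-dense worker.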
void DistMultiTrainer::InitOtherEnv(const ProgramDesc &main_program) {
  if (need_dump_field_ || need_dump_param_) {
    InitDumpEnv();
  }
  pull_dense_worker_->SetRootScope(root_scope_);
#if defined(PADDLE_WITH_PSCORE) && defined(PADDLE_WITH_CUDA)
  pull_dense_worker_->CreatePinVar();
#endif
  pull_dense_worker_->Start();
#if defined(PADDLE_WITH_PSLIB) || defined(PADDLE_WITH_PSCORE)
  for (int i = 0; i < thread_num_; ++i) {
    workers_[i]->GetXpuOpIndex();
  }
#endif
  VLOG(3) << "init other env done.";
}

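// Launches one training thread per worker; in debug mode each worker runs
// the profiling variant TrainFilesWithProfiler instead of TrainFiles.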
void DistMultiTrainer::Run() {
  for (int thidx = 0; thidx < thread_num_; ++thidx) {
    if (!debug_) {
      threads_.push_back(
          std::thread(&DeviceWorker::TrainFiles, workers_[thidx].get()));
    } else {
      threads_.push_back(std::thread(&DeviceWorker::TrainFilesWithProfiler,
                                     workers_[thidx].get()));
    }
  }
}

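// Returns the thread-local scope owned by the worker for thread_id.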
Scope *DistMultiTrainer::GetWorkerScope(int thread_id) {
  return workers_[thread_id]->GetThreadScope();
}

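// Joins all training threads, accumulates the configured stat variables
// from every thread scope into the root scope, tears down dumping if it
// was enabled, stops the pull-dense worker, and flushes pending pushes.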
void DistMultiTrainer::Finalize() {
  for (auto &th : threads_) {
    th.join();
  }
  for (size_t i = 0; i < need_merge_var_names_.size(); i++) {
    Variable *root_var = root_scope_->FindVar(need_merge_var_names_[i]);
    if (root_var == nullptr) {
      continue;
    }
    LoDTensor *root_tensor = root_var->GetMutable<LoDTensor>();
    for (int j = 1; j < thread_num_; j++) {
      Scope *cur_thread_scope = workers_[j]->GetThreadScope();
      Variable *thread_var =
          cur_thread_scope->FindVar(need_merge_var_names_[i]);
      if (thread_var == nullptr) {
        continue;
      }
      LoDTensor *thread_tensor = thread_var->GetMutable<LoDTensor>();
      if (root_tensor->numel() != thread_tensor->numel()) {
        continue;
      }
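// MergeCallback is expanded once per (cpp_type, proto_type) pair by
// _ForEachDataType_ below. For the matching dtype it adds the thread-local
// tensor into the root tensor element-wise, and aborts on a dtype mismatch.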
#define MergeCallback(cpp_type, proto_type)                                    \
  do {                                                                         \
    if (framework::TransToProtoVarType(root_tensor->dtype()) == proto_type) {  \
      if (framework::TransToProtoVarType(thread_tensor->dtype()) !=            \
          proto_type) {                                                        \
        VLOG(0) << "Error: thread id=" << j << ", need_merge_var_names_[" << i \
                << "] " << need_merge_var_names_[i]                            \
                << ", root tensor type=" << root_tensor->dtype()               \
                << ", thread tensor type=" << thread_tensor->dtype();          \
        exit(-1);                                                              \
      }                                                                        \
      MergeToRootScope<cpp_type>(root_tensor, thread_tensor);                  \
    }                                                                          \
  } while (0)
      _ForEachDataType_(MergeCallback);
    }
  }

  if (need_dump_field_ || need_dump_param_) {
    FinalizeDumpEnv();
  }
  pull_dense_worker_->Stop();
  root_scope_->DropKids();

// flush local client push queue
#ifdef PADDLE_WITH_PSCORE
  auto fleet_ptr_ = paddle::distributed::FleetWrapper::GetInstance();
#else
  auto fleet_ptr_ = FleetWrapper::GetInstance();
#endif
  fleet_ptr_->ClientFlush();
}

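// Element-wise accumulation of a thread-local tensor into the matching
// root-scope tensor. Callers are expected to have checked that both
// tensors share dtype and element count.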
template <typename T>
void DistMultiTrainer::MergeToRootScope(LoDTensor *root_tensor,
                                        LoDTensor *tensor) {
  T *root_data = root_tensor->data<T>();
  T *data = tensor->data<T>();
  for (int i = 0; i < tensor->numel(); i++) {
    root_data[i] += data[i];
  }
}
}  // namespace framework
}  // namespace paddle