/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <ctime>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <thread>  // NOLINT
#include <vector>

#include "paddle/fluid/framework/data_feed.h"
#include "paddle/fluid/framework/data_set.h"
#include "paddle/fluid/framework/device_worker.h"
#include "paddle/fluid/framework/fleet/heter_wrapper.h"
#include "paddle/fluid/framework/heter_service.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/trainer_desc.pb.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/operators/reader/blocking_queue.h"
#include "paddle/fluid/platform/port.h"

namespace paddle {
namespace framework {

class TrainerBase {
 public:
  TrainerBase() {}
  virtual ~TrainerBase() {}
  // model memory is hosted in root_scope
  void SetScope(Scope* root_scope);
  void SetDebug(const bool debug) { debug_ = debug; }
  void SetDataset(Dataset* dataset_ptr) { dataset_ptr_ = dataset_ptr; }
  virtual void Initialize(const TrainerDesc& trainer_desc,
                          Dataset* data_set) = 0;
  virtual void InitTrainerEnv(const ProgramDesc& main_program,
                              const platform::Place& place) = 0;
  virtual void InitOtherEnv(const ProgramDesc& main_program) = 0;
  virtual void Run() = 0;
  virtual void Finalize() = 0;
  virtual Scope* GetWorkerScope(int thread_id) = 0;
  virtual void InitDumpEnv() = 0;
  virtual void DumpWork(int tid);

 protected:
  virtual std::string GetDumpPath(int tid) = 0;
  virtual void ParseDumpConfig(const TrainerDesc& trainer_desc);
  virtual void FinalizeDumpEnv();

  Scope* root_scope_;
  bool debug_;
  Dataset* dataset_ptr_;
  TrainerDesc trainer_desc_;

  // For dumping params or fields
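  // (Presumed flow, judging from the members below: worker threads serialize
  // the configured fields/params into queue_, and dump_thread_ drains the
  // queue to files under dump_fields_path_ using the configured converter.)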
  bool need_dump_field_ = false;
  bool need_dump_param_ = false;
  std::string dump_fields_path_;
  std::string dump_converter_;
  std::vector<std::string> dump_param_;
  std::vector<std::string> dump_fields_;
  int dump_thread_num_;
  std::vector<std::thread> dump_thread_;
  std::shared_ptr<paddle::framework::ChannelObject<std::string>> queue_;
};
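
// Minimal sketch of how a concrete trainer is typically driven. Illustrative
// only: the authoritative call sequence lives in the executor, and
// CreateTrainer() below is a hypothetical stand-in for the actual factory.
//
//   std::shared_ptr<TrainerBase> trainer = CreateTrainer(trainer_desc);
//   trainer->Initialize(trainer_desc, dataset);
//   trainer->SetScope(root_scope);
//   trainer->InitTrainerEnv(main_program, place);
//   trainer->InitOtherEnv(main_program);
//   trainer->Run();
//   trainer->Finalize();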

// General trainer for asynchronous execution.
// Both local and distributed training are supported,
// depending on the assigned device_worker.
class MultiTrainer : public TrainerBase {
 public:
  MultiTrainer() {}
  virtual ~MultiTrainer() {}
  virtual void Initialize(const TrainerDesc& trainer_desc, Dataset* data_set);
  virtual void InitTrainerEnv(const ProgramDesc& main_program,
                              const platform::Place& place);
  virtual void InitOtherEnv(const ProgramDesc& main_program);
  virtual void Run();
  virtual void Finalize();
  virtual void InitDumpEnv();
  virtual Scope* GetWorkerScope(int thread_id);
  virtual std::string GetDumpPath(int tid);

 protected:
  int thread_num_;
  std::vector<std::thread> threads_;
  std::vector<DataFeed*> readers_;
  std::vector<std::shared_ptr<DeviceWorker>> workers_;
  std::vector<std::string> need_merge_var_names_;

  int mpi_rank_;
  int mpi_size_;
  int dump_file_num_;
};

class DistMultiTrainer : public MultiTrainer {
 public:
  DistMultiTrainer() {}
  virtual ~DistMultiTrainer() {}
  virtual void Initialize(const TrainerDesc& trainer_desc, Dataset* data_set);
  virtual void InitTrainerEnv(const ProgramDesc& main_program,
                              const platform::Place& place);
  virtual void InitOtherEnv(const ProgramDesc& main_program);
  virtual void Run();
  virtual void Finalize();
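  // Merges a thread-local tensor into the corresponding root-scope tensor
  // (presumably an element-wise reduction; the exact rule lives in the .cc).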
  template <typename T>
  void MergeToRootScope(LoDTensor* root_tensor, LoDTensor* thread_tensor);
  virtual void InitDumpEnv();
  virtual Scope* GetWorkerScope(int thread_id);
  virtual void RegisterHeterCallback();

 protected:
  std::shared_ptr<paddle::framework::PullDenseWorker> pull_dense_worker_;
};

#if (defined PADDLE_WITH_CUDA) && (defined PADDLE_WITH_PSLIB)
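// Pooled per-request execution context (see object_pool_ in HeterXpuTrainer):
// holds the ops, scope, CUDA event, and pending push-dense futures for one
// service call.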
class HeterServiceContext {
 public:
  HeterServiceContext() {}
  virtual ~HeterServiceContext() {
    for (OperatorBase* op : ops_) {
      delete op;
    }
    std::vector<OperatorBase*>().swap(ops_);
  }
  void Reset() { push_dense_status_.clear(); }
  int place_num_;
  Scope* scope_{nullptr};
  cudaEvent_t event_;
  std::vector<OperatorBase*> ops_;
  std::vector<::std::future<int32_t>> push_dense_status_;
};

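// Service-side trainer for heterogeneous CPU/GPU training: it caches the
// device-side slice of the program (ops in [xpu_begin_op_index_,
// xpu_end_op_index_]) and executes it in response to RunTask requests.
// (High-level reading of the interface; see the .cc for details.)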
class HeterXpuTrainer : public TrainerBase {
 public:
  HeterXpuTrainer() {}
  virtual ~HeterXpuTrainer() {
    for (OperatorBase* op : ops_) {
      delete op;
    }
    std::vector<OperatorBase*>().swap(ops_);
  }
  virtual void Initialize(const TrainerDesc& trainer_desc, Dataset* data_set);
  virtual void InitTrainerEnv(const ProgramDesc& main_program,
                              const platform::Place& place);
  virtual void InitOtherEnv(const ProgramDesc& main_program);
  virtual void Run();
  virtual void Finalize();
  virtual void DumpWork(int tid);
  virtual void RegisterServiceHandler();
  virtual int RunTask(const HeterRequest* request, HeterResponse* response);
  virtual Scope* GetWorkerScope(int thread_id);
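  // Rebuilds program_ in place (placement new with ProgramDesc's copy
  // constructor), presumably to sidestep the need for an assignment operator.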
  virtual void CacheProgram(const ProgramDesc& main_program) {
    new (&program_) ProgramDesc(main_program);
  }
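  // Presumably copies a root-scope tensor into its thread-local counterpart on
  // thread_place, asynchronously on the given CUDA stream.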
  template <typename T>
  void HeterMemCpy(LoDTensor* tensor, LoDTensor* root_tensor,
                   const paddle::platform::Place& thread_place,
                   cudaStream_t stream);
  void CreateThreadParam(const ProgramDesc& program, int num);
  template <typename T>
  void MergeToRootScope(LoDTensor* root_tensor, LoDTensor* thread_tensor);
  int EndPass(const HeterRequest* request, HeterResponse* response);
  int StopService(const HeterRequest* request, HeterResponse* response);

 protected:
  DownpourWorkerParameter param_;
  std::map<uint64_t, std::vector<std::string>> dense_grad_names_;
  std::vector<std::string> need_merge_var_names_;
  float scale_datanorm_;
  int xpu_begin_op_index_;
  int xpu_end_op_index_;
  bool running_;
  paddle::platform::Place place_;
  std::mutex mutex_;
  ProgramDesc program_;
  std::condition_variable cond_;
  std::shared_ptr<paddle::framework::FleetWrapper> fleet_ptr_;
  std::shared_ptr<paddle::framework::HeterWrapper> heter_ptr_;
  std::shared_ptr<paddle::framework::PullDenseWorker> pull_dense_worker_;
  std::vector<OperatorBase*> ops_;
  std::vector<std::string> op_names_;
  std::vector<Scope*> place_scopes_;
  BtObjectPool<HeterServiceContext> object_pool_;
  std::vector<cudaStream_t> copy_streams_;
  std::vector<platform::Place> places_;
  std::vector<cudaEvent_t> events_;
};
#endif

#if defined(PADDLE_WITH_NCCL)
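// Trainer for pipeline-parallel execution: runs the program over
// num_microbatches_ micro-batches, each with its own scope, on a dedicated
// section thread (summary inferred from the members below).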
class PipelineTrainer : public TrainerBase {
 public:
  PipelineTrainer() {}
  ~PipelineTrainer() override {}
  void Initialize(const TrainerDesc& trainer_desc, Dataset* data_set) override;
  void InitTrainerEnv(const ProgramDesc& main_program,
                      const platform::Place& place) override;
  void InitOtherEnv(const ProgramDesc& main_program) override;
  void Run() override;
  void Finalize() override;
  Scope* GetWorkerScope(int thread_id) override;
  void InitDumpEnv() override;
  std::string GetDumpPath(int tid) override;
  void GetSkipVars(const ProgramDesc& main_program);

 protected:
  int num_microbatches_;
  platform::Place place_;
  std::vector<std::string> skip_vars_;

  std::thread section_thread_;
  std::shared_ptr<paddle::framework::DeviceWorker> worker_;
  Scope* minibatch_scope_;
  // microbatch_scopes_: [microbatch_id]
  std::vector<Scope*> microbatch_scopes_;

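  // Presumably creates/copies the program's parameters and variables into
  // microbatch_scopes_[microbatch_id] on the given place.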
  void CopyParameters(int microbatch_id, const ProgramDesc& program,
                      const platform::Place& place);
};
#endif

}  // namespace framework
}  // namespace paddle