/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <memory>
#ifdef PADDLE_WITH_PSLIB
#include <archive.h>
#include <pslib.h>
#endif
#include <ThreadPool.h>
#include <atomic>
#include <ctime>
#include <map>
#include <random>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/heter_util.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/platform/macros.h"  // for DISABLE_COPY_AND_ASSIGN
#ifdef PADDLE_WITH_HETERPS
#include "paddle/fluid/platform/device/gpu/gpu_types.h"
#endif

namespace paddle {
namespace framework {
class Scope;
}  // namespace framework
}  // namespace paddle

namespace paddle {
namespace framework {

// A wrapper class for pslib.h; this class follows the Singleton pattern,
// i.e. it is initialized only once in the current process.
// Example:
//    std::shared_ptr<FleetWrapper> fleet_ptr =
//         FleetWrapper::GetInstance();
//    string dist_desc;
//    fleet_ptr->InitServer(dist_desc, 0);
// Interface design principles:
// Pull
//   Sync: PullSparseVarsSync
//   Async: PullSparseVarsAsync (not implemented currently)
// Push
//   Sync: PushSparseVarsSync
//   Async: PushSparseVarsAsync (not implemented currently)
//   Async: PushSparseVarsWithLabelAsync (with special usage)
// Push dense variables to server in async mode
// Param<in>: scope, table_id, var_names
// Param<out>: push_sparse_status

class FleetWrapper {
 public:
  virtual ~FleetWrapper() {}
  FleetWrapper() {
    scale_sparse_gradient_with_batch_size_ = true;
    // trainer sleeps for a while before exiting on failure, so that pslib
    // has time to core dump
    sleep_seconds_before_fail_exit_ = 300;
    // pslib client-to-client request timeout in ms
    client2client_request_timeout_ms_ = 500000;
    // pslib client-to-client connect timeout in ms
    client2client_connect_timeout_ms_ = 10000;
    // max retry count for pslib client-to-client requests
    client2client_max_retry_ = 3;
    pull_local_thread_num_ = 25;
  }

  // set client-to-client communication config
  void SetClient2ClientConfig(int request_timeout_ms, int connect_timeout_ms,
                              int max_retry);
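  // Usage sketch (a minimal example; the values shown are the constructor
  // defaults, not tuned recommendations):
  //    fleet_ptr->SetClient2ClientConfig(/*request_timeout_ms=*/500000,
  //                                      /*connect_timeout_ms=*/10000,
  //                                      /*max_retry=*/3);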

  void SetPullLocalThreadNum(int thread_num) {
    pull_local_thread_num_ = thread_num;
  }

#ifdef PADDLE_WITH_PSLIB
  void HeterPullSparseVars(int workerid, std::shared_ptr<HeterTask> task,
                           const uint64_t table_id,
                           const std::vector<std::string>& var_names,
                           int fea_dim,
                           const std::vector<std::string>& var_emb_names);

  void HeterPushSparseVars(
      std::shared_ptr<HeterTask> task, const Scope& scope,
      const uint64_t table_id, const std::vector<std::string>& sparse_key_names,
      const std::vector<std::string>& sparse_grad_names, const int emb_dim,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      const bool use_cvm, const bool dump_slot, const bool no_cvm);
#endif

  typedef std::function<void(int, int)> HeterCallBackFunc;
  int RegisterHeterCallback(HeterCallBackFunc handler);

  // Pull sparse variables from server in sync mode
  // Param<in>: scope, table_id, var_names, fea_keys, fea_dim, var_emb_names
  // Param<out>: fea_values
  void PullSparseVarsSync(const Scope& scope, const uint64_t table_id,
                          const std::vector<std::string>& var_names,
                          std::vector<uint64_t>* fea_keys,
                          std::vector<std::vector<float>>* fea_values,
                          int fea_dim,
                          const std::vector<std::string>& var_emb_names);
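  // Usage sketch (a minimal example; the table id, dim, and names are
  // placeholders):
  //    std::vector<uint64_t> fea_keys;
  //    std::vector<std::vector<float>> fea_values;
  //    fleet_ptr->PullSparseVarsSync(scope, /*table_id=*/0, {"slot_emb_0"},
  //                                  &fea_keys, &fea_values, /*fea_dim=*/11,
  //                                  {"embedding_0"});
  //    // after the call, fea_values[i] holds fea_dim floats for fea_keys[i]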

  // Pull sparse variables from server in async mode
  // Param<in>: scope, table_id, var_names, fea_keys, fea_dim
  // Param<out>: fea_values std::future
  std::future<int32_t> PullSparseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<uint64_t>* fea_keys,
      std::vector<std::vector<float>>* fea_values, int fea_dim);

  // Pull sparse variables from server in sync mode
  // pull immediately to tensors
  void PullSparseToTensorSync(const uint64_t table_id, int fea_dim,
                              uint64_t padding_id, platform::Place place,
                              std::vector<const LoDTensor*>* inputs,  // NOLINT
                              std::vector<LoDTensor*>* outputs);      // NOLINT
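  // Usage sketch (table id, dim, padding id, and tensors are placeholders):
  //    std::vector<const LoDTensor*> inputs = {&id_tensor};
  //    std::vector<LoDTensor*> outputs = {&emb_tensor};
  //    fleet_ptr->PullSparseToTensorSync(/*table_id=*/0, /*fea_dim=*/11,
  //                                      /*padding_id=*/0, place, &inputs,
  //                                      &outputs);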

  // pull dense variables from server in sync mode
  // Param<in>: scope, table_id, var_names
  // Param<out>: void
  void PullDenseVarsSync(const Scope& scope, const uint64_t table_id,
                         const std::vector<std::string>& var_names);

  // pull dense variables from server in async mode
  // Param<in>: scope, table_id, var_names
  // Param<out>: pull_dense_status
  void PullDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* pull_dense_status, bool in_cpu);
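  // Usage sketch (table id and variable names are placeholders):
  //    std::vector<::std::future<int32_t>> pull_dense_status;
  //    fleet_ptr->PullDenseVarsAsync(scope, /*table_id=*/1,
  //                                  {"fc_0.w_0", "fc_0.b_0"},
  //                                  &pull_dense_status, /*in_cpu=*/true);
  //    for (auto& t : pull_dense_status) t.wait();  // block until all done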

  // push dense parameters (not gradients) to server in sync mode
  void PushDenseParamSync(const Scope& scope, const uint64_t table_id,
                          const std::vector<std::string>& var_names);

// Push dense variables to server in async mode
// Param<in>: scope, table_id, var_names, scale_datanorm, batch_size
// Param<out>: push_sparse_status
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  void PushDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      float scale_datanorm, int batch_size,
      const paddle::platform::Place& place, gpuStream_t stream,
      gpuEvent_t event);
#endif
#ifdef PADDLE_WITH_XPU
  void PushDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      float scale_datanorm, int batch_size,
      const paddle::platform::Place& place);
#endif
  void PushDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      float scale_datanorm, int batch_size);
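  // Usage sketch (all arguments are placeholders):
  //    std::vector<::std::future<int32_t>> push_status;
  //    fleet_ptr->PushDenseVarsAsync(scope, /*table_id=*/1,
  //                                  {"fc_0.w_0@GRAD"}, &push_status,
  //                                  /*scale_datanorm=*/-1,
  //                                  /*batch_size=*/32);
  //    for (auto& t : push_status) t.wait();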

  // push dense variables to server in sync mode
  void PushDenseVarsSync(Scope* scope, const uint64_t table_id,
                         const std::vector<std::string>& var_names);

  // local sparse tables, a client-side cache used by PullSparseToLocal and
  // PullSparseVarsFromLocal
  std::vector<std::unordered_map<uint64_t, std::vector<float>>> local_tables_;
  void PullSparseToLocal(const uint64_t table_id, int fea_value_dim);
  void PullSparseVarsFromLocal(const Scope& scope, const uint64_t table_id,
                               const std::vector<std::string>& var_names,
                               std::vector<uint64_t>* fea_keys,
                               std::vector<std::vector<float>>* fea_values,
                               int fea_value_dim);
  void ClearLocalTable();
  std::vector<std::unordered_map<uint64_t, std::vector<float>>>&
  GetLocalTable() {
    return local_tables_;
  }

  // Push sparse variables with labels to server in async mode;
  // this is specially designed for click/show stats in server
  // Param<in>: scope, table_id, fea_keys, fea_labels, sparse_key_names,
  //            sparse_grad_names, batch_size, use_cvm, dump_slot
  // Param<out>: push_values, push_sparse_status
  void PushSparseVarsWithLabelAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<uint64_t>& fea_keys,
      const std::vector<float>& fea_labels,
      const std::vector<std::string>& sparse_key_names,
      const std::vector<std::string>& sparse_grad_names, const int emb_dim,
      std::vector<std::vector<float>>* push_values,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      const int batch_size, const bool use_cvm, const bool dump_slot,
      std::vector<uint64_t>* sparse_push_keys, const bool no_cvm,
      const bool scale_sparse_gradient_with_batch_size);
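  // Usage sketch (all names, dims, and flags are illustrative):
  //    std::vector<std::vector<float>> push_values;
  //    std::vector<uint64_t> sparse_push_keys;
  //    std::vector<::std::future<int32_t>> push_sparse_status;
  //    fleet_ptr->PushSparseVarsWithLabelAsync(
  //        scope, /*table_id=*/0, fea_keys, fea_labels, {"slot_emb_0"},
  //        {"slot_emb_0@GRAD"}, /*emb_dim=*/11, &push_values,
  //        &push_sparse_status, /*batch_size=*/32, /*use_cvm=*/true,
  //        /*dump_slot=*/false, &sparse_push_keys, /*no_cvm=*/false,
  //        /*scale_sparse_gradient_with_batch_size=*/true);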

  // Push sparse variables to server in async mode
  void PushSparseFromTensorWithLabelAsync(
      const Scope& scope, const uint64_t table_id, int fea_dim,
      uint64_t padding_id, bool scale_sparse, const std::string& accesor,
      const std::string& click_name, platform::Place place,
      const std::vector<std::string>& input_names,
      std::vector<const LoDTensor*>* inputs,    // NOLINT
      std::vector<const LoDTensor*>* outputs);  // NOLINT

  // Push sparse variables to server in Async mode
  // Param<In>: scope, table_id, fea_keys, sparse_grad_names
  // Param<Out>: push_values, push_sparse_status
  /*
  void PushSparseVarsAsync(
          const Scope& scope,
          const uint64_t table_id,
          const std::vector<uint64_t>& fea_keys,
          const std::vector<std::string>& sparse_grad_names,
          std::vector<std::vector<float>>* push_values,
          std::vector<::std::future<int32_t>>* push_sparse_status);
  */

  // init server
  void InitServer(const std::string& dist_desc, int index);
  // init worker (trainer)
  void InitWorker(const std::string& dist_desc,
                  const std::vector<uint64_t>& host_sign_list, int node_num,
                  int index);
  // stop server
  void StopServer();
  // finalize worker so that the worker can be stopped
  void FinalizeWorker();
  // run server
  uint64_t RunServer();
  // run server with the specified ip and port
  uint64_t RunServer(const std::string& ip, uint32_t port);
  // gather server ips
  void GatherServers(const std::vector<uint64_t>& host_sign_list, int node_num);
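  // Usage sketch of a typical bootstrap, assuming `dist_desc`,
  // `host_sign_list`, `node_num`, and `rank` are prepared by the caller:
  //    // on each server node:
  //    fleet_ptr->InitServer(dist_desc, /*index=*/rank);
  //    uint64_t sign = fleet_ptr->RunServer();
  //    // on each worker node, after all server signs are collected:
  //    fleet_ptr->InitWorker(dist_desc, host_sign_list, node_num, rank);
  //    fleet_ptr->GatherServers(host_sign_list, node_num);
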
  // gather client ips
  void GatherClients(const std::vector<uint64_t>& host_sign_list);
  // get client info
  std::vector<uint64_t> GetClientsInfo();
  // create client-to-client connections
  void CreateClient2ClientConnection();
  // flush all push requests
  void ClientFlush();
  // load from paddle model
  void LoadFromPaddleModel(Scope& scope, const uint64_t table_id,  // NOLINT
                           std::vector<std::string> var_list,
                           std::string model_path, std::string model_proto_file,
                           std::vector<std::string> table_var_list,
                           bool load_combine);

  void PrintTableStat(const uint64_t table_id);
  void SetFileNumOneShard(const uint64_t table_id, int file_num);
  // mode = 0: load all features
  // mode = 1: load delta features, i.e. only the diff
  void LoadModel(const std::string& path, const int mode);
  // mode = 0: load all features
  // mode = 1: load delta features, i.e. only the diff
  void LoadModelOneTable(const uint64_t table_id, const std::string& path,
                         const int mode);
  // mode = 0: save all features
  // mode = 1: save delta features, i.e. only the diff
  void SaveModel(const std::string& path, const int mode);
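  // Usage sketch (the path is a placeholder):
  //    fleet_ptr->SaveModel("hdfs:/demo/model", /*mode=*/0);  // full save
  //    fleet_ptr->SaveModel("hdfs:/demo/model", /*mode=*/1);  // delta save
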
  void SaveMultiTableOnePath(const std::vector<int>& table_ids,
                             const std::string& path, const int mode);
  // mode = 0: save all features
  // mode = 1: save delta features, i.e. only the diff
  void SaveModelOneTable(const uint64_t table_id, const std::string& path,
                         const int mode);
  // save model with prefix
  void SaveModelOneTablePrefix(const uint64_t table_id, const std::string& path,
                               const int mode, const std::string& prefix);
  // get save cache threshold
  double GetCacheThreshold(int table_id);
  // shuffle cache model between servers
  void CacheShuffle(int table_id, const std::string& path, const int mode,
                    const double cache_threshold);
  // save cache model; a cache model can speed up online prediction
  int32_t SaveCache(int table_id, const std::string& path, const int mode);
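  // Usage sketch of the cache-save flow (table id and path are
  // placeholders):
  //    double threshold = fleet_ptr->GetCacheThreshold(/*table_id=*/0);
  //    fleet_ptr->CacheShuffle(0, "/demo/cache_model", /*mode=*/0, threshold);
  //    fleet_ptr->SaveCache(0, "/demo/cache_model", /*mode=*/0);
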
  // save sparse table filtered by user-defined whitelist
  int32_t SaveWithWhitelist(int table_id, const std::string& path,
                            const int mode, const std::string& whitelist_path);
  void LoadWithWhitelist(const uint64_t table_id, const std::string& path,
                         const int mode);
  // copy feasign key/value from src_table_id to dest_table_id
  int32_t CopyTable(const uint64_t src_table_id, const uint64_t dest_table_id);
  // copy feasign key/value from src_table_id to dest_table_id
  int32_t CopyTableByFeasign(const uint64_t src_table_id,
                             const uint64_t dest_table_id,
                             const std::vector<uint64_t>& feasign_list);
  // clear all models, release their memory
  void ClearModel();
  // clear one table
  void ClearOneTable(const uint64_t table_id);
  // shrink sparse table
  void ShrinkSparseTable(int table_id);
  // shrink dense table
  void ShrinkDenseTable(int table_id, Scope* scope,
                        std::vector<std::string> var_list, float decay,
                        int emb_dim);

  typedef std::function<int32_t(int, int, const std::string&)> MsgHandlerFunc;
  // register a client-to-client message handler
  int RegisterClientToClientMsgHandler(int msg_type, MsgHandlerFunc handler);
  // send a client-to-client message
  std::future<int32_t> SendClientToClientMsg(int msg_type, int to_client_id,
                                             const std::string& msg);
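  // Usage sketch (msg_type 0 and the handler body are illustrative):
  //    fleet_ptr->RegisterClientToClientMsgHandler(
  //        /*msg_type=*/0,
  //        [](int msg_type, int from_client, const std::string& msg) {
  //          return 0;  // process msg and return a status code
  //        });
  //    auto status = fleet_ptr->SendClientToClientMsg(/*msg_type=*/0,
  //                                                   /*to_client_id=*/1,
  //                                                   "ping");
  //    status.wait();
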
  // confirm all the updated params in the current pass
  void Confirm();
  // revert all the updated params in the current pass
  void Revert();
  // FleetWrapper singleton
  static std::shared_ptr<FleetWrapper> GetInstance() {
    if (nullptr == s_instance_) {
      s_instance_.reset(new paddle::framework::FleetWrapper());
    }
    return s_instance_;
  }
  // this performs better than rand_r, especially for large data
  std::default_random_engine& LocalRandomEngine();
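  // Usage sketch:
  //    std::uniform_real_distribution<float> dist(0.0f, 1.0f);
  //    float r = dist(fleet_ptr->LocalRandomEngine());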

  void SetDate(const uint64_t table_id, const std::string& date);

#ifdef PADDLE_WITH_PSLIB
  static std::shared_ptr<paddle::distributed::PSlib> pslib_ptr_;
#endif

 private:
  static std::shared_ptr<FleetWrapper> s_instance_;
#ifdef PADDLE_WITH_PSLIB
  std::map<uint64_t, std::vector<paddle::ps::Region>> _regions;
#endif

  size_t GetAbsoluteSum(size_t start, size_t end, size_t level,
                        const framework::LoD& lod);

 protected:
  static bool is_initialized_;
  bool scale_sparse_gradient_with_batch_size_;
  int32_t sleep_seconds_before_fail_exit_;
  int client2client_request_timeout_ms_;
  int client2client_connect_timeout_ms_;
  int client2client_max_retry_;
  std::unique_ptr<::ThreadPool> local_pull_pool_{nullptr};
  int pull_local_thread_num_;
  std::unique_ptr<::ThreadPool> pull_to_local_pool_{nullptr};
  int local_table_shard_num_;
  DISABLE_COPY_AND_ASSIGN(FleetWrapper);
};

}  // end namespace framework
}  // end namespace paddle