/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <memory>
#ifdef PADDLE_WITH_PSLIB
#include <archive.h>
#include <pslib.h>
#endif
#include <ThreadPool.h>
#include <atomic>
#include <ctime>
#include <functional>
#include <future>
#include <map>
#include <random>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/heter_service.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/platform/macros.h"  // for DISABLE_COPY_AND_ASSIGN

namespace paddle {
namespace framework {

// A wrapper class for pslib.h; this class follows the Singleton pattern,
// i.e. it is initialized only once in the current process
// Example:
//    std::shared_ptr<FleetWrapper> fleet_ptr =
//         FleetWrapper::GetInstance();
//    string dist_desc;
//    fleet_ptr->InitServer(dist_desc, 0);
// interface design principles:
// Pull
//   Sync: PullSparseVarsSync
//   Async: PullSparseVarsAsync(not implemented currently)
// Push
//   Sync: PushSparseVarsSync
//   Async: PushSparseVarsAsync(not implemented currently)
//   Async: PushSparseVarsWithLabelAsync(with special usage)
// Push dense variables to server in Async mode
// Param<in>: scope, table_id, var_names
// Param<out>: push_sparse_status

class FleetWrapper {
 public:
  virtual ~FleetWrapper() {}
  FleetWrapper() {
    scale_sparse_gradient_with_batch_size_ = true;
    // seconds the trainer sleeps before a failure exit,
    // giving pslib time to dump core
    sleep_seconds_before_fail_exit_ = 300;
    // pslib client-to-server request timeout in ms
    client2client_request_timeout_ms_ = 500000;
    // pslib connect-to-server timeout in ms
    client2client_connect_timeout_ms_ = 10000;
    // max retries for pslib client requests
    client2client_max_retry_ = 3;
    pull_local_thread_num_ = 25;
  }

  // set client-to-client communication config
  void SetClient2ClientConfig(int request_timeout_ms, int connect_timeout_ms,
                              int max_retry);
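  // A minimal usage sketch (the values shown mirror the constructor defaults):
  //    auto fleet_ptr = FleetWrapper::GetInstance();
  //    fleet_ptr->SetClient2ClientConfig(/*request_timeout_ms=*/500000,
  //                                      /*connect_timeout_ms=*/10000,
  //                                      /*max_retry=*/3);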

  void SetPullLocalThreadNum(int thread_num) {
    pull_local_thread_num_ = thread_num;
  }

#ifdef PADDLE_WITH_PSLIB
  void HeterPullSparseVars(int workerid, std::shared_ptr<HeterTask> task,
                           const uint64_t table_id,
                           const std::vector<std::string>& var_names,
                           int fea_dim,
                           const std::vector<std::string>& var_emb_names);

  void HeterPushSparseVars(
      std::shared_ptr<HeterTask> task, const uint64_t table_id,
      const std::vector<std::string>& sparse_key_names,
      const std::vector<std::string>& sparse_grad_names, const int emb_dim,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      const bool use_cvm, const bool dump_slot, const bool no_cvm);
#endif

  typedef std::function<void(int, int)> HeterCallBackFunc;
  int RegisterHeterCallback(HeterCallBackFunc handler);

  // Pull sparse variables from server in sync mode
  // Param<in>: scope, table_id, var_names, fea_keys, fea_dim, var_emb_names
  // Param<out>: fea_values
  void PullSparseVarsSync(const Scope& scope, const uint64_t table_id,
                          const std::vector<std::string>& var_names,
                          std::vector<uint64_t>* fea_keys,
                          std::vector<std::vector<float>>* fea_values,
                          int fea_dim,
                          const std::vector<std::string>& var_emb_names);
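  // A sketch of a typical sync pull (slot names and fea_dim are hypothetical;
  // fea_keys/fea_values are filled in by the call):
  //    std::vector<uint64_t> fea_keys;
  //    std::vector<std::vector<float>> fea_values;
  //    fleet_ptr->PullSparseVarsSync(scope, /*table_id=*/0, {"slot_emb_0"},
  //                                  &fea_keys, &fea_values, /*fea_dim=*/11,
  //                                  {"slot_emb_0"});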

  // Pull sparse variables from server in async mode
  // Param<in>: scope, table_id, var_names, fea_keys, fea_dim
  // Param<out>: fea_values std::future
  std::future<int32_t> PullSparseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<uint64_t>* fea_keys,
      std::vector<std::vector<float>>* fea_values, int fea_dim);

  // Pull sparse variables from server in sync mode
  // pull immediately to tensors
  void PullSparseToTensorSync(const uint64_t table_id, int fea_dim,
                              uint64_t padding_id, platform::Place place,
                              std::vector<const LoDTensor*>* inputs,  // NOLINT
                              std::vector<LoDTensor*>* outputs);      // NOLINT

  // pull dense variables from server in sync mode
  // Param<in>: scope, table_id, var_names
  // Param<out>: void
  void PullDenseVarsSync(const Scope& scope, const uint64_t table_id,
                         const std::vector<std::string>& var_names);

  // pull dense variables from server in async mode
  // Param<in>: scope, table_id, var_names
  // Param<out>: pull_dense_status
  void PullDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* pull_dense_status, bool in_cpu);
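  // A sketch of an async dense pull (variable names are hypothetical; the
  // returned futures must be waited on before the variables are read):
  //    std::vector<::std::future<int32_t>> pull_dense_status;
  //    fleet_ptr->PullDenseVarsAsync(scope, /*table_id=*/1, {"fc_0.w_0"},
  //                                  &pull_dense_status, /*in_cpu=*/true);
  //    for (auto& t : pull_dense_status) t.wait();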

  // push dense parameters (not gradients) to server in sync mode
  void PushDenseParamSync(const Scope& scope, const uint64_t table_id,
                          const std::vector<std::string>& var_names);

// Push dense variables to server in async mode
// Param<in>: scope, table_id, var_names, scale_datanorm, batch_size
// Param<out>: push_sparse_status
#ifdef PADDLE_WITH_CUDA
  void PushDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      float scale_datanorm, int batch_size,
      const paddle::platform::Place& place, cudaStream_t stream,
      cudaEvent_t event);
#endif
  void PushDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      float scale_datanorm, int batch_size);
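  // A sketch of the CPU overload (variable names, table_id and the
  // scale_datanorm value are placeholders):
  //    std::vector<::std::future<int32_t>> push_status;
  //    fleet_ptr->PushDenseVarsAsync(scope, /*table_id=*/1,
  //                                  {"fc_0.w_0@GRAD", "fc_0.b_0@GRAD"},
  //                                  &push_status, /*scale_datanorm=*/-1,
  //                                  /*batch_size=*/32);
  //    for (auto& t : push_status) t.wait();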

  // push dense variables to server in sync mode
  void PushDenseVarsSync(Scope* scope, const uint64_t table_id,
                         const std::vector<std::string>& var_names);

  // Push sparse variables with labels to server in async mode
  std::vector<std::unordered_map<uint64_t, std::vector<float>>> local_tables_;
  void PullSparseToLocal(const uint64_t table_id, int fea_value_dim);
  void PullSparseVarsFromLocal(const Scope& scope, const uint64_t table_id,
                               const std::vector<std::string>& var_names,
                               std::vector<uint64_t>* fea_keys,
                               std::vector<std::vector<float>>* fea_values,
                               int fea_value_dim);
  void ClearLocalTable();
  std::vector<std::unordered_map<uint64_t, std::vector<float>>>&
  GetLocalTable() {
    return local_tables_;
  }
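  // A rough sketch of the local-table path (a best-effort reading of the
  // intended flow; thread-pool setup and sharding details are omitted):
  //    fleet_ptr->PullSparseToLocal(/*table_id=*/0, /*fea_value_dim=*/9);
  //    fleet_ptr->PullSparseVarsFromLocal(scope, /*table_id=*/0, {"slot_emb_0"},
  //                                       &fea_keys, &fea_values,
  //                                       /*fea_value_dim=*/9);
  //    fleet_ptr->ClearLocalTable();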

  // This is specially designed for click/show stats in server
  // Param<in>: scope, table_id, fea_keys, fea_labels, sparse_key_names,
  //            sparse_grad_names, batch_size, use_cvm, dump_slot
  // Param<out>: push_values, push_sparse_status
  void PushSparseVarsWithLabelAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<uint64_t>& fea_keys,
      const std::vector<float>& fea_labels,
      const std::vector<std::string>& sparse_key_names,
      const std::vector<std::string>& sparse_grad_names, const int emb_dim,
      std::vector<std::vector<float>>* push_values,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      const int batch_size, const bool use_cvm, const bool dump_slot,
      std::vector<uint64_t>* sparse_push_keys, const bool no_cvm);
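  // A sketch of the labeled async push (slot/grad names are hypothetical;
  // fea_keys and fea_labels come from the preceding pull and the click signal):
  //    std::vector<std::vector<float>> push_values;
  //    std::vector<::std::future<int32_t>> push_sparse_status;
  //    std::vector<uint64_t> sparse_push_keys;
  //    fleet_ptr->PushSparseVarsWithLabelAsync(
  //        scope, /*table_id=*/0, fea_keys, fea_labels, {"slot_emb_0"},
  //        {"slot_emb_0@GRAD"}, /*emb_dim=*/9, &push_values,
  //        &push_sparse_status, /*batch_size=*/32, /*use_cvm=*/false,
  //        /*dump_slot=*/false, &sparse_push_keys, /*no_cvm=*/false);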

  // Push sparse variables to server in async mode
  void PushSparseFromTensorWithLabelAsync(
      const Scope& scope, const uint64_t table_id, int fea_dim,
      uint64_t padding_id, bool scale_sparse, const std::string& accesor,
      const std::string& click_name, platform::Place place,
      const std::vector<std::string>& input_names,
      std::vector<const LoDTensor*>* inputs,    // NOLINT
      std::vector<const LoDTensor*>* outputs);  // NOLINT

  // Push sparse variables to server in Async mode
  // Param<In>: scope, table_id, fea_keys, sparse_grad_names
  // Param<Out>: push_values, push_sparse_status
  /*
  void PushSparseVarsAsync(
          const Scope& scope,
          const uint64_t table_id,
          const std::vector<uint64_t>& fea_keys,
          const std::vector<std::string>& sparse_grad_names,
          std::vector<std::vector<float>>* push_values,
          std::vector<::std::future<int32_t>>* push_sparse_status);
  */

  // init server
  void InitServer(const std::string& dist_desc, int index);
  // init trainer
  void InitWorker(const std::string& dist_desc,
                  const std::vector<uint64_t>& host_sign_list, int node_num,
                  int index);
  // stop server
  void StopServer();
  // finalize worker so that the worker can be stopped
  void FinalizeWorker();
  // run server
  uint64_t RunServer();
  // run server with ip and port
  uint64_t RunServer(const std::string& ip, uint32_t port);
  // gather server ip
  void GatherServers(const std::vector<uint64_t>& host_sign_list, int node_num);
  // gather client ip
  void GatherClients(const std::vector<uint64_t>& host_sign_list);
  // get client info
  std::vector<uint64_t> GetClientsInfo();
  // create client-to-client connection
  void CreateClient2ClientConnection();
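  // A rough sketch of the typical startup sequence (orchestration is usually
  // driven by the Python fleet API; indices and lists below are illustrative):
  //    // on each server node:
  //    fleet_ptr->InitServer(dist_desc, server_index);
  //    uint64_t server_sign = fleet_ptr->RunServer();
  //    // on each worker node, after all server signs are collected:
  //    fleet_ptr->InitWorker(dist_desc, host_sign_list, node_num, worker_index);
  //    fleet_ptr->GatherServers(host_sign_list, node_num);
  //    fleet_ptr->CreateClient2ClientConnection();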
  // flush all push requests
  void ClientFlush();
  // load from paddle model
  void LoadFromPaddleModel(Scope& scope, const uint64_t table_id,  // NOLINT
                           std::vector<std::string> var_list,
                           std::string model_path, std::string model_proto_file,
                           std::vector<std::string> table_var_list,
                           bool load_combine);

  void PrintTableStat(const uint64_t table_id);
  // mode = 0, load all features
  // mode = 1, load delta features, i.e. only the diff
  void LoadModel(const std::string& path, const int mode);
  // mode = 0, load all features
  // mode = 1, load delta features, i.e. only the diff
  void LoadModelOneTable(const uint64_t table_id, const std::string& path,
                         const int mode);
  // mode = 0, save all features
  // mode = 1, save delta features, i.e. only the diff
  void SaveModel(const std::string& path, const int mode);
  // mode = 0, save all features
  // mode = 1, save delta features, i.e. only the diff
  void SaveModelOneTable(const uint64_t table_id, const std::string& path,
                         const int mode);
  // save model with prefix
  void SaveModelOneTablePrefix(const uint64_t table_id, const std::string& path,
                               const int mode, const std::string& prefix);
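  // A sketch of full vs. delta save (the output path is a placeholder):
  //    fleet_ptr->SaveModel("hdfs:/demo/output/model", /*mode=*/0);  // all features
  //    fleet_ptr->SaveModel("hdfs:/demo/output/model", /*mode=*/1);  // delta only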
  // get save cache threshold
  double GetCacheThreshold(int table_id);
  // shuffle cache model between servers
  void CacheShuffle(int table_id, const std::string& path, const int mode,
                    const double cache_threshold);
  // save cache model
  // the cache model can speed up online prediction
  int32_t SaveCache(int table_id, const std::string& path, const int mode);
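  // A rough sketch of how these are commonly chained for a cache save
  // (table_id/path are placeholders; modes follow the save-model convention):
  //    double threshold = fleet_ptr->GetCacheThreshold(table_id);
  //    fleet_ptr->CacheShuffle(table_id, path, /*mode=*/0, threshold);
  //    int32_t key_num = fleet_ptr->SaveCache(table_id, path, /*mode=*/0);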
  // save sparse table filtered by user-defined whitelist
  int32_t SaveWithWhitelist(int table_id, const std::string& path,
                            const int mode, const std::string& whitelist_path);
  void LoadWithWhitelist(const uint64_t table_id, const std::string& path,
                         const int mode);
  // copy feasign key/value from src_table_id to dest_table_id
  int32_t CopyTable(const uint64_t src_table_id, const uint64_t dest_table_id);
  // copy feasign key/value from src_table_id to dest_table_id
  int32_t CopyTableByFeasign(const uint64_t src_table_id,
                             const uint64_t dest_table_id,
                             const std::vector<uint64_t>& feasign_list);
  // clear all models, release their memory
  void ClearModel();
  // clear one table
  void ClearOneTable(const uint64_t table_id);
  // shrink sparse table
  void ShrinkSparseTable(int table_id);
  // shrink dense table
  void ShrinkDenseTable(int table_id, Scope* scope,
                        std::vector<std::string> var_list, float decay,
                        int emb_dim);

  typedef std::function<int32_t(int, int, const std::string&)> MsgHandlerFunc;
  // register client-to-client communication
  int RegisterClientToClientMsgHandler(int msg_type, MsgHandlerFunc handler);
  // send client-to-client message
  std::future<int32_t> SendClientToClientMsg(int msg_type, int to_client_id,
                                             const std::string& msg);
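  // A sketch of client-to-client messaging (msg_type 0 and the handler body
  // are illustrative; a handler conventionally returns 0 on success):
  //    fleet_ptr->RegisterClientToClientMsgHandler(
  //        0, [](int msg_type, int from_client, const std::string& msg) {
  //          return 0;
  //        });
  //    auto status = fleet_ptr->SendClientToClientMsg(0, /*to_client_id=*/1,
  //                                                   "hello");
  //    status.wait();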
  // confirm all the updated params in the current pass
  void Confirm();
  // revert all the updated params in the current pass
  void Revert();
  // FleetWrapper singleton
  static std::shared_ptr<FleetWrapper> GetInstance() {
    if (NULL == s_instance_) {
      s_instance_.reset(new paddle::framework::FleetWrapper());
    }
    return s_instance_;
  }
  // this performs better than rand_r, especially for large data
  std::default_random_engine& LocalRandomEngine();

#ifdef PADDLE_WITH_PSLIB
  static std::shared_ptr<paddle::distributed::PSlib> pslib_ptr_;
#endif

 private:
  static std::shared_ptr<FleetWrapper> s_instance_;
#ifdef PADDLE_WITH_PSLIB
  std::map<uint64_t, std::vector<paddle::ps::Region>> _regions;
#endif

  size_t GetAbsoluteSum(size_t start, size_t end, size_t level,
                        const framework::LoD& lod);

 protected:
  static bool is_initialized_;
  bool scale_sparse_gradient_with_batch_size_;
  int32_t sleep_seconds_before_fail_exit_;
  int client2client_request_timeout_ms_;
  int client2client_connect_timeout_ms_;
  int client2client_max_retry_;
  std::unique_ptr<::ThreadPool> local_pull_pool_{nullptr};
  int pull_local_thread_num_;
  std::unique_ptr<::ThreadPool> pull_to_local_pool_{nullptr};
  int local_table_shard_num_;
  DISABLE_COPY_AND_ASSIGN(FleetWrapper);
};

}  // end namespace framework
}  // end namespace paddle