/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <memory>
#ifdef PADDLE_WITH_PSLIB
#include <archive.h>
#include <pslib.h>
#endif
#include <atomic>
#include <ctime>
#include <map>
#include <random>
#include <string>
#include <vector>
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/platform/macros.h"  // for DISABLE_COPY_AND_ASSIGN

namespace paddle {
namespace framework {

// A wrapper class for pslib.h; this class follows the Singleton pattern,
// i.e. it is initialized only once in the current process
// Example:
//    std::shared_ptr<FleetWrapper> fleet_ptr =
//         FleetWrapper::GetInstance();
//    std::string dist_desc;
//    fleet_ptr->InitServer(dist_desc, 0);
// interface design principles:
// Pull
//   Sync: PullSparseVarsSync
//   Async: PullSparseVarsAsync(not implemented currently)
// Push
//   Sync: PushSparseVarsSync
//   Async: PushSparseVarsAsync(not implemented currently)
//   Async: PushSparseVarsWithLabelAsync(with special usage)
// Push dense variables to server in Async mode
// Param<in>: scope, table_id, var_names
// Param<out>: push_sparse_status

class FleetWrapper {
 public:
  virtual ~FleetWrapper() {}
  FleetWrapper() {
    scale_sparse_gradient_with_batch_size_ = true;
    // before a failure exit, the trainer sleeps for a while so pslib can core dump
    sleep_seconds_before_fail_exit_ = 300;
    // timeout in ms for pslib requests to the server
    client2client_request_timeout_ms_ = 500000;
    // timeout in ms for pslib connecting to the server
    client2client_connect_timeout_ms_ = 10000;
    // max retry count for pslib requests
    client2client_max_retry_ = 3;
  }

  // set client to client communication config
  void SetClient2ClientConfig(int request_timeout_ms, int connect_timeout_ms,
                              int max_retry);
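  // Example (a minimal sketch; the values shown simply mirror the
  // defaults assigned in the constructor above):
  //   auto fleet_ptr = FleetWrapper::GetInstance();
  //   fleet_ptr->SetClient2ClientConfig(500000 /* request_timeout_ms */,
  //                                     10000 /* connect_timeout_ms */,
  //                                     3 /* max_retry */);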

  // Pull sparse variables from server in sync mode
  // Param<in>: scope, table_id, var_names, fea_keys, fea_dim
  // Param<out>: fea_values
  void PullSparseVarsSync(const Scope& scope, const uint64_t table_id,
                          const std::vector<std::string>& var_names,
                          std::vector<uint64_t>* fea_keys,
                          std::vector<std::vector<float>>* fea_values,
                          int fea_dim,
                          const std::vector<std::string>& var_emb_names);
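  // Example (a minimal sketch; "scope", the table id, the variable names
  // and the feature dim below are illustrative assumptions):
  //   std::vector<uint64_t> fea_keys;
  //   std::vector<std::vector<float>> fea_values;
  //   fleet_ptr->PullSparseVarsSync(scope, 0 /* table_id */,
  //                                 {"embedding_input"}, &fea_keys,
  //                                 &fea_values, 11 /* fea_dim */,
  //                                 {"embedding_param"});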

  // pull dense variables from server in sync mode
  void PullDenseVarsSync(const Scope& scope, const uint64_t table_id,
                         const std::vector<std::string>& var_names);

  // pull dense variables from server in async mode
  // Param<in>: scope, table_id, var_names
  // Param<out>: pull_dense_status
  void PullDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* pull_dense_status);
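  // Example (a sketch; variable names and table id are illustrative,
  // and status.get() is assumed to return 0 on success):
  //   std::vector<::std::future<int32_t>> pull_dense_status;
  //   fleet_ptr->PullDenseVarsAsync(scope, 1 /* table_id */,
  //                                 {"fc_0.w_0", "fc_0.b_0"},
  //                                 &pull_dense_status);
  //   for (auto& status : pull_dense_status) {
  //     if (status.get() != 0) { /* handle pull failure */ }
  //   }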

  // push dense parameters (not gradients) to server in sync mode
  void PushDenseParamSync(const Scope& scope, const uint64_t table_id,
                          const std::vector<std::string>& var_names);

  // Push dense variables to server in async mode
  // Param<in>: scope, table_id, var_names, scale_datanorm, batch_size
  // Param<out>: push_sparse_status
  void PushDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      float scale_datanorm, int batch_size);
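  // Example (a sketch; names are illustrative, and a negative
  // scale_datanorm is assumed to disable data-norm scaling):
  //   std::vector<::std::future<int32_t>> push_status;
  //   fleet_ptr->PushDenseVarsAsync(scope, 1 /* table_id */,
  //                                 {"fc_0.w_0@GRAD", "fc_0.b_0@GRAD"},
  //                                 &push_status, -1.0f /* scale_datanorm */,
  //                                 32 /* batch_size */);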

  // push dense variables to server in sync mode
  void PushDenseVarsSync(Scope* scope, const uint64_t table_id,
                         const std::vector<std::string>& var_names);

  // Push sparse variables with labels to server in async mode
  // This is specially designed for click/show stats in server
  // Param<in>: scope, table_id, fea_keys, fea_labels, sparse_key_names,
  //            sparse_grad_names, batch_size, use_cvm, dump_slot
  // Param<out>: push_values, push_sparse_status
  void PushSparseVarsWithLabelAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<uint64_t>& fea_keys,
      const std::vector<float>& fea_labels,
      const std::vector<std::string>& sparse_key_names,
      const std::vector<std::string>& sparse_grad_names, const int emb_dim,
      std::vector<std::vector<float>>* push_values,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      const int batch_size, const bool use_cvm, const bool dump_slot,
      std::vector<uint64_t>* sparse_push_keys, const bool no_cvm);
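  // Example (a minimal sketch; every name and dimension below is an
  // illustrative assumption, not a prescribed configuration):
  //   std::vector<std::vector<float>> push_values;
  //   std::vector<::std::future<int32_t>> push_sparse_status;
  //   std::vector<uint64_t> sparse_push_keys;
  //   fleet_ptr->PushSparseVarsWithLabelAsync(
  //       scope, 0 /* table_id */, fea_keys, fea_labels,
  //       {"embedding_input"}, {"embedding_input@GRAD"}, 8 /* emb_dim */,
  //       &push_values, &push_sparse_status, 32 /* batch_size */,
  //       false /* use_cvm */, false /* dump_slot */, &sparse_push_keys,
  //       false /* no_cvm */);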

  // Push sparse variables to server in Async mode
  // Param<In>: scope, table_id, fea_keys, sparse_grad_names
  // Param<Out>: push_values, push_sparse_status
  /*
  void PushSparseVarsAsync(
          const Scope& scope,
          const uint64_t table_id,
          const std::vector<uint64_t>& fea_keys,
          const std::vector<std::string>& sparse_grad_names,
          std::vector<std::vector<float>>* push_values,
          std::vector<::std::future<int32_t>>* push_sparse_status);
  */

  // init server
  void InitServer(const std::string& dist_desc, int index);
  // init worker
  void InitWorker(const std::string& dist_desc,
                  const std::vector<uint64_t>& host_sign_list, int node_num,
                  int index);
  // stop server
  void StopServer();
  // finalize worker so that the worker can be stopped
  void FinalizeWorker();
  // run server
  uint64_t RunServer();
  // gather server ip
  void GatherServers(const std::vector<uint64_t>& host_sign_list, int node_num);
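  // Example of the startup sequence (a sketch; "dist_desc" and
  // "host_sign_list" come from the job launcher, and treating the value
  // returned by RunServer() as this node's host sign is an assumption):
  //   fleet_ptr->InitServer(dist_desc, 0 /* index */);
  //   uint64_t host_sign = fleet_ptr->RunServer();
  //   // ... all-gather host signs across nodes, then:
  //   fleet_ptr->InitWorker(dist_desc, host_sign_list, node_num, index);
  //   fleet_ptr->GatherServers(host_sign_list, node_num);
  //   // ... train, then shut down:
  //   fleet_ptr->StopServer();
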
  // gather client ip
  void GatherClients(const std::vector<uint64_t>& host_sign_list);
  // get client info
  std::vector<uint64_t> GetClientsInfo();
  // create client to client connection
  void CreateClient2ClientConnection();
  // flush all push requests
  void ClientFlush();
  // load from paddle model
  void LoadFromPaddleModel(Scope& scope, const uint64_t table_id,  // NOLINT
                           std::vector<std::string> var_list,
                           std::string model_path, std::string model_proto_file,
                           std::vector<std::string> table_var_list,
                           bool load_combine);
  // mode = 0, load all features
  // mode = 1, load delta features, i.e. load the diff
  void LoadModel(const std::string& path, const int mode);
  // mode = 0, load all features
  // mode = 1, load delta features, i.e. load the diff
  void LoadModelOneTable(const uint64_t table_id, const std::string& path,
                         const int mode);
  // mode = 0, save all features
  // mode = 1, save delta features, i.e. save the diff
  void SaveModel(const std::string& path, const int mode);
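  // Example (a sketch of the mode convention documented above; the paths
  // are illustrative):
  //   fleet_ptr->LoadModel("hdfs:/path/to/base_model", 0);   // full load
  //   fleet_ptr->SaveModel("hdfs:/path/to/delta_model", 1);  // save diff
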
  // get save cache threshold
  double GetCacheThreshold(int table_id);
  // shuffle cache model between servers
  void CacheShuffle(int table_id, const std::string& path, const int mode,
                    const double cache_threshold);
  // save cache model
  // cache model can speed up online prediction
  int32_t SaveCache(int table_id, const std::string& path, const int mode);
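  // Example of the cache-save flow (a sketch; chaining the three calls in
  // this order is an assumption about intended use):
  //   double threshold = fleet_ptr->GetCacheThreshold(0 /* table_id */);
  //   fleet_ptr->CacheShuffle(0, "hdfs:/path/to/cache", 0 /* mode */,
  //                           threshold);
  //   int32_t ret = fleet_ptr->SaveCache(0, "hdfs:/path/to/cache",
  //                                      0 /* mode */);
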
  // copy feasign key/value from src_table_id to dest_table_id
  int32_t CopyTable(const uint64_t src_table_id, const uint64_t dest_table_id);
  // copy the given feasigns' key/value from src_table_id to dest_table_id
  int32_t CopyTableByFeasign(const uint64_t src_table_id,
                             const uint64_t dest_table_id,
                             const std::vector<uint64_t>& feasign_list);
  // clear all models, release their memory
  void ClearModel();
  // shrink sparse table
  void ShrinkSparseTable(int table_id);
  // shrink dense table
  void ShrinkDenseTable(int table_id, Scope* scope,
                        std::vector<std::string> var_list, float decay,
                        int emb_dim);

  typedef std::function<int32_t(int, int, const std::string&)> MsgHandlerFunc;
  // register a handler for client-to-client messages
  int RegisterClientToClientMsgHandler(int msg_type, MsgHandlerFunc handler);
  // send client to client message
  std::future<int32_t> SendClientToClientMsg(int msg_type, int to_client_id,
                                             const std::string& msg);
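  // Example (a sketch; msg_type 0 is an arbitrary illustrative value, and
  // the handler is assumed to return 0 on success):
  //   fleet_ptr->RegisterClientToClientMsgHandler(
  //       0, [](int msg_type, int from_client, const std::string& msg) {
  //         return 0;
  //       });
  //   auto status = fleet_ptr->SendClientToClientMsg(0, 1 /* to_client_id */,
  //                                                  "hello");
  //   status.wait();
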
  // FleetWrapper singleton
  static std::shared_ptr<FleetWrapper> GetInstance() {
    if (nullptr == s_instance_) {
      s_instance_.reset(new paddle::framework::FleetWrapper());
    }
    return s_instance_;
  }
  // this performs better than rand_r, especially with large data
  std::default_random_engine& LocalRandomEngine();
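  // Example (a sketch; draws a uniform float in [0, 1)):
  //   std::uniform_real_distribution<float> dist(0.0f, 1.0f);
  //   float sample = dist(fleet_ptr->LocalRandomEngine());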

#ifdef PADDLE_WITH_PSLIB
  static std::shared_ptr<paddle::distributed::PSlib> pslib_ptr_;
#endif

 private:
  static std::shared_ptr<FleetWrapper> s_instance_;
#ifdef PADDLE_WITH_PSLIB
  std::map<uint64_t, std::vector<paddle::ps::Region>> _regions;
#endif

 protected:
  static bool is_initialized_;
  bool scale_sparse_gradient_with_batch_size_;
  int32_t sleep_seconds_before_fail_exit_;
  int client2client_request_timeout_ms_;
  int client2client_connect_timeout_ms_;
  int client2client_max_retry_;
  DISABLE_COPY_AND_ASSIGN(FleetWrapper);
};

}  // end namespace framework
}  // end namespace paddle