/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <memory>
#ifdef PADDLE_WITH_PSLIB
#include <archive.h>
#include <pslib.h>
#endif
#include <atomic>
#include <ctime>
#include <functional>  // for std::function (MsgHandlerFunc)
#include <future>      // for std::future
#include <map>
#include <random>
#include <string>
#include <vector>

#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/platform/macros.h"  // for DISABLE_COPY_AND_ASSIGN

namespace paddle {
namespace framework {

// A wrapper class for pslib.h. This class follows the Singleton pattern,
// i.e. it is initialized only once in the current process.
// Example:
//    std::shared_ptr<FleetWrapper> fleet_ptr =
//        FleetWrapper::GetInstance();
//    std::string dist_desc;
//    fleet_ptr->InitServer(dist_desc, 0);
// Interface design principles:
// Pull
//   Sync: PullSparseVarsSync
//   Async: PullSparseVarsAsync (not implemented currently)
// Push
//   Sync: PushSparseVarsSync
//   Async: PushSparseVarsAsync (not implemented currently)
//   Async: PushSparseVarsWithLabelAsync (with special usage)
// Dense variables are pushed to the server asynchronously via
// PushDenseVarsAsync; see the declaration below for its parameters.

class FleetWrapper {
 public:
  virtual ~FleetWrapper() {}
  FleetWrapper() {
    scale_sparse_gradient_with_batch_size_ = true;
    // on failure, the trainer sleeps for a while before exiting
    // so that pslib has time to produce a core dump
    sleep_seconds_before_fail_exit_ = 300;
  }
  // Pull sparse variables from server in Sync mode
  // Param<in>: scope, table_id, var_names, fea_keys
  // Param<out>: fea_values
  void PullSparseVarsSync(const Scope& scope, const uint64_t table_id,
                          const std::vector<std::string>& var_names,
                          std::vector<uint64_t>* fea_keys,
                          std::vector<std::vector<float>>* fea_values,
                          int fea_dim);
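  // Example (illustrative sketch; the variable names are hypothetical, and
  // fea_values is assumed to come back with one fea_dim-sized vector per key):
  //    std::vector<uint64_t> fea_keys;
  //    std::vector<std::vector<float>> fea_values;
  //    fleet_ptr->PullSparseVarsSync(scope, /*table_id=*/0, var_names,
  //                                  &fea_keys, &fea_values, /*fea_dim=*/8);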

  void PullDenseVarsSync(const Scope& scope, const uint64_t table_id,
                         const std::vector<std::string>& var_names);

  void PullDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* pull_dense_status);
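  // Example (illustrative sketch): issue the pull, then block on the
  // returned futures before reading the dense variables; each future is
  // assumed to yield a pslib status code.
  //    std::vector<::std::future<int32_t>> pull_dense_status;
  //    fleet_ptr->PullDenseVarsAsync(scope, table_id, var_names,
  //                                  &pull_dense_status);
  //    for (auto& f : pull_dense_status) {
  //      f.wait();
  //    }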

  // push dense parameters to server in sync mode
  void PushDenseParamSync(const Scope& scope, const uint64_t table_id,
                          const std::vector<std::string>& var_names);

  // Push dense variables to server in async mode
  // Param<in>: scope, table_id, var_names,
  // Param<out>: push_sparse_status
  void PushDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      float scale_datanorm, int batch_size);
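  // Example (illustrative sketch; despite its name, push_sparse_status here
  // collects the futures of the dense push requests, and a negative
  // scale_datanorm is assumed to disable datanorm scaling):
  //    std::vector<::std::future<int32_t>> status;
  //    fleet_ptr->PushDenseVarsAsync(scope, table_id, var_names, &status,
  //                                  /*scale_datanorm=*/-1.0f, batch_size);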

  void PushDenseVarsSync(Scope* scope, const uint64_t table_id,
                         const std::vector<std::string>& var_names);

  // Push sparse variables with labels to server in async mode.
  // This is specially designed for click/show stats on the server side.
  // Param<in>: scope, table_id, fea_keys, fea_labels,
  //            sparse_key_names, sparse_grad_names, emb_dim
  // Param<out>: push_values, push_sparse_status
  void PushSparseVarsWithLabelAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<uint64_t>& fea_keys,
      const std::vector<float>& fea_labels,
      const std::vector<std::string>& sparse_key_names,
      const std::vector<std::string>& sparse_grad_names, const int emb_dim,
      std::vector<std::vector<float>>* push_values,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      const int batch_size, const bool use_cvm, const bool dump_slot);
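  // Example (illustrative sketch; assumes fea_keys and fea_labels were
  // filled during the forward pass, and push_values is a reusable buffer):
  //    fleet_ptr->PushSparseVarsWithLabelAsync(
  //        scope, table_id, fea_keys, fea_labels, sparse_key_names,
  //        sparse_grad_names, emb_dim, &push_values, &push_sparse_status,
  //        batch_size, /*use_cvm=*/false, /*dump_slot=*/false);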

  // Push sparse variables to server in async mode
  // Param<in>: scope, table_id, fea_keys, sparse_grad_names
  // Param<out>: push_values, push_sparse_status
  /*
  void PushSparseVarsAsync(
          const Scope& scope,
          const uint64_t table_id,
          const std::vector<uint64_t>& fea_keys,
          const std::vector<std::string>& sparse_grad_names,
          std::vector<std::vector<float>>* push_values,
          std::vector<::std::future<int32_t>>* push_sparse_status);
  */

  void InitServer(const std::string& dist_desc, int index);
  void InitWorker(const std::string& dist_desc,
                  const std::vector<uint64_t>& host_sign_list, int node_num,
                  int index);
  void StopServer();
  uint64_t RunServer();
  void GatherServers(const std::vector<uint64_t>& host_sign_list, int node_num);
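  // Typical bootstrap sketch (illustrative; in practice the orchestration is
  // driven by the Python-side fleet API, and variable names are hypothetical):
  //    // on each server node:
  //    fleet_ptr->InitServer(dist_desc, server_index);
  //    uint64_t sign = fleet_ptr->RunServer();
  //    // all-gather every server's sign into host_sign_list, then:
  //    fleet_ptr->GatherServers(host_sign_list, node_num);
  //    // on each worker node:
  //    fleet_ptr->InitWorker(dist_desc, host_sign_list, node_num, worker_id);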
  // gather client ip
  void GatherClients(const std::vector<uint64_t>& host_sign_list);
  // get client info
  std::vector<uint64_t> GetClientsInfo();
  // create client to client connection
  void CreateClient2ClientConnection();

  // flush all push requests
  void ClientFlush();
  // load from paddle model
  void LoadFromPaddleModel(Scope& scope, const uint64_t table_id,  // NOLINT
                           std::vector<std::string> var_list,
                           std::string model_path, std::string model_proto_file,
                           std::vector<std::string> table_var_list,
                           bool load_combine);
  // mode = 0: load all features
  // mode = 1: load delta features, i.e. load only the diff
  void LoadModel(const std::string& path, const int mode);
  // mode = 0: load all features
  // mode = 1: load delta features, i.e. load only the diff
  void LoadModelOneTable(const uint64_t table_id, const std::string& path,
                         const int mode);
  // mode = 0: save all features
  // mode = 1: save delta features, i.e. save only the diff
  void SaveModel(const std::string& path, const int mode);
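  // Example (illustrative; the paths are hypothetical placeholders):
  //    fleet_ptr->LoadModel("hdfs:/base_model", 0);    // full load
  //    fleet_ptr->LoadModel("hdfs:/delta_model", 1);   // load diff only
  //    fleet_ptr->SaveModel("hdfs:/output_model", 0);  // save all features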

  double GetCacheThreshold();
  void CacheShuffle(int table_id, const std::string& path, const int mode,
                    const double cache_threshold);
  int32_t SaveCache(int table_id, const std::string& path, const int mode);
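  // Illustrative cache-save flow (assuming the threshold returned by
  // GetCacheThreshold feeds CacheShuffle before SaveCache):
  //    double threshold = fleet_ptr->GetCacheThreshold();
  //    fleet_ptr->CacheShuffle(table_id, path, mode, threshold);
  //    int32_t feasign_cnt = fleet_ptr->SaveCache(table_id, path, mode);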

  void ClearModel();

  void ShrinkSparseTable(int table_id);
  void ShrinkDenseTable(int table_id, Scope* scope,
                        std::vector<std::string> var_list, float decay,
                        int emb_dim);

  // register a handler for client to client communication
  typedef std::function<int32_t(int, int, const std::string&)> MsgHandlerFunc;
  int RegisterClientToClientMsgHandler(int msg_type, MsgHandlerFunc handler);
  // send a client to client message
  std::future<int32_t> SendClientToClientMsg(int msg_type, int to_client_id,
                                             const std::string& msg);
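  // Example (illustrative message flow; msg_type, to_client_id, and the
  // handler's argument meanings are hypothetical here):
  //    fleet_ptr->RegisterClientToClientMsgHandler(
  //        msg_type,
  //        [](int type, int from_client, const std::string& msg) -> int32_t {
  //          return 0;  // assumed to signal success
  //        });
  //    fleet_ptr->CreateClient2ClientConnection();
  //    auto status = fleet_ptr->SendClientToClientMsg(msg_type, to_client_id,
  //                                                   "payload");
  //    status.wait();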

  template <typename T>
  void Serialize(const std::vector<T*>& t, std::string* str);
  template <typename T>
  void Deserialize(std::vector<T>* t, const std::string& str);
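  // Example (illustrative round trip; MyType is a hypothetical stand-in for
  // whatever type the implementation instantiates these templates with):
  //    MyType obj;
  //    std::string buf;
  //    fleet_ptr->Serialize<MyType>({&obj}, &buf);
  //    std::vector<MyType> restored;
  //    fleet_ptr->Deserialize<MyType>(&restored, buf);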
  // lazily creates the singleton instance on first use; note that the
  // check-then-create below is not synchronized
  static std::shared_ptr<FleetWrapper> GetInstance() {
    if (NULL == s_instance_) {
      s_instance_.reset(new paddle::framework::FleetWrapper());
    }
    return s_instance_;
  }

  // this performs better than rand_r, especially on large data
  std::default_random_engine& LocalRandomEngine();
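  // Example (illustrative):
  //    std::uniform_real_distribution<float> dist(0.0f, 1.0f);
  //    float sample = dist(fleet_ptr->LocalRandomEngine());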

#ifdef PADDLE_WITH_PSLIB
  static std::shared_ptr<paddle::distributed::PSlib> pslib_ptr_;
#endif

 private:
  static std::shared_ptr<FleetWrapper> s_instance_;
#ifdef PADDLE_WITH_PSLIB
  std::map<uint64_t, std::vector<paddle::ps::Region>> _regions;
#endif

 protected:
  static bool is_initialized_;
  bool scale_sparse_gradient_with_batch_size_;
  int32_t sleep_seconds_before_fail_exit_;
  DISABLE_COPY_AND_ASSIGN(FleetWrapper);
};

}  // end namespace framework
}  // end namespace paddle