/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <memory>
#ifdef PADDLE_WITH_PSLIB
#include <archive.h>
#include <pslib.h>
#endif
#include <ThreadPool.h>

#include <atomic>
#include <ctime>
#include <future>
#include <map>
#include <random>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/heter_util.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/platform/macros.h"  // for DISABLE_COPY_AND_ASSIGN
#ifdef PADDLE_WITH_HETERPS
#include "paddle/fluid/platform/device/gpu/gpu_types.h"
#endif
#include "paddle/fluid/framework/fleet/heter_ps/log_patch.h"

namespace paddle {
namespace framework {
class Scope;
}  // namespace framework
}  // namespace paddle

namespace paddle {
namespace framework {

// A wrapper class for pslib.h. This class follows the Singleton pattern,
// i.e. it is initialized only once in the current process.
// Example:
//    std::shared_ptr<FleetWrapper> fleet_ptr =
//        FleetWrapper::GetInstance();
//    std::string dist_desc;
//    fleet_ptr->InitServer(dist_desc, 0);
// Interface design principles:
// Pull
//   Sync: PullSparseVarsSync
//   Async: PullSparseVarsAsync (not implemented currently)
// Push
//   Sync: PushSparseVarsSync
//   Async: PushSparseVarsAsync (not implemented currently)
//   Async: PushSparseVarsWithLabelAsync (with special usage)
// Push dense variables to server in async mode
// Param<in>: scope, table_id, var_names
// Param<out>: push_sparse_status

class FleetWrapper {
 public:
  virtual ~FleetWrapper() {}
  FleetWrapper() {
    scale_sparse_gradient_with_batch_size_ = true;
    // seconds the trainer sleeps before exiting, so that a pslib core dump
    // can complete first
    sleep_seconds_before_fail_exit_ = 300;
    // pslib request-to-server timeout in ms
    client2client_request_timeout_ms_ = 500000;
    // pslib connect-to-server timeout in ms
    client2client_connect_timeout_ms_ = 10000;
    // max retry count for pslib requests
    client2client_max_retry_ = 3;
    pull_local_thread_num_ = 25;
  }

  // set client-to-client communication config
  void SetClient2ClientConfig(int request_timeout_ms, int connect_timeout_ms,
                              int max_retry);

  void SetPullLocalThreadNum(int thread_num) {
    pull_local_thread_num_ = thread_num;
  }

#ifdef PADDLE_WITH_PSLIB
  void HeterPullSparseVars(int workerid, std::shared_ptr<HeterTask> task,
                           const uint64_t table_id,
                           const std::vector<std::string>& var_names,
                           int fea_dim,
                           const std::vector<std::string>& var_emb_names);

  void HeterPushSparseVars(
      std::shared_ptr<HeterTask> task, const Scope& scope,
      const uint64_t table_id,
      const std::vector<std::string>& sparse_key_names,
      const std::vector<std::string>& sparse_grad_names, const int emb_dim,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      const bool use_cvm, const bool dump_slot, const bool no_cvm);
#endif

  typedef std::function<void(int, int)> HeterCallBackFunc;
  int RegisterHeterCallback(HeterCallBackFunc handler);

  // Pull sparse variables from server in sync mode
  // Param<in>: scope, table_id, var_names, fea_keys, fea_dim, var_emb_names
  // Param<out>: fea_values
  void PullSparseVarsSync(const Scope& scope, const uint64_t table_id,
                          const std::vector<std::string>& var_names,
                          std::vector<uint64_t>* fea_keys,
                          std::vector<std::vector<float>>* fea_values,
                          int fea_dim,
                          const std::vector<std::string>& var_emb_names);
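  // Illustrative usage sketch for the sync pull above (not part of the
  // API; the table id, feature dimension, and variable names below are
  // hypothetical):
  //    auto fleet_ptr = FleetWrapper::GetInstance();
  //    std::vector<uint64_t> fea_keys;
  //    std::vector<std::vector<float>> fea_values;
  //    fleet_ptr->PullSparseVarsSync(scope, /*table_id=*/0, {"emb_input"},
  //                                  &fea_keys, &fea_values, /*fea_dim=*/9,
  //                                  {"emb_output"});
  //    // on return, fea_values holds one fea_dim-sized vector per key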
  // Pull sparse variables from server in async mode
  // Param<in>: scope, table_id, var_names, fea_keys, fea_dim
  // Param<out>: fea_values
  std::future<int32_t> PullSparseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<uint64_t>* fea_keys,
      std::vector<std::vector<float>>* fea_values, int fea_dim);

  // Pull sparse variables from server in sync mode,
  // pulling immediately into tensors
  void PullSparseToTensorSync(const uint64_t table_id, int fea_dim,
                              uint64_t padding_id, platform::Place place,
                              std::vector<const LoDTensor*>* inputs,  // NOLINT
                              std::vector<LoDTensor*>* outputs);      // NOLINT

  // pull dense variables from server in sync mode
  // Param<in>: scope, table_id, var_names
  // Param<out>: void
  void PullDenseVarsSync(const Scope& scope, const uint64_t table_id,
                         const std::vector<std::string>& var_names);

  // pull dense variables from server in async mode
  // Param<in>: scope, table_id, var_names
  // Param<out>: pull_dense_status
  void PullDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* pull_dense_status, bool in_cpu);

  // push dense parameters (not gradients) to server in sync mode
  void PushDenseParamSync(const Scope& scope, const uint64_t table_id,
                          const std::vector<std::string>& var_names);

  // Push dense variables to server in async mode
  // Param<in>: scope, table_id, var_names, scale_datanorm, batch_size
  // Param<out>: push_sparse_status
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  void PushDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      float scale_datanorm, int batch_size,
      const paddle::platform::Place& place, gpuStream_t stream,
      gpuEvent_t event);
#endif
#ifdef PADDLE_WITH_XPU
  void PushDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      float scale_datanorm, int batch_size,
      const paddle::platform::Place& place);
#endif
  void PushDenseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<std::string>& var_names,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      float scale_datanorm, int batch_size);

  // push dense variables to server in sync mode
  void PushDenseVarsSync(Scope* scope, const uint64_t table_id,
                         const std::vector<std::string>& var_names);

  // Push sparse variables with labels to server in async mode
  std::vector<std::unordered_map<uint64_t, std::vector<float>>> local_tables_;
  void PullSparseToLocal(const uint64_t table_id, int fea_value_dim);
  void PullSparseVarsFromLocal(const Scope& scope, const uint64_t table_id,
                               const std::vector<std::string>& var_names,
                               std::vector<uint64_t>* fea_keys,
                               std::vector<std::vector<float>>* fea_values,
                               int fea_value_dim);
  void ClearLocalTable();
  std::vector<std::unordered_map<uint64_t, std::vector<float>>>&
  GetLocalTable() {
    return local_tables_;
  }

  // This is specially designed for click/show stats in server
  // Param<in>: scope, table_id, fea_keys, fea_labels, sparse_key_names,
  //            sparse_grad_names, batch_size, use_cvm, dump_slot
  // Param<out>: push_values, push_sparse_status
  void PushSparseVarsWithLabelAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<uint64_t>& fea_keys,
      const std::vector<float>& fea_labels,
      const std::vector<std::string>& sparse_key_names,
      const std::vector<std::string>& sparse_grad_names, const int emb_dim,
      std::vector<std::vector<float>>* push_values,
      std::vector<::std::future<int32_t>>* push_sparse_status,
      const int batch_size, const bool use_cvm, const bool dump_slot,
      std::vector<uint64_t>* sparse_push_keys, const bool no_cvm,
      const bool scale_sparse_gradient_with_batch_size);

  // Push sparse variables to server in async mode
  void PushSparseFromTensorWithLabelAsync(
      const Scope& scope, const uint64_t table_id, int fea_dim,
      uint64_t padding_id, bool scale_sparse, const std::string& accesor,
      const std::string& click_name, platform::Place place,
      const std::vector<std::string>& input_names,
      std::vector<const LoDTensor*>* inputs,    // NOLINT
      std::vector<const LoDTensor*>* outputs);  // NOLINT
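  // The *Async methods above report per-request status through
  // ::std::future<int32_t> handles. A typical caller-side wait loop looks
  // like the sketch below (illustrative only; it assumes pslib's
  // convention that a status of 0 means success):
  //    std::vector<::std::future<int32_t>> pull_dense_status;
  //    fleet_ptr->PullDenseVarsAsync(scope, table_id, var_names,
  //                                  &pull_dense_status, /*in_cpu=*/true);
  //    for (auto& t : pull_dense_status) {
  //      CHECK_EQ(t.get(), 0) << "pull dense from table failed";
  //    }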
  // Push sparse variables to server in async mode
  // Param<in>: scope, table_id, fea_keys, sparse_grad_names
  // Param<out>: push_values, push_sparse_status
  /*
  void PushSparseVarsAsync(
      const Scope& scope, const uint64_t table_id,
      const std::vector<uint64_t>& fea_keys,
      const std::vector<std::string>& sparse_grad_names,
      std::vector<std::vector<float>>* push_values,
      std::vector<::std::future<int32_t>>* push_sparse_status);
  */
  // init server
  void InitServer(const std::string& dist_desc, int index);
  // init trainer
  void InitWorker(const std::string& dist_desc,
                  const std::vector<uint64_t>& host_sign_list, int node_num,
                  int index);
  // stop server
  void StopServer();
  // finalize worker so that it can be stopped
  void FinalizeWorker();
  // run server
  uint64_t RunServer();
  // run server with ip and port
  uint64_t RunServer(const std::string& ip, uint32_t port);
  // gather server ips
  void GatherServers(const std::vector<uint64_t>& host_sign_list,
                     int node_num);
  // gather client ips
  void GatherClients(const std::vector<uint64_t>& host_sign_list);
  // get client info
  std::vector<uint64_t> GetClientsInfo();
  // create client-to-client connection
  void CreateClient2ClientConnection();
  // flush all push requests
  void ClientFlush();
  // load from paddle model
  void LoadFromPaddleModel(Scope& scope, const uint64_t table_id,  // NOLINT
                           std::vector<std::string> var_list,
                           std::string model_path,
                           std::string model_proto_file,
                           std::vector<std::string> table_var_list,
                           bool load_combine);

  void PrintTableStat(uint64_t table_id, uint32_t pass_id, size_t threshold);
  void SetFileNumOneShard(const uint64_t table_id, int file_num);
  // mode = 0, load all features
  // mode = 1, load delta features, i.e. load the diff
  void LoadModel(const std::string& path, const int mode);
  // mode = 0, load all features
  // mode = 1, load delta features, i.e. load the diff
  void LoadModelOneTable(const uint64_t table_id, const std::string& path,
                         const int mode);
  // mode = 0, save all features
  // mode = 1, save delta features, i.e. save the diff
  void SaveModel(const std::string& path, const int mode);
  void SaveMultiTableOnePath(const std::vector<int>& table_ids,
                             const std::string& path, const int mode);
  // mode = 0, save all features
  // mode = 1, save delta features, i.e. save the diff
  void SaveModelOneTable(const uint64_t table_id, const std::string& path,
                         const int mode);
  // save model with prefix
  void SaveModelOneTablePrefix(const uint64_t table_id,
                               const std::string& path, const int mode,
                               const std::string& prefix);
  // get save-cache threshold
  double GetCacheThreshold(int table_id);
  // shuffle cached model between servers
  void CacheShuffle(int table_id, const std::string& path, const int mode,
                    const double cache_threshold);
  // save cache model
  // a cache model can speed up online prediction
  int32_t SaveCache(int table_id, const std::string& path, const int mode);
  // save sparse table filtered by user-defined whitelist
  int32_t SaveWithWhitelist(int table_id, const std::string& path,
                            const int mode,
                            const std::string& whitelist_path);
  void LoadWithWhitelist(const uint64_t table_id, const std::string& path,
                         const int mode);
  // copy feasign keys/values from src_table_id to dest_table_id
  int32_t CopyTable(const uint64_t src_table_id, const uint64_t dest_table_id);
  // copy keys/values of the given feasigns from src_table_id to
  // dest_table_id
  int32_t CopyTableByFeasign(const uint64_t src_table_id,
                             const uint64_t dest_table_id,
                             const std::vector<uint64_t>& feasign_list);
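  // Typical process bootstrap order for the lifecycle methods above
  // (illustrative sketch; dist_desc, host_sign_list, node_num, and index
  // come from the distributed launcher and are hypothetical here):
  //    fleet_ptr->InitServer(dist_desc, /*index=*/0);
  //    uint64_t host_sign = fleet_ptr->RunServer();
  //    fleet_ptr->GatherServers(host_sign_list, node_num);
  //    fleet_ptr->InitWorker(dist_desc, host_sign_list, node_num, index);
  //    ... train, push/pull ...
  //    fleet_ptr->ClientFlush();
  //    fleet_ptr->FinalizeWorker();
  //    fleet_ptr->StopServer();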
  // clear all models, releasing their memory
  void ClearModel();
  // clear one table
  void ClearOneTable(const uint64_t table_id);
  // shrink sparse table
  void ShrinkSparseTable(int table_id);
  // shrink dense table
  void ShrinkDenseTable(int table_id, Scope* scope,
                        std::vector<std::string> var_list, float decay,
                        int emb_dim);

  typedef std::function<int32_t(int, int, const std::string&)> MsgHandlerFunc;
  // register client-to-client communication
  int RegisterClientToClientMsgHandler(int msg_type, MsgHandlerFunc handler);
  // send client-to-client message
  std::future<int32_t> SendClientToClientMsg(int msg_type, int to_client_id,
                                             const std::string& msg);
  // confirm all the updated params in the current pass
  void Confirm();
  // revert all the updated params in the current pass
  void Revert();

  std::string GetDistDesc() const {
    CHECK(is_initialized_) << "FleetWrapper should be initialized first!";
    return dist_desc_;
  }

  // FleetWrapper singleton
  static std::shared_ptr<FleetWrapper> GetInstance() {
    {
      std::lock_guard<std::mutex> lk(ins_mutex);
      if (s_instance_ == nullptr) {
        s_instance_.reset(new paddle::framework::FleetWrapper());
      }
    }
    return s_instance_;
  }
  // this performs better than rand_r, especially on large data
  std::default_random_engine& LocalRandomEngine();
  void SetDate(const uint64_t table_id, const std::string& date);

#ifdef PADDLE_WITH_PSLIB
  static std::shared_ptr<paddle::distributed::PSlib> pslib_ptr_;
#endif

 private:
  static std::shared_ptr<FleetWrapper> s_instance_;
  std::string dist_desc_;
  static std::mutex ins_mutex;
#ifdef PADDLE_WITH_PSLIB
  std::map<uint64_t, std::vector<paddle::ps::Region>> _regions;
#endif

  size_t GetAbsoluteSum(size_t start, size_t end, size_t level,
                        const framework::LoD& lod);

 protected:
  static bool is_initialized_;
  bool scale_sparse_gradient_with_batch_size_;
  int32_t sleep_seconds_before_fail_exit_;
  int client2client_request_timeout_ms_;
  int client2client_connect_timeout_ms_;
  int client2client_max_retry_;
  std::unique_ptr<::ThreadPool> local_pull_pool_{nullptr};
  int pull_local_thread_num_;
  std::unique_ptr<::ThreadPool> pull_to_local_pool_{nullptr};
  int local_table_shard_num_;
  DISABLE_COPY_AND_ASSIGN(FleetWrapper);
};

}  // end namespace framework
}  // end namespace paddle
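// Example: registering a client-to-client message handler (illustrative
// sketch; the msg_type value and the handler body are hypothetical):
//    auto fleet_ptr = paddle::framework::FleetWrapper::GetInstance();
//    fleet_ptr->RegisterClientToClientMsgHandler(
//        /*msg_type=*/0,
//        [](int msg_type, int client_id,
//           const std::string& msg) -> int32_t {
//          VLOG(3) << "msg type " << msg_type << " from client "
//                  << client_id << ": " << msg;
//          return 0;
//        });
//    fleet_ptr->CreateClient2ClientConnection();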