Commit b0911390 authored by D dongdaxiang

add nccl wrapper for python API

Parent fff795e5
@@ -3,3 +3,5 @@ if(WITH_PSLIB)
 else()
   cc_library(fleet_wrapper SRCS fleet_wrapper.cc DEPS framework_proto variable_helper scope)
 endif(WITH_PSLIB)
+cc_library(nccl_wrapper SRCS nccl_wrapper.cc DEPS framework_proto variable_helper scope)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/fleet/nccl_wrapper.h"
#include <utility>
#include "paddle/fluid/framework/data_feed.h"
#include "paddle/fluid/framework/scope.h"
namespace paddle {
namespace framework {
std::shared_ptr<NCCLWrapper> NCCLWrapper::s_instance_ = NULL;
bool NCCLWrapper::is_initialized_ = false;
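// Create the NCCL communicator for this process. SetNCCLId() and
// SetRankInfo() must have run first so that nccl_id_, global_ranks_
// and my_global_rank_ are populated.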
void NCCLWrapper::InitNCCL() {
platform::dynload::ncclCommInitRank(
&(nccl_info_.comm_), nccl_info_.global_ranks_, nccl_info_.nccl_id_,
nccl_info_.my_global_rank_);
return;
}
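// Record the unique id received from the root rank.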
void NCCLWrapper::SetNCCLId(const NCCLInfo& nccl_info) {
nccl_info_.nccl_id_ = nccl_info.nccl_id_;
}
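// Generate a fresh unique id; meant to be called on the root rank,
// which then distributes the returned NCCLInfo to the other ranks.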
NCCLInfo NCCLWrapper::GetNCCLId() {
PADDLE_ENFORCE(platform::dynload::ncclGetUniqueId(&(nccl_info_.nccl_id_)));
return nccl_info_;
}
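// Store the rank layout, bind the local CUDA device, and create the
// stream later used by SyncVar().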
void NCCLWrapper::SetRankInfo(const int local_rank, const int global_rank,
const int ranks) {
nccl_info_.local_rank_ = local_rank;
nccl_info_.my_global_rank_ = global_rank;
nccl_info_.global_ranks_ = ranks;
PADDLE_ENFORCE(cudaSetDevice(local_rank));
PADDLE_ENFORCE(cudaStreamCreate(&(nccl_info_.stream_)));
return;
}
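// Broadcast each named LoDTensor (as ncclFloat data) from root_rank to
// all other ranks, synchronizing on the wrapper's stream per variable.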
void NCCLWrapper::SyncVar(const int root_rank, const Scope& scope,
const std::vector<std::string>& var_names) {
for (auto& name : var_names) {
auto var = scope.FindVar(name);
LoDTensor* tensor = var->GetMutable<LoDTensor>();
int32_t total_size = tensor->numel();
platform::dynload::ncclBcast(reinterpret_cast<void*>(tensor->data<float>()),
total_size, ncclFloat, root_rank,
nccl_info_.comm_, nccl_info_.stream_);
cudaStreamSynchronize(nccl_info_.stream_);
}
}
} // end namespace framework
} // end namespace paddle
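Read as a whole, nccl_wrapper.cc implies a fixed call order on every process: the root rank creates the unique id with GetNCCLId(), every rank records the id and its rank layout via SetNCCLId() and SetRankInfo(), and only then is InitNCCL() safe to call; once the communicator exists, SyncVar() replicates rank 0's parameters. A minimal driver sketch of that sequence follows. ExchangeNCCLId and ReceiveNCCLId are hypothetical out-of-band transport helpers (the commit leaves id distribution to the caller), and the variable names are illustrative only.

#include <string>
#include <vector>
#include "paddle/fluid/framework/fleet/nccl_wrapper.h"

// Hypothetical transport helpers, not part of this commit: ship the
// unique id from rank 0 to every other rank (e.g. via RPC or a shared
// filesystem) before the communicator is created.
void ExchangeNCCLId(const paddle::framework::NCCLInfo& info);
paddle::framework::NCCLInfo ReceiveNCCLId();

void SetupAndSync(int local_rank, int global_rank, int world_size,
                  const paddle::framework::Scope& scope) {
  auto nccl = paddle::framework::NCCLWrapper::GetInstance();
  if (global_rank == 0) {
    // The root rank generates the unique id and distributes it.
    ExchangeNCCLId(nccl->GetNCCLId());
  } else {
    nccl->SetNCCLId(ReceiveNCCLId());
  }
  // Bind the local device and create the stream, then build the
  // communicator from the id and rank layout recorded above.
  nccl->SetRankInfo(local_rank, global_rank, world_size);
  nccl->InitNCCL();
  // Replicate rank 0's copy of the listed variables to all ranks
  // (illustrative names).
  nccl->SyncVar(/*root_rank=*/0, scope, {"fc_0.w_0", "fc_0.b_0"});
}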
@@ -29,114 +29,49 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
+class NCCLInfo {
+ public:
+  NCCLInfo() {}
+  virtual ~NCCLInfo() {}
+
+ public:
+  int local_rank_;
+  int global_ranks_;
+  int my_global_rank_;
+  ncclUniqueId nccl_id_;
+  ncclComm_t comm_;
+  cudaStream_t stream_;
+};
+
 class NCCLWrapper {
  public:
   virtual ~NCCLWrapper() {}
   NCCLWrapper() {}
-  // Pull sparse variables from server in Sync mode
-  // Param<in>: scope, table_id, var_names, fea_keys
-  // Param<out>: fea_values
-  void PullSparseVarsSync(const Scope& scope, const uint64_t table_id,
-                          const std::vector<std::string>& var_names,
-                          std::vector<uint64_t>* fea_keys,
-                          std::vector<std::vector<float>>* fea_values,
-                          int fea_dim);
-  void PullDenseVarsSync(const Scope& scope, const uint64_t table_id,
-                         const std::vector<std::string>& var_names);
-  void PullDenseVarsAsync(
-      const Scope& scope, const uint64_t table_id,
-      const std::vector<std::string>& var_names,
-      std::vector<::std::future<int32_t>>* pull_dense_status);
-  void PushDenseParamSync(const Scope& scope, const uint64_t table_id,
-                          const std::vector<std::string>& var_names);
-  // Push dense variables to server in async mode
-  // Param<in>: scope, table_id, var_names,
-  // Param<out>: push_sparse_status
-  void PushDenseVarsAsync(
-      const Scope& scope, const uint64_t table_id,
-      const std::vector<std::string>& var_names,
-      std::vector<::std::future<int32_t>>* push_sparse_status);
-  void PushDenseVarsSync(Scope* scope, const uint64_t table_id,
-                         const std::vector<std::string>& var_names);
-  // Push sparse variables with labels to server in Async mode
-  // This is specially designed for click/show stats in server
-  // Param<in>: scope, table_id, var_grad_names,
-  //            fea_keys, fea_labels, sparse_grad_names
-  // Param<out>: push_values, push_sparse_status
-  void PushSparseVarsWithLabelAsync(
-      const Scope& scope, const uint64_t table_id,
-      const std::vector<uint64_t>& fea_keys,
-      const std::vector<float>& fea_labels,
-      const std::vector<std::string>& sparse_key_names,
-      const std::vector<std::string>& sparse_grad_names, const int emb_dim,
-      std::vector<std::vector<float>>* push_values,
-      std::vector<::std::future<int32_t>>* push_sparse_status);
-  // Push sparse variables to server in Async mode
-  // Param<In>: scope, table_id, fea_keys, sparse_grad_names
-  // Param<Out>: push_values, push_sparse_status
-  /*
-  void PushSparseVarsAsync(
-      const Scope& scope,
-      const uint64_t table_id,
-      const std::vector<uint64_t>& fea_keys,
-      const std::vector<std::string>& sparse_grad_names,
-      std::vector<std::vector<float>>* push_values,
-      std::vector<::std::future<int32_t>>* push_sparse_status);
-  */
-  void InitServer(const std::string& dist_desc, int index);
-  void InitWorker(const std::string& dist_desc,
-                  const std::vector<uint64_t>& host_sign_list, int node_num,
-                  int index);
-  void StopServer();
-  uint64_t RunServer();
-  void GatherServers(const std::vector<uint64_t>& host_sign_list, int node_num);
-  // gather client ip
-  void GatherClients(const std::vector<uint64_t>& host_sign_list);
-  // get client info
-  std::vector<uint64_t> GetClientsInfo();
-  // create client to client connection
-  void CreateClient2ClientConnection();
-  // register client to client communication
-  typedef std::function<int32_t(int, int, const std::string&)> MsgHandlerFunc;
-  int RegisterClientToClientMsgHandler(int msg_type, MsgHandlerFunc handler);
-  // send client to client message
-  std::future<int32_t> SendClientToClientMsg(int msg_type, int to_client_id,
-                                             const std::string& msg);
-  template <typename T>
-  void Serialize(const std::vector<T*>& t, std::string* str);
-  template <typename T>
-  void Deserialize(std::vector<T>* t, const std::string& str);
-  static std::shared_ptr<FleetWrapper> GetInstance() {
+  void InitNCCL();
+  void SetNCCLId(const NCCLInfo& nccl_info);
+  NCCLInfo GetNCCLId();
+  void SetRankInfo(const int local_rank, const int global_rank,
+                   const int ranks);
+  void SyncVar(const int root_rank, const Scope& scope,
+               const std::vector<std::string>& var_names);
+  static std::shared_ptr<NCCLWrapper> GetInstance() {
     if (NULL == s_instance_) {
-      s_instance_.reset(new paddle::framework::FleetWrapper());
+      s_instance_.reset(new paddle::framework::NCCLWrapper());
     }
     return s_instance_;
   }
-#ifdef PADDLE_WITH_PSLIB
-  static std::shared_ptr<paddle::distributed::PSlib> pslib_ptr_;
-#endif
+
+ public:
+  NCCLInfo nccl_info_;
 
  private:
-  static std::shared_ptr<FleetWrapper> s_instance_;
-#ifdef PADDLE_WITH_PSLIB
-  std::map<uint64_t, std::vector<paddle::ps::Region>> _regions;
-#endif
+  static std::shared_ptr<NCCLWrapper> s_instance_;
 
  protected:
   static bool is_initialized_;
-  DISABLE_COPY_AND_ASSIGN(FleetWrapper);
+  DISABLE_COPY_AND_ASSIGN(NCCLWrapper);
 };
 
 }  // end namespace framework
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <fcntl.h>
#ifdef _POSIX_C_SOURCE
#undef _POSIX_C_SOURCE
#endif
#ifdef _XOPEN_SOURCE
#undef _XOPEN_SOURCE
#endif
#include <string>
#include <vector>
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/text_format.h"
#include "paddle/fluid/framework/async_executor.h"
#include "paddle/fluid/framework/data_feed.h"
#include "paddle/fluid/framework/data_feed.pb.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/variant.h"
#include "paddle/fluid/pybind/nccl_wrapper_py.h"
namespace py = pybind11;
namespace pd = paddle::framework;
namespace paddle {
namespace pybind {
void BindNCCLWrapper(py::module* m) {
py::class_<framework::NCCLWrapper>(*m, "Nccl")
.def(py::init())
.def("init_nccl", &framework::NCCLWrapper::InitNCCL)
.def("set_nccl_id", &framework::NCCLWrapper::SetNCCLId)
.def("set_rank_info", &framework::NCCLWrapper::SetRankInfo)
.def("sync_var", &framework::NCCLWrapper::SyncVar);
}  // end BindNCCLWrapper
} // end namespace pybind
} // end namespace paddle
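A caveat on the binding above: set_nccl_id takes a const NCCLInfo& argument, but NCCLInfo itself is never registered with pybind11 and get_nccl_id is not bound, so as of this commit the unique id cannot be produced or passed around from pure Python. Below is a minimal sketch of the extra registration that would make the type visible to Python scripts, assuming it were added inside BindNCCLWrapper; it is not part of this commit.

// Hypothetical addition: expose NCCLInfo as an opaque handle so that
// set_nccl_id becomes callable from Python.
py::class_<framework::NCCLInfo>(*m, "NCCLInfo")
    .def(py::init());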
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
namespace py = pybind11;
namespace paddle {
namespace pybind {
void BindNCCLWrapper(py::module* m);
} // namespace pybind
} // namespace paddle
@@ -58,6 +58,7 @@ limitations under the License. */
 #include "paddle/fluid/pybind/imperative.h"
 #include "paddle/fluid/pybind/inference_api.h"
 #include "paddle/fluid/pybind/ir.h"
+#include "paddle/fluid/pybind/nccl_wrapper_py.h"
 #include "paddle/fluid/pybind/protobuf.h"
 #include "paddle/fluid/pybind/pybind.h"  // NOLINT
 #include "paddle/fluid/pybind/reader_py.h"
@@ -1405,6 +1406,7 @@ All parameter, weight, gradient are variables in Paddle.
   BindRecordIOWriter(&m);
   BindAsyncExecutor(&m);
   BindFleetWrapper(&m);
+  BindNCCLWrapper(&m);
   BindGraph(&m);
   BindNode(&m);
   BindInferenceApi(&m);
...