From 3858f458ea5a3103c75f1d68107f023449874db7 Mon Sep 17 00:00:00 2001
From: ShenLiang
Date: Fri, 29 Jan 2021 18:28:23 +0800
Subject: [PATCH] rm Singleton of reducer (#30775)

---
 paddle/fluid/imperative/reducer.cc |  8 --------
 paddle/fluid/imperative/reducer.h  | 28 ----------------------------
 paddle/fluid/pybind/imperative.cc  | 15 +++++----------
 3 files changed, 5 insertions(+), 46 deletions(-)

diff --git a/paddle/fluid/imperative/reducer.cc b/paddle/fluid/imperative/reducer.cc
index 6801cac9526..0c33cdd7c85 100644
--- a/paddle/fluid/imperative/reducer.cc
+++ b/paddle/fluid/imperative/reducer.cc
@@ -41,8 +41,6 @@ namespace paddle {
 namespace imperative {
 
 #if defined(PADDLE_WITH_NCCL)
-std::shared_ptr<Reducer> Reducer::s_instance_ = NULL;
-
 template <typename DeviceContext, typename T>
 static void ConcatTensorsForAllReduce(
     const DeviceContext &context,
@@ -225,14 +223,8 @@ Reducer::Reducer(const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
             })));
     var_index_map_[var->GradVarBase()->SharedVar().get()] = global_var_index;
   }
-
-  std::call_once(once_flag_, []() {
-    std::atexit([]() { Reducer::GetInstance()->ReleaseReducer(); });
-  });
 }
 
-void Reducer::ReleaseReducer() { parallel_ctx_.reset(); }
-
 void Reducer::InitializeDenseGroups(
     const std::vector<size_t> &variable_indices_, Group *p_group) {
   int64_t all_length = 0;
diff --git a/paddle/fluid/imperative/reducer.h b/paddle/fluid/imperative/reducer.h
index 9bb528bbdef..90c4cdb3c6a 100644
--- a/paddle/fluid/imperative/reducer.h
+++ b/paddle/fluid/imperative/reducer.h
@@ -108,44 +108,16 @@ class Reducer {
 
   void AddDistHook(size_t var_index);
 
-  // void MarkDenseVarReady(size_t var_index);
-
-  // void MarkSparseVarReady(size_t var_index);
-
   void MarkVarReady(const size_t var_index, const bool is_used_var);
 
   void MarkGroupReady(size_t group_index);
 
   void FinalizeBackward();
 
-  void ReleaseReducer();
-
   std::vector<std::vector<size_t>> RebuildGruops();
 
   inline bool NeedRebuildGroup() { return !has_rebuilt_group_; }
 
-  // Reducer Singleton
-  static std::shared_ptr<Reducer> SetInstance(
-      const std::vector<std::shared_ptr<imperative::VarBase>>& vars,
-      const std::vector<std::vector<size_t>>& group_indices,
-      const std::vector<bool>& is_sparse_gradient,
-      std::shared_ptr<imperative::ParallelContext> parallel_ctx,
-      const std::vector<size_t>& group_size_limits, bool find_unused_vars) {
-    if (NULL == s_instance_) {
-      s_instance_.reset(new paddle::imperative::Reducer(
-          vars, group_indices, is_sparse_gradient, parallel_ctx,
-          group_size_limits, find_unused_vars));
-    }
-    return s_instance_;
-  }
-
-  static std::shared_ptr<Reducer> GetInstance() {
-    PADDLE_ENFORCE_EQ(
-        s_instance_ != NULL, true,
-        platform::errors::InvalidArgument("Reducer is not initialized."));
-    return s_instance_;
-  }
-
  private:
   std::vector<std::shared_ptr<imperative::VarBase>> vars_;
   std::vector<std::vector<size_t>> group_indices_;
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index 87aa989c411..cceae74f1dc 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -1390,16 +1390,11 @@ void BindImperative(py::module *m_ptr) {
 
   py::class_<imperative::Reducer, std::shared_ptr<imperative::Reducer>>(
       m, "Reducer", R"DOC()DOC")
-      .def(py::init([](
-          const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
-          const std::vector<std::vector<size_t>> &group_indices,
-          const std::vector<bool> &is_sparse_gradient,
-          std::shared_ptr<imperative::ParallelContext> parallel_ctx,
-          const std::vector<size_t> &group_size_limits, bool find_unused_vars) {
-        return imperative::Reducer::SetInstance(
-            vars, group_indices, is_sparse_gradient, parallel_ctx,
-            group_size_limits, find_unused_vars);
-      }))
+      .def(py::init<const std::vector<std::shared_ptr<imperative::VarBase>> &,
+                    const std::vector<std::vector<size_t>> &,
+                    const std::vector<bool> &,
+                    std::shared_ptr<imperative::ParallelContext>,
+                    const std::vector<size_t> &, bool>())
       .def("prepare_for_backward", &imperative::Reducer::PrepareForBackward,
           py::arg("vars"), py::call_guard<py::gil_scoped_release>());
 
-- 
GitLab
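
Note (appended after the patch, not part of it): the diff replaces a lambda-based py::init that routed construction through the static Reducer::SetInstance factory with a direct typed py::init<...>() constructor binding. Each Reducer is then an ordinary object owned by its std::shared_ptr holder, so the static s_instance_, GetInstance, and the std::atexit/ReleaseReducer teardown hook all become unnecessary. Below is a minimal, self-contained sketch of that binding pattern; the class Widget and module name demo are hypothetical stand-ins, not Paddle code.

// A minimal sketch of the binding pattern the patch adopts. "Widget" and
// the module name "demo" are hypothetical, chosen only for illustration.
#include <memory>
#include <vector>

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>  // automatic std::vector <-> Python list conversion

namespace py = pybind11;

class Widget {
 public:
  Widget(const std::vector<size_t> &sizes, bool flag)
      : sizes_(sizes), flag_(flag) {}

  size_t Count() const { return sizes_.size(); }

 private:
  std::vector<size_t> sizes_;
  bool flag_;
};

PYBIND11_MODULE(demo, m) {
  // Direct constructor binding with a shared_ptr holder: pybind11 forwards
  // the Python arguments to Widget's constructor and wraps the new object
  // in std::shared_ptr<Widget>. Every call builds an independent instance,
  // destroyed when its last reference is dropped -- no static singleton,
  // no std::atexit teardown hook.
  py::class_<Widget, std::shared_ptr<Widget>>(m, "Widget")
      .def(py::init<const std::vector<size_t> &, bool>())
      .def("count", &Widget::Count);
}

From Python, demo.Widget([1, 2, 3], True) returns a fresh object on every call, which is the per-instance behavior the typed py::init<...>() line in the diff gives Reducer.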