diff --git a/paddle/fluid/imperative/reducer.cc b/paddle/fluid/imperative/reducer.cc
index 6801cac95260827f4fd3bd7b8b524e7965e0b7a3..0c33cdd7c8592e5ab451365562ac5d1133068c74 100644
--- a/paddle/fluid/imperative/reducer.cc
+++ b/paddle/fluid/imperative/reducer.cc
@@ -41,8 +41,6 @@ namespace paddle {
 namespace imperative {
 
 #if defined(PADDLE_WITH_NCCL)
-std::shared_ptr<Reducer> Reducer::s_instance_ = NULL;
-
 template <typename DeviceContext, typename T>
 static void ConcatTensorsForAllReduce(
     const DeviceContext &context,
@@ -225,14 +223,8 @@ Reducer::Reducer(const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
         })));
     var_index_map_[var->GradVarBase()->SharedVar().get()] = global_var_index;
   }
-
-  std::call_once(once_flag_, []() {
-    std::atexit([]() { Reducer::GetInstance()->ReleaseReducer(); });
-  });
 }
 
-void Reducer::ReleaseReducer() { parallel_ctx_.reset(); }
-
 void Reducer::InitializeDenseGroups(
     const std::vector<size_t> &variable_indices_, Group *p_group) {
   int64_t all_length = 0;
diff --git a/paddle/fluid/imperative/reducer.h b/paddle/fluid/imperative/reducer.h
index 9bb528bbdef21270bd86f6b934fc40b592488e9e..90c4cdb3c6a6d289dd7d92d472b4267ecea7d194 100644
--- a/paddle/fluid/imperative/reducer.h
+++ b/paddle/fluid/imperative/reducer.h
@@ -108,44 +108,16 @@ class Reducer {
 
   void AddDistHook(size_t var_index);
 
-  // void MarkDenseVarReady(size_t var_index);
-
-  // void MarkSparseVarReady(size_t var_index);
-
   void MarkVarReady(const size_t var_index, const bool is_used_var);
 
   void MarkGroupReady(size_t group_index);
 
   void FinalizeBackward();
 
-  void ReleaseReducer();
-
   std::vector<std::vector<size_t>> RebuildGruops();
 
   inline bool NeedRebuildGroup() { return !has_rebuilt_group_; }
 
-  // Reducer Singleton
-  static std::shared_ptr<Reducer> SetInstance(
-      const std::vector<std::shared_ptr<imperative::VarBase>>& vars,
-      const std::vector<std::vector<size_t>>& group_indices,
-      const std::vector<bool>& is_sparse_gradient,
-      std::shared_ptr<imperative::ParallelContext> parallel_ctx,
-      const std::vector<size_t>& group_size_limits, bool find_unused_vars) {
-    if (NULL == s_instance_) {
-      s_instance_.reset(new paddle::imperative::Reducer(
-          vars, group_indices, is_sparse_gradient, parallel_ctx,
-          group_size_limits, find_unused_vars));
-    }
-    return s_instance_;
-  }
-
-  static std::shared_ptr<Reducer> GetInstance() {
-    PADDLE_ENFORCE_EQ(
-        s_instance_ != NULL, true,
-        platform::errors::InvalidArgument("Reducer is not initialized."));
-    return s_instance_;
-  }
-
  private:
   std::vector<std::shared_ptr<imperative::VarBase>> vars_;
   std::vector<std::vector<size_t>> group_indices_;
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index 87aa989c41153c951d504b2dff9a9993bbd8c89e..cceae74f1dca5d8689b033c013ce3f729ea01484 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -1390,16 +1390,11 @@ void BindImperative(py::module *m_ptr) {
   py::class_<imperative::Reducer, std::shared_ptr<imperative::Reducer>>(
       m, "Reducer", R"DOC()DOC")
-      .def(py::init([](
-          const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
-          const std::vector<std::vector<size_t>> &group_indices,
-          const std::vector<bool> &is_sparse_gradient,
-          std::shared_ptr<imperative::ParallelContext> parallel_ctx,
-          const std::vector<size_t> &group_size_limits, bool find_unused_vars) {
-        return imperative::Reducer::SetInstance(
-            vars, group_indices, is_sparse_gradient, parallel_ctx,
-            group_size_limits, find_unused_vars);
-      }))
+      .def(py::init<const std::vector<std::shared_ptr<imperative::VarBase>> &,
+                    const std::vector<std::vector<size_t>> &,
+                    const std::vector<bool> &,
+                    std::shared_ptr<imperative::ParallelContext>,
+                    const std::vector<size_t> &, bool>())
       .def("prepare_for_backward", &imperative::Reducer::PrepareForBackward,
           py::arg("vars"), py::call_guard<py::gil_scoped_release>());