From 45d49619a02b09da9de6ee0a47da91c511c82fc5 Mon Sep 17 00:00:00 2001
From: Ruibin Cheung
Date: Fri, 21 Jul 2023 16:58:19 +0800
Subject: [PATCH] [clang-tidy] enable modernize-make-unique (#55506)

---
 .clang-tidy                                        |  2 +-
 .../framework/ir/op_compat_sensible_pass.cc        |  2 +-
 paddle/fluid/framework/op_desc.cc                  |  4 +-
 paddle/fluid/framework/var_desc.cc                 |  4 +-
 paddle/fluid/inference/api/analysis_config.cc      | 62 +++++++++----------
 .../memory/allocation/allocator_facade.cc          |  4 +-
 .../memory/allocation/buffered_allocator.cc        |  2 +-
 .../allocation/thread_local_allocator.cc           |  4 +-
 paddle/fluid/platform/device/gpu/gpu_info.cc       |  2 +-
 paddle/phi/backends/gpu/gpu_context.cc             |  4 +-
 paddle/phi/backends/stream.cc                      |  2 +-
 paddle/phi/core/threadpool.cc                      |  9 +--
 12 files changed, 51 insertions(+), 50 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index 3ae8f6a9b61..f4b350fdea7 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -171,7 +171,7 @@ Checks: '
 -modernize-deprecated-ios-base-aliases,
 -modernize-loop-convert,
 -modernize-make-shared,
--modernize-make-unique,
+modernize-make-unique,
 -modernize-pass-by-value,
 -modernize-raw-string-literal,
 -modernize-redundant-void-arg,
diff --git a/paddle/fluid/framework/ir/op_compat_sensible_pass.cc b/paddle/fluid/framework/ir/op_compat_sensible_pass.cc
index 359639ec2c1..37175c70e4b 100644
--- a/paddle/fluid/framework/ir/op_compat_sensible_pass.cc
+++ b/paddle/fluid/framework/ir/op_compat_sensible_pass.cc
@@ -294,7 +294,7 @@ bool OpCompat::Judge(const OpDesc& op_desc, const std::string& pass_name) {
 
 OpCompat& OpCompatSensiblePass::AddOpCompat(OpCompat&& op_compat) {
   std::string name = op_compat.Name();
-  op_compat_judgers_[name].reset(new OpCompat(std::move(op_compat)));
+  op_compat_judgers_[name] = std::make_unique<OpCompat>(std::move(op_compat));
   return *(op_compat_judgers_[name]);
 }
 
diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index 73bcd461612..69939df7619 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -457,7 +457,7 @@ void OpDesc::CopyFrom(const OpDesc &op_desc) {
   // The record of original_id_ is only for auto parallel.
   original_id_ = op_desc.original_id_;
   if (op_desc.dist_attr_) {
-    dist_attr_.reset(new OperatorDistAttr(*op_desc.dist_attr_));
+    dist_attr_ = std::make_unique<OperatorDistAttr>(*op_desc.dist_attr_);
   }
   need_update_ = true;
 }
@@ -1145,7 +1145,7 @@ OperatorDistAttr *OpDesc::MutableDistAttr() {
   if (dist_attr_) {
     return dist_attr_.get();
   } else {
-    dist_attr_.reset(new OperatorDistAttr(*this));
+    dist_attr_ = std::make_unique<OperatorDistAttr>(*this);
     return dist_attr_.get();
   }
 }
diff --git a/paddle/fluid/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc
index b75becf3c55..b0130e055c0 100644
--- a/paddle/fluid/framework/var_desc.cc
+++ b/paddle/fluid/framework/var_desc.cc
@@ -27,7 +27,7 @@ VarDesc::VarDesc(const VarDesc &other)
       attrs_(other.attrs_),
       original_id_(other.original_id_) {
   if (other.dist_attr_) {
-    dist_attr_.reset(new TensorDistAttr(*other.dist_attr_));
+    dist_attr_ = std::make_unique<TensorDistAttr>(*other.dist_attr_);
   }
   need_updated_ = true;
 }
@@ -442,7 +442,7 @@ TensorDistAttr *VarDesc::MutableDistAttr() {
     return dist_attr_.get();
   } else {
     auto shape = paddle::distributed::auto_parallel::get_tensor_shape(this);
-    dist_attr_.reset(new TensorDistAttr(shape));
+    dist_attr_ = std::make_unique<TensorDistAttr>(shape);
     return dist_attr_.get();
   }
   need_updated_ = true;
diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc
index 25c7e7e2a03..c04940c09bb 100644
--- a/paddle/fluid/inference/api/analysis_config.cc
+++ b/paddle/fluid/inference/api/analysis_config.cc
@@ -53,18 +53,18 @@ PassStrategy *AnalysisConfig::pass_builder() const {
   if (!pass_builder_.get()) {
     if (use_gpu_) {
       LOG(INFO) << "Create GPU IR passes";
-      pass_builder_.reset(new GpuPassStrategy);
+      pass_builder_ = std::make_unique<GpuPassStrategy>();
     } else if (use_xpu_) {
-      pass_builder_.reset(new XpuPassStrategy);
+      pass_builder_ = std::make_unique<XpuPassStrategy>();
     } else if (use_ipu_) {
       LOG(INFO) << "Create IPU IR passes";
-      pass_builder_.reset(new IpuPassStrategy);
+      pass_builder_ = std::make_unique<IpuPassStrategy>();
     } else if (use_custom_device_) {
       LOG(INFO) << "Create CUSTOM DEVICE IR passes";
-      pass_builder_.reset(new CustomDevicePassStrategy);
+      pass_builder_ = std::make_unique<CustomDevicePassStrategy>();
     } else {
       LOG(INFO) << "Create CPU IR passes";
-      pass_builder_.reset(new CpuPassStrategy);
+      pass_builder_ = std::make_unique<CpuPassStrategy>();
     }
   } else if (pass_builder_->use_gpu() ^ use_gpu()) {
     LOG(WARNING) << "The use_gpu flag is not compatible between Config and "
@@ -577,20 +577,20 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
                       false,
                       platform::errors::InvalidArgument(
                           "Only one choice can be made between CPU and XPU."));
-    pass_builder_.reset(new GpuPassStrategy(
-        *static_cast<GpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<GpuPassStrategy>(
+        *static_cast<GpuPassStrategy *>(other.pass_builder()));
   } else if (use_ipu_) {
-    pass_builder_.reset(new IpuPassStrategy(
-        *static_cast<IpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<IpuPassStrategy>(
+        *static_cast<IpuPassStrategy *>(other.pass_builder()));
   } else if (use_xpu_) {
-    pass_builder_.reset(new XpuPassStrategy(
-        *static_cast<XpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<XpuPassStrategy>(
+        *static_cast<XpuPassStrategy *>(other.pass_builder()));
   } else if (use_custom_device_) {
-    pass_builder_.reset(new CustomDevicePassStrategy(
-        *static_cast<CustomDevicePassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<CustomDevicePassStrategy>(
+        *static_cast<CustomDevicePassStrategy *>(other.pass_builder()));
   } else {
-    pass_builder_.reset(new CpuPassStrategy(
-        *static_cast<CpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<CpuPassStrategy>(
+        *static_cast<CpuPassStrategy *>(other.pass_builder()));
   }
 
 #undef CP_MEMBER
@@ -663,7 +663,7 @@ void AnalysisConfig::SetMkldnnCacheCapacity(int capacity) {
 void AnalysisConfig::EnableMkldnnQuantizer() {
 #ifdef PADDLE_WITH_MKLDNN
   if (!mkldnn_quantizer_config_)
-    mkldnn_quantizer_config_.reset(new MkldnnQuantizerConfig());
+    mkldnn_quantizer_config_ = std::make_unique<MkldnnQuantizerConfig>();
   use_mkldnn_quantizer_ = true;
 #else
   LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
@@ -850,54 +850,54 @@ void AnalysisConfig::Update() {
       ((use_ipu() ^ pass_builder_->use_ipu())) ||
       ((use_custom_device() ^ pass_builder_->use_custom_device()))) {
     if (use_gpu()) {
-      pass_builder_.reset(new GpuPassStrategy);
+      pass_builder_ = std::make_unique<GpuPassStrategy>();
     } else if (use_ipu()) {
-      pass_builder_.reset(new IpuPassStrategy);
+      pass_builder_ = std::make_unique<IpuPassStrategy>();
     } else if (use_xpu()) {
       PADDLE_ENFORCE_EQ(
           use_gpu(),
           false,
           platform::errors::InvalidArgument(
               "Only one choice can be made between CPU and XPU."));
-      pass_builder_.reset(new XpuPassStrategy);
+      pass_builder_ = std::make_unique<XpuPassStrategy>();
     } else if (use_custom_device()) {
       PADDLE_ENFORCE_EQ(
          use_gpu(),
          false,
          platform::errors::InvalidArgument(
              "Only one choice can be made between GPU and CustomDevice."));
-      pass_builder_.reset(new CustomDevicePassStrategy);
+      pass_builder_ = std::make_unique<CustomDevicePassStrategy>();
     } else {
-      pass_builder_.reset(new CpuPassStrategy);
+      pass_builder_ = std::make_unique<CpuPassStrategy>();
     }
   } else {
     if (use_gpu()) {
-      pass_builder_.reset(new GpuPassStrategy(
-          *static_cast<GpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<GpuPassStrategy>(
+          *static_cast<GpuPassStrategy *>(pass_builder_.get()));
    } else if (use_ipu()) {
      VLOG(1) << "IpuPassStrategy has been used.";
-      pass_builder_.reset(new IpuPassStrategy(
-          *static_cast<IpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<IpuPassStrategy>(
+          *static_cast<IpuPassStrategy *>(pass_builder_.get()));
    } else if (use_xpu()) {
      PADDLE_ENFORCE_EQ(
          use_gpu(),
          false,
          platform::errors::InvalidArgument(
              "Only one choice can be made between CPU and XPU."));
-      pass_builder_.reset(new XpuPassStrategy(
-          *static_cast<XpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<XpuPassStrategy>(
+          *static_cast<XpuPassStrategy *>(pass_builder_.get()));
    } else if (use_custom_device()) {
      PADDLE_ENFORCE_EQ(
          use_gpu(),
          false,
          platform::errors::InvalidArgument(
              "Only one choice can be made between GPU and CustomDevice."));
-      pass_builder_.reset(new CustomDevicePassStrategy(
-          *static_cast<CustomDevicePassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<CustomDevicePassStrategy>(
+          *static_cast<CustomDevicePassStrategy *>(pass_builder_.get()));
    } else {
-      pass_builder_.reset(new CpuPassStrategy(
-          *static_cast<CpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<CpuPassStrategy>(
+          *static_cast<CpuPassStrategy *>(pass_builder_.get()));
    }
  }
diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc
index 51de895be3d..b2a886e4aee 100644
--- a/paddle/fluid/memory/allocation/allocator_facade.cc
+++ b/paddle/fluid/memory/allocation/allocator_facade.cc
@@ -1454,8 +1454,8 @@ void AllocatorFacade::PrepareMemoryPoolForCUDAGraph(int64_t id) {
   auto& allocator = cuda_graph_map_[id];
   auto& ref_cnt = cuda_graph_ref_cnt_[id];
   if (allocator.get() == nullptr) {
-    allocator.reset(
-        new AllocatorFacadePrivate(/*allow_free_idle_chunk=*/false));
+    allocator = std::make_unique<AllocatorFacadePrivate>(
+        /*allow_free_idle_chunk=*/false);
     VLOG(10) << "Create memory pool for CUDA Graph with memory ID " << id;
   } else {
     VLOG(10) << "Use created memory pool for CUDA Graph with memory ID " << id;
diff --git a/paddle/fluid/memory/allocation/buffered_allocator.cc b/paddle/fluid/memory/allocation/buffered_allocator.cc
index 2f9645d330c..8777d8bd10d 100644
--- a/paddle/fluid/memory/allocation/buffered_allocator.cc
+++ b/paddle/fluid/memory/allocation/buffered_allocator.cc
@@ -25,7 +25,7 @@ BufferedAllocator::BufferedAllocator(std::shared_ptr<Allocator> allocator)
       platform::errors::InvalidArgument(
           "Underlying allocator of BufferedAllocator is NULL"));
   if (underlying_allocator_->IsAllocThreadSafe()) {
-    mtx_.reset(new std::mutex());
+    mtx_ = std::make_unique<std::mutex>();
   }
 }
 
diff --git a/paddle/fluid/memory/allocation/thread_local_allocator.cc b/paddle/fluid/memory/allocation/thread_local_allocator.cc
index 875e57cfd4b..94219338056 100644
--- a/paddle/fluid/memory/allocation/thread_local_allocator.cc
+++ b/paddle/fluid/memory/allocation/thread_local_allocator.cc
@@ -21,11 +21,11 @@ namespace allocation {
 ThreadLocalAllocatorImpl::ThreadLocalAllocatorImpl(const platform::Place& p)
     : place_(p) {
   if (platform::is_gpu_place(place_)) {
-    buddy_allocator_.reset(new memory::detail::BuddyAllocator(
+    buddy_allocator_ = std::make_unique<memory::detail::BuddyAllocator>(
         std::unique_ptr<memory::detail::SystemAllocator>(
             new memory::detail::GPUAllocator(place_.device)),
         platform::GpuMinChunkSize(),
-        platform::GpuMaxChunkSize()));
+        platform::GpuMaxChunkSize());
   } else {
     PADDLE_THROW(platform::errors::Unavailable(
         "Thread local allocator only supports CUDAPlace now."));
diff --git a/paddle/fluid/platform/device/gpu/gpu_info.cc b/paddle/fluid/platform/device/gpu/gpu_info.cc
index 7f1f2c76bd6..2bd1171c8da 100644
--- a/paddle/fluid/platform/device/gpu/gpu_info.cc
+++ b/paddle/fluid/platform/device/gpu/gpu_info.cc
@@ -135,7 +135,7 @@ class RecordedGpuMallocHelper {
   explicit RecordedGpuMallocHelper(int dev_id, uint64_t limit_size = 0)
       : dev_id_(dev_id), limit_size_(limit_size) {
     if (NeedRecord()) {
-      mtx_.reset(new std::mutex());
+      mtx_ = std::make_unique<std::mutex>();
     }
 
     if (FLAGS_enable_gpu_memory_usage_log) {
diff --git a/paddle/phi/backends/gpu/gpu_context.cc b/paddle/phi/backends/gpu/gpu_context.cc
index 5c9c010d365..ac7de9d33a7 100644
--- a/paddle/phi/backends/gpu/gpu_context.cc
+++ b/paddle/phi/backends/gpu/gpu_context.cc
@@ -1048,11 +1048,11 @@ void GPUContext::ClearDnnAttr() { return impl_->ClearDnnAttr(); }
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 GPUPinnedContext::GPUPinnedContext() {
-  eigen_device_.reset(new Eigen::DefaultDevice());
+  eigen_device_ = std::make_unique<Eigen::DefaultDevice>();
 }
 
 GPUPinnedContext::GPUPinnedContext(GPUPinnedPlace place) : place_(place) {
-  eigen_device_.reset(new Eigen::DefaultDevice());
+  eigen_device_ = std::make_unique<Eigen::DefaultDevice>();
 }
 
 Eigen::DefaultDevice* GPUPinnedContext::eigen_device() const {
diff --git a/paddle/phi/backends/stream.cc b/paddle/phi/backends/stream.cc
index b328a1e626c..77a0c636711 100644
--- a/paddle/phi/backends/stream.cc
+++ b/paddle/phi/backends/stream.cc
@@ -60,7 +60,7 @@ bool Stream::Init(const Place& place,
   phi::DeviceManager::SetDevice(place_);
   device_->CreateStream(this, priority, flag);
 
-  callback_manager_.reset(new CallbackManager(this));
+  callback_manager_ = std::make_unique<CallbackManager>(this);
   VLOG(3) << "Init Stream: " << stream_ << ", place: " << place_
           << ", priority: " << static_cast<int>(priority)
           << ", flag:" << static_cast<int>(flag);
diff --git a/paddle/phi/core/threadpool.cc b/paddle/phi/core/threadpool.cc
index db1f3091031..482f8c9bf83 100644
--- a/paddle/phi/core/threadpool.cc
+++ b/paddle/phi/core/threadpool.cc
@@ -47,15 +47,16 @@ void ThreadPool::Init() {
         num_threads,
         0,
         phi::errors::InvalidArgument("The number of threads is 0."));
-    threadpool_.reset(new ThreadPool(num_threads));
+    threadpool_ = std::make_unique<ThreadPool>(num_threads);
   }
 }
 
 ThreadPool::ThreadPool(int num_threads) : running_(true) {
   threads_.resize(num_threads);
   for (auto& thread : threads_) {
-    // TODO(Yancey1989): binding the thread on the specify CPU number
-    thread.reset(new std::thread(std::bind(&ThreadPool::TaskLoop, this)));
+    // TODO(Yancey1989): binding the thread on the specify CPU number
+    thread =
+        std::make_unique<std::thread>(std::bind(&ThreadPool::TaskLoop, this));
   }
 }
 
@@ -111,7 +112,7 @@ ThreadPool* ThreadPoolIO::GetInstanceIO() {
 void ThreadPoolIO::InitIO() {
   if (io_threadpool_.get() == nullptr) {
     // TODO(typhoonzero1986): make this configurable
-    io_threadpool_.reset(new ThreadPool(FLAGS_io_threadpool_size));
+    io_threadpool_ = std::make_unique<ThreadPool>(FLAGS_io_threadpool_size);
   }
 }
 }  // namespace phi
--
GitLab
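
Note: every hunk in this patch is the same mechanical rewrite performed by clang-tidy's modernize-make-unique check: "ptr.reset(new T(args...))" becomes "ptr = std::make_unique<T>(args...)". A minimal standalone sketch of the transformation follows; the Widget type is a hypothetical stand-in, not a class from the Paddle codebase.

#include <memory>
#include <string>

// Hypothetical example type standing in for the Paddle classes above.
struct Widget {
  explicit Widget(std::string name) : name_(std::move(name)) {}
  std::string name_;
};

int main() {
  std::unique_ptr<Widget> w;

  // Before: the type is spelled twice and a raw `new` expression appears.
  w.reset(new Widget("old-style"));

  // After: the form modernize-make-unique rewrites it to. The type is
  // spelled once, there is no raw `new`, and the allocation cannot leak
  // if it occurs inside a larger full-expression that throws partway.
  w = std::make_unique<Widget>("new-style");

  return 0;
}

Assigning the result of std::make_unique<Derived>() to a std::unique_ptr<Base> also works through the unique_ptr converting move assignment, which is why lines such as "pass_builder_ = std::make_unique<GpuPassStrategy>()" compile even though pass_builder_ holds a PassStrategy pointer.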