diff --git a/.clang-tidy b/.clang-tidy
index 3ae8f6a9b61bc5d8f75a72b21be8ac8c3555a6e6..f4b350fdea7a641244039657996e26ec2d4ab65d 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -171,7 +171,7 @@ Checks: '
 -modernize-deprecated-ios-base-aliases,
 -modernize-loop-convert,
 -modernize-make-shared,
--modernize-make-unique,
+modernize-make-unique,
 -modernize-pass-by-value,
 -modernize-raw-string-literal,
 -modernize-redundant-void-arg,
diff --git a/paddle/fluid/framework/ir/op_compat_sensible_pass.cc b/paddle/fluid/framework/ir/op_compat_sensible_pass.cc
index 359639ec2c16c1c81e8767e95c5148d5cfd79af8..37175c70e4b668a74ceabd5c899168743b5be91a 100644
--- a/paddle/fluid/framework/ir/op_compat_sensible_pass.cc
+++ b/paddle/fluid/framework/ir/op_compat_sensible_pass.cc
@@ -294,7 +294,7 @@ bool OpCompat::Judge(const OpDesc& op_desc, const std::string& pass_name) {
 
 OpCompat& OpCompatSensiblePass::AddOpCompat(OpCompat&& op_compat) {
   std::string name = op_compat.Name();
-  op_compat_judgers_[name].reset(new OpCompat(std::move(op_compat)));
+  op_compat_judgers_[name] = std::make_unique<OpCompat>(std::move(op_compat));
   return *(op_compat_judgers_[name]);
 }
 
diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index 73bcd461612670deabfae1f0810d2d9c9cab3ebe..69939df7619e03104c681ad337f4051cfd815a83 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -457,7 +457,7 @@ void OpDesc::CopyFrom(const OpDesc &op_desc) {
   // The record of original_id_ is only for auto parallel.
   original_id_ = op_desc.original_id_;
   if (op_desc.dist_attr_) {
-    dist_attr_.reset(new OperatorDistAttr(*op_desc.dist_attr_));
+    dist_attr_ = std::make_unique<OperatorDistAttr>(*op_desc.dist_attr_);
   }
   need_update_ = true;
 }
@@ -1145,7 +1145,7 @@ OperatorDistAttr *OpDesc::MutableDistAttr() {
   if (dist_attr_) {
     return dist_attr_.get();
   } else {
-    dist_attr_.reset(new OperatorDistAttr(*this));
+    dist_attr_ = std::make_unique<OperatorDistAttr>(*this);
     return dist_attr_.get();
   }
 }
diff --git a/paddle/fluid/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc
index b75becf3c55422006dce758f211fc726418a326a..b0130e055c07591416f945059bedd56e17fb7373 100644
--- a/paddle/fluid/framework/var_desc.cc
+++ b/paddle/fluid/framework/var_desc.cc
@@ -27,7 +27,7 @@ VarDesc::VarDesc(const VarDesc &other)
       attrs_(other.attrs_),
       original_id_(other.original_id_) {
   if (other.dist_attr_) {
-    dist_attr_.reset(new TensorDistAttr(*other.dist_attr_));
+    dist_attr_ = std::make_unique<TensorDistAttr>(*other.dist_attr_);
   }
   need_updated_ = true;
 }
@@ -442,7 +442,7 @@ TensorDistAttr *VarDesc::MutableDistAttr() {
     return dist_attr_.get();
   } else {
     auto shape = paddle::distributed::auto_parallel::get_tensor_shape(this);
-    dist_attr_.reset(new TensorDistAttr(shape));
+    dist_attr_ = std::make_unique<TensorDistAttr>(shape);
     return dist_attr_.get();
   }
   need_updated_ = true;
diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc
index 25c7e7e2a03d40d533e5a17ed3e4c5e9c391d34a..c04940c09bb8c3dd6bd9bba9dee59bed3088d4ea 100644
--- a/paddle/fluid/inference/api/analysis_config.cc
+++ b/paddle/fluid/inference/api/analysis_config.cc
@@ -53,18 +53,18 @@ PassStrategy *AnalysisConfig::pass_builder() const {
   if (!pass_builder_.get()) {
     if (use_gpu_) {
       LOG(INFO) << "Create GPU IR passes";
-      pass_builder_.reset(new GpuPassStrategy);
+      pass_builder_ = std::make_unique<GpuPassStrategy>();
     } else if (use_xpu_) {
-      pass_builder_.reset(new XpuPassStrategy);
+      pass_builder_ = std::make_unique<XpuPassStrategy>();
     } else if (use_ipu_) {
       LOG(INFO) << "Create IPU IR passes";
-      pass_builder_.reset(new IpuPassStrategy);
+      pass_builder_ = std::make_unique<IpuPassStrategy>();
     } else if (use_custom_device_) {
       LOG(INFO) << "Create CUSTOM DEVICE IR passes";
-      pass_builder_.reset(new CustomDevicePassStrategy);
+      pass_builder_ = std::make_unique<CustomDevicePassStrategy>();
     } else {
       LOG(INFO) << "Create CPU IR passes";
-      pass_builder_.reset(new CpuPassStrategy);
+      pass_builder_ = std::make_unique<CpuPassStrategy>();
     }
   } else if (pass_builder_->use_gpu() ^ use_gpu()) {
     LOG(WARNING) << "The use_gpu flag is not compatible between Config and "
@@ -577,20 +577,20 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
                       false,
                       platform::errors::InvalidArgument(
                           "Only one choice can be made between CPU and XPU."));
-    pass_builder_.reset(new GpuPassStrategy(
-        *static_cast<GpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<GpuPassStrategy>(
+        *static_cast<GpuPassStrategy *>(other.pass_builder()));
   } else if (use_ipu_) {
-    pass_builder_.reset(new IpuPassStrategy(
-        *static_cast<IpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<IpuPassStrategy>(
+        *static_cast<IpuPassStrategy *>(other.pass_builder()));
   } else if (use_xpu_) {
-    pass_builder_.reset(new XpuPassStrategy(
-        *static_cast<XpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<XpuPassStrategy>(
+        *static_cast<XpuPassStrategy *>(other.pass_builder()));
   } else if (use_custom_device_) {
-    pass_builder_.reset(new CustomDevicePassStrategy(
-        *static_cast<CustomDevicePassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<CustomDevicePassStrategy>(
+        *static_cast<CustomDevicePassStrategy *>(other.pass_builder()));
   } else {
-    pass_builder_.reset(new CpuPassStrategy(
-        *static_cast<CpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<CpuPassStrategy>(
+        *static_cast<CpuPassStrategy *>(other.pass_builder()));
   }
 
 #undef CP_MEMBER
@@ -663,7 +663,7 @@ void AnalysisConfig::SetMkldnnCacheCapacity(int capacity) {
 void AnalysisConfig::EnableMkldnnQuantizer() {
 #ifdef PADDLE_WITH_MKLDNN
   if (!mkldnn_quantizer_config_)
-    mkldnn_quantizer_config_.reset(new MkldnnQuantizerConfig());
+    mkldnn_quantizer_config_ = std::make_unique<MkldnnQuantizerConfig>();
   use_mkldnn_quantizer_ = true;
 #else
   LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
@@ -850,54 +850,54 @@ void AnalysisConfig::Update() {
       ((use_ipu() ^ pass_builder_->use_ipu())) ||
       ((use_custom_device() ^ pass_builder_->use_custom_device()))) {
     if (use_gpu()) {
-      pass_builder_.reset(new GpuPassStrategy);
+      pass_builder_ = std::make_unique<GpuPassStrategy>();
     } else if (use_ipu()) {
-      pass_builder_.reset(new IpuPassStrategy);
+      pass_builder_ = std::make_unique<IpuPassStrategy>();
     } else if (use_xpu()) {
       PADDLE_ENFORCE_EQ(
           use_gpu(),
           false,
           platform::errors::InvalidArgument(
               "Only one choice can be made between CPU and XPU."));
-      pass_builder_.reset(new XpuPassStrategy);
+      pass_builder_ = std::make_unique<XpuPassStrategy>();
     } else if (use_custom_device()) {
       PADDLE_ENFORCE_EQ(
          use_gpu(),
          false,
          platform::errors::InvalidArgument(
             "Only one choice can be made between GPU and CustomDevice."));
-      pass_builder_.reset(new CustomDevicePassStrategy);
+      pass_builder_ = std::make_unique<CustomDevicePassStrategy>();
     } else {
-      pass_builder_.reset(new CpuPassStrategy);
+      pass_builder_ = std::make_unique<CpuPassStrategy>();
    }
  } else {
    if (use_gpu()) {
-      pass_builder_.reset(new GpuPassStrategy(
-          *static_cast<GpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<GpuPassStrategy>(
+          *static_cast<GpuPassStrategy *>(pass_builder_.get()));
    } else if (use_ipu()) {
      VLOG(1) << "IpuPassStrategy has been used.";
-      pass_builder_.reset(new IpuPassStrategy(
-          *static_cast<IpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<IpuPassStrategy>(
+          *static_cast<IpuPassStrategy *>(pass_builder_.get()));
    } else if (use_xpu()) {
      PADDLE_ENFORCE_EQ(
          use_gpu(),
          false,
          platform::errors::InvalidArgument(
             "Only one choice can be made between CPU and XPU."));
-      pass_builder_.reset(new XpuPassStrategy(
-          *static_cast<XpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<XpuPassStrategy>(
+          *static_cast<XpuPassStrategy *>(pass_builder_.get()));
    } else if (use_custom_device()) {
      PADDLE_ENFORCE_EQ(
          use_gpu(),
          false,
          platform::errors::InvalidArgument(
             "Only one choice can be made between GPU and CustomDevice."));
-      pass_builder_.reset(new CustomDevicePassStrategy(
-          *static_cast<CustomDevicePassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<CustomDevicePassStrategy>(
+          *static_cast<CustomDevicePassStrategy *>(pass_builder_.get()));
    } else {
-      pass_builder_.reset(new CpuPassStrategy(
-          *static_cast<CpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<CpuPassStrategy>(
+          *static_cast<CpuPassStrategy *>(pass_builder_.get()));
    }
  }
diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc
index 51de895be3d42426cb21a400458859ce21eba8aa..b2a886e4aee4f2aa411e6e61595b3929b7d084eb 100644
--- a/paddle/fluid/memory/allocation/allocator_facade.cc
+++ b/paddle/fluid/memory/allocation/allocator_facade.cc
@@ -1454,8 +1454,8 @@ void AllocatorFacade::PrepareMemoryPoolForCUDAGraph(int64_t id) {
   auto& allocator = cuda_graph_map_[id];
   auto& ref_cnt = cuda_graph_ref_cnt_[id];
   if (allocator.get() == nullptr) {
-    allocator.reset(
-        new AllocatorFacadePrivate(/*allow_free_idle_chunk=*/false));
+    allocator = std::make_unique<AllocatorFacadePrivate>(
+        /*allow_free_idle_chunk=*/false);
     VLOG(10) << "Create memory pool for CUDA Graph with memory ID " << id;
   } else {
     VLOG(10) << "Use created memory pool for CUDA Graph with memory ID " << id;
diff --git a/paddle/fluid/memory/allocation/buffered_allocator.cc b/paddle/fluid/memory/allocation/buffered_allocator.cc
index 2f9645d330c95f9ac587614b87e791f0eba55e0b..8777d8bd10db78aff6f25604ceaae1c7d1a58a9c 100644
--- a/paddle/fluid/memory/allocation/buffered_allocator.cc
+++ b/paddle/fluid/memory/allocation/buffered_allocator.cc
@@ -25,7 +25,7 @@ BufferedAllocator::BufferedAllocator(std::shared_ptr<Allocator> allocator)
       platform::errors::InvalidArgument(
           "Underlying allocator of BufferedAllocator is NULL"));
   if (underlying_allocator_->IsAllocThreadSafe()) {
-    mtx_.reset(new std::mutex());
+    mtx_ = std::make_unique<std::mutex>();
   }
 }
 
diff --git a/paddle/fluid/memory/allocation/thread_local_allocator.cc b/paddle/fluid/memory/allocation/thread_local_allocator.cc
index 875e57cfd4b8735a789ce259dad26d72a5e11bde..942193380566db06996879082323b4daee210bfd 100644
--- a/paddle/fluid/memory/allocation/thread_local_allocator.cc
+++ b/paddle/fluid/memory/allocation/thread_local_allocator.cc
@@ -21,11 +21,11 @@ namespace allocation {
 ThreadLocalAllocatorImpl::ThreadLocalAllocatorImpl(const platform::Place& p)
     : place_(p) {
   if (platform::is_gpu_place(place_)) {
-    buddy_allocator_.reset(new memory::detail::BuddyAllocator(
+    buddy_allocator_ = std::make_unique<memory::detail::BuddyAllocator>(
         std::unique_ptr<memory::detail::SystemAllocator>(
             new memory::detail::GPUAllocator(place_.device)),
         platform::GpuMinChunkSize(),
-        platform::GpuMaxChunkSize()));
+        platform::GpuMaxChunkSize());
   } else {
     PADDLE_THROW(platform::errors::Unavailable(
         "Thread local allocator only supports CUDAPlace now."));
diff --git a/paddle/fluid/platform/device/gpu/gpu_info.cc b/paddle/fluid/platform/device/gpu/gpu_info.cc
index 7f1f2c76bd630dba8dfaae3fe1d33232fe9d5422..2bd1171c8da55d15dd8afaf5bf73b4fd9fa32869 100644
--- a/paddle/fluid/platform/device/gpu/gpu_info.cc
+++ b/paddle/fluid/platform/device/gpu/gpu_info.cc
@@ -135,7 +135,7 @@ class RecordedGpuMallocHelper {
   explicit RecordedGpuMallocHelper(int dev_id, uint64_t limit_size = 0)
       : dev_id_(dev_id), limit_size_(limit_size) {
     if (NeedRecord()) {
-      mtx_.reset(new std::mutex());
+      mtx_ = std::make_unique<std::mutex>();
     }
 
     if (FLAGS_enable_gpu_memory_usage_log) {
diff --git a/paddle/phi/backends/gpu/gpu_context.cc b/paddle/phi/backends/gpu/gpu_context.cc
index 5c9c010d365e4e6256713fe9db8d57417172b794..ac7de9d33a7aa14180b6cd3c4ec4afaea75a470c 100644
--- a/paddle/phi/backends/gpu/gpu_context.cc
+++ b/paddle/phi/backends/gpu/gpu_context.cc
@@ -1048,11 +1048,11 @@ void GPUContext::ClearDnnAttr() { return impl_->ClearDnnAttr(); }
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 GPUPinnedContext::GPUPinnedContext() {
-  eigen_device_.reset(new Eigen::DefaultDevice());
+  eigen_device_ = std::make_unique<Eigen::DefaultDevice>();
 }
 
 GPUPinnedContext::GPUPinnedContext(GPUPinnedPlace place) : place_(place) {
-  eigen_device_.reset(new Eigen::DefaultDevice());
+  eigen_device_ = std::make_unique<Eigen::DefaultDevice>();
 }
 
 Eigen::DefaultDevice* GPUPinnedContext::eigen_device() const {
diff --git a/paddle/phi/backends/stream.cc b/paddle/phi/backends/stream.cc
index b328a1e626cbc814ba13f7e3d8b0312a6708c41a..77a0c636711c08c52fe602570981ee922ccc7047 100644
--- a/paddle/phi/backends/stream.cc
+++ b/paddle/phi/backends/stream.cc
@@ -60,7 +60,7 @@ bool Stream::Init(const Place& place,
   phi::DeviceManager::SetDevice(place_);
   device_->CreateStream(this, priority, flag);
 
-  callback_manager_.reset(new CallbackManager(this));
+  callback_manager_ = std::make_unique<CallbackManager>(this);
   VLOG(3) << "Init Stream: " << stream_ << ", place: " << place_
           << ", priority: " << static_cast<int>(priority)
           << ", flag:" << static_cast<int>(flag);
diff --git a/paddle/phi/core/threadpool.cc b/paddle/phi/core/threadpool.cc
index db1f3091031fc156ae4fcea96864b9c395ea6aef..482f8c9bf83b38f474e3acd65ebdbe8d6dacae9c 100644
--- a/paddle/phi/core/threadpool.cc
+++ b/paddle/phi/core/threadpool.cc
@@ -47,15 +47,16 @@ void ThreadPool::Init() {
         num_threads,
         0,
         phi::errors::InvalidArgument("The number of threads is 0."));
-    threadpool_.reset(new ThreadPool(num_threads));
+    threadpool_ = std::make_unique<ThreadPool>(num_threads);
   }
 }
 
 ThreadPool::ThreadPool(int num_threads) : running_(true) {
   threads_.resize(num_threads);
   for (auto& thread : threads_) {
     // TODO(Yancey1989): binding the thread on the specify CPU number
-    thread.reset(new std::thread(std::bind(&ThreadPool::TaskLoop, this)));
+    thread =
+        std::make_unique<std::thread>(std::bind(&ThreadPool::TaskLoop, this));
   }
 }
 
@@ -111,7 +112,7 @@ ThreadPool* ThreadPoolIO::GetInstanceIO() {
 void ThreadPoolIO::InitIO() {
   if (io_threadpool_.get() == nullptr) {
     // TODO(typhoonzero1986): make this configurable
-    io_threadpool_.reset(new ThreadPool(FLAGS_io_threadpool_size));
+    io_threadpool_ = std::make_unique<ThreadPool>(FLAGS_io_threadpool_size);
   }
 }
 }  // namespace phi
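
Every hunk above applies the single mechanical rewrite that the newly enabled `modernize-make-unique` check enforces: a raw `new T(args...)` handed to `unique_ptr<T>::reset()` becomes `ptr = std::make_unique<T>(args...)`. A minimal standalone sketch of the before/after follows; the `Widget` type here is hypothetical and not part of this patch.

```cpp
#include <memory>
#include <string>
#include <utility>

// Hypothetical example type; stands in for OpCompat, GpuPassStrategy, etc.
struct Widget {
  explicit Widget(std::string name) : name_(std::move(name)) {}
  std::string name_;
};

int main() {
  std::unique_ptr<Widget> w;

  // Before: the style clang-tidy flags. A raw owning pointer exists
  // momentarily between `new` and reset().
  w.reset(new Widget("old"));

  // After: the type is named once, no raw pointer is ever exposed, and the
  // allocation cannot leak if another argument expression throws mid-call.
  w = std::make_unique<Widget>("new");
  return 0;
}
```

One caveat worth knowing: `std::make_unique<T>()` value-initializes, while `ptr.reset(new T)` (no parentheses) default-initializes; for the class types touched in this patch the two coincide, so the rewrite is behavior-preserving.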