Unverified commit 45d49619, authored by Ruibin Cheung and committed by GitHub

[clang-tidy] enable modernize-make-unique (#55506)
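The check rewrites `unique_ptr::reset(new T(...))` into `std::make_unique<T>(...)`, which allocates and takes ownership in a single expression and drops the explicit `new`. A minimal sketch of the before/after shape, using a hypothetical `Widget` type rather than code from this commit:

#include <memory>
#include <string>
#include <utility>

// Hypothetical type, used only to illustrate the rewrite.
struct Widget {
  explicit Widget(std::string name) : name_(std::move(name)) {}
  std::string name_;
};

int main() {
  std::unique_ptr<Widget> w;

  // Before: the style this commit removes. Allocation and ownership
  // transfer happen in two separate steps.
  w.reset(new Widget("before"));

  // After: what modernize-make-unique produces. One expression, no
  // explicit new.
  w = std::make_unique<Widget>("after");
  return 0;
}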

Parent cd0f1523
@@ -171,7 +171,7 @@ Checks: '
 -modernize-deprecated-ios-base-aliases,
 -modernize-loop-convert,
 -modernize-make-shared,
--modernize-make-unique,
+modernize-make-unique,
 -modernize-pass-by-value,
 -modernize-raw-string-literal,
 -modernize-redundant-void-arg,
...
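In the `Checks` string of `.clang-tidy`, an entry prefixed with `-` disables that check, so dropping the leading `-` from `modernize-make-unique` enables it repository-wide. Assuming a standard LLVM installation, the same fix can be previewed or applied to a single file with something like `clang-tidy --checks='-*,modernize-make-unique' --fix path/to/file.cc`.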
@@ -294,7 +294,7 @@ bool OpCompat::Judge(const OpDesc& op_desc, const std::string& pass_name) {
 OpCompat& OpCompatSensiblePass::AddOpCompat(OpCompat&& op_compat) {
   std::string name = op_compat.Name();
-  op_compat_judgers_[name].reset(new OpCompat(std::move(op_compat)));
+  op_compat_judgers_[name] = std::make_unique<OpCompat>(std::move(op_compat));
   return *(op_compat_judgers_[name]);
 }
...
@@ -457,7 +457,7 @@ void OpDesc::CopyFrom(const OpDesc &op_desc) {
   // The record of original_id_ is only for auto parallel.
   original_id_ = op_desc.original_id_;
   if (op_desc.dist_attr_) {
-    dist_attr_.reset(new OperatorDistAttr(*op_desc.dist_attr_));
+    dist_attr_ = std::make_unique<OperatorDistAttr>(*op_desc.dist_attr_);
   }
   need_update_ = true;
 }
@@ -1145,7 +1145,7 @@ OperatorDistAttr *OpDesc::MutableDistAttr() {
   if (dist_attr_) {
     return dist_attr_.get();
   } else {
-    dist_attr_.reset(new OperatorDistAttr(*this));
+    dist_attr_ = std::make_unique<OperatorDistAttr>(*this);
     return dist_attr_.get();
   }
 }
...
@@ -27,7 +27,7 @@ VarDesc::VarDesc(const VarDesc &other)
       attrs_(other.attrs_),
       original_id_(other.original_id_) {
   if (other.dist_attr_) {
-    dist_attr_.reset(new TensorDistAttr(*other.dist_attr_));
+    dist_attr_ = std::make_unique<TensorDistAttr>(*other.dist_attr_);
   }
   need_updated_ = true;
 }
@@ -442,7 +442,7 @@ TensorDistAttr *VarDesc::MutableDistAttr() {
     return dist_attr_.get();
   } else {
     auto shape = paddle::distributed::auto_parallel::get_tensor_shape(this);
-    dist_attr_.reset(new TensorDistAttr(shape));
+    dist_attr_ = std::make_unique<TensorDistAttr>(shape);
     return dist_attr_.get();
   }
   need_updated_ = true;
...
@@ -53,18 +53,18 @@ PassStrategy *AnalysisConfig::pass_builder() const {
   if (!pass_builder_.get()) {
     if (use_gpu_) {
       LOG(INFO) << "Create GPU IR passes";
-      pass_builder_.reset(new GpuPassStrategy);
+      pass_builder_ = std::make_unique<GpuPassStrategy>();
     } else if (use_xpu_) {
-      pass_builder_.reset(new XpuPassStrategy);
+      pass_builder_ = std::make_unique<XpuPassStrategy>();
     } else if (use_ipu_) {
       LOG(INFO) << "Create IPU IR passes";
-      pass_builder_.reset(new IpuPassStrategy);
+      pass_builder_ = std::make_unique<IpuPassStrategy>();
     } else if (use_custom_device_) {
       LOG(INFO) << "Create CUSTOM DEVICE IR passes";
-      pass_builder_.reset(new CustomDevicePassStrategy);
+      pass_builder_ = std::make_unique<CustomDevicePassStrategy>();
     } else {
       LOG(INFO) << "Create CPU IR passes";
-      pass_builder_.reset(new CpuPassStrategy);
+      pass_builder_ = std::make_unique<CpuPassStrategy>();
     }
   } else if (pass_builder_->use_gpu() ^ use_gpu()) {
     LOG(WARNING) << "The use_gpu flag is not compatible between Config and "
@@ -577,20 +577,20 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
         false,
         platform::errors::InvalidArgument(
             "Only one choice can be made between CPU and XPU."));
-    pass_builder_.reset(new GpuPassStrategy(
-        *static_cast<GpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<GpuPassStrategy>(
+        *static_cast<GpuPassStrategy *>(other.pass_builder()));
   } else if (use_ipu_) {
-    pass_builder_.reset(new IpuPassStrategy(
-        *static_cast<IpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<IpuPassStrategy>(
+        *static_cast<IpuPassStrategy *>(other.pass_builder()));
   } else if (use_xpu_) {
-    pass_builder_.reset(new XpuPassStrategy(
-        *static_cast<XpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<XpuPassStrategy>(
+        *static_cast<XpuPassStrategy *>(other.pass_builder()));
   } else if (use_custom_device_) {
-    pass_builder_.reset(new CustomDevicePassStrategy(
-        *static_cast<CustomDevicePassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<CustomDevicePassStrategy>(
+        *static_cast<CustomDevicePassStrategy *>(other.pass_builder()));
   } else {
-    pass_builder_.reset(new CpuPassStrategy(
-        *static_cast<CpuPassStrategy *>(other.pass_builder())));
+    pass_builder_ = std::make_unique<CpuPassStrategy>(
+        *static_cast<CpuPassStrategy *>(other.pass_builder()));
   }
 #undef CP_MEMBER
@@ -663,7 +663,7 @@ void AnalysisConfig::SetMkldnnCacheCapacity(int capacity) {
 void AnalysisConfig::EnableMkldnnQuantizer() {
 #ifdef PADDLE_WITH_MKLDNN
   if (!mkldnn_quantizer_config_)
-    mkldnn_quantizer_config_.reset(new MkldnnQuantizerConfig());
+    mkldnn_quantizer_config_ = std::make_unique<MkldnnQuantizerConfig>();
   use_mkldnn_quantizer_ = true;
 #else
   LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
@@ -850,54 +850,54 @@ void AnalysisConfig::Update() {
       ((use_ipu() ^ pass_builder_->use_ipu())) ||
       ((use_custom_device() ^ pass_builder_->use_custom_device()))) {
     if (use_gpu()) {
-      pass_builder_.reset(new GpuPassStrategy);
+      pass_builder_ = std::make_unique<GpuPassStrategy>();
     } else if (use_ipu()) {
-      pass_builder_.reset(new IpuPassStrategy);
+      pass_builder_ = std::make_unique<IpuPassStrategy>();
     } else if (use_xpu()) {
       PADDLE_ENFORCE_EQ(
           use_gpu(),
           false,
           platform::errors::InvalidArgument(
              "Only one choice can be made between CPU and XPU."));
-      pass_builder_.reset(new XpuPassStrategy);
+      pass_builder_ = std::make_unique<XpuPassStrategy>();
     } else if (use_custom_device()) {
       PADDLE_ENFORCE_EQ(
           use_gpu(),
           false,
           platform::errors::InvalidArgument(
              "Only one choice can be made between GPU and CustomDevice."));
-      pass_builder_.reset(new CustomDevicePassStrategy);
+      pass_builder_ = std::make_unique<CustomDevicePassStrategy>();
     } else {
-      pass_builder_.reset(new CpuPassStrategy);
+      pass_builder_ = std::make_unique<CpuPassStrategy>();
     }
   } else {
     if (use_gpu()) {
-      pass_builder_.reset(new GpuPassStrategy(
-          *static_cast<GpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<GpuPassStrategy>(
+          *static_cast<GpuPassStrategy *>(pass_builder_.get()));
     } else if (use_ipu()) {
       VLOG(1) << "IpuPassStrategy has been used.";
-      pass_builder_.reset(new IpuPassStrategy(
-          *static_cast<IpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<IpuPassStrategy>(
+          *static_cast<IpuPassStrategy *>(pass_builder_.get()));
     } else if (use_xpu()) {
       PADDLE_ENFORCE_EQ(
          use_gpu(),
          false,
          platform::errors::InvalidArgument(
              "Only one choice can be made between CPU and XPU."));
-      pass_builder_.reset(new XpuPassStrategy(
-          *static_cast<XpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<XpuPassStrategy>(
+          *static_cast<XpuPassStrategy *>(pass_builder_.get()));
     } else if (use_custom_device()) {
       PADDLE_ENFORCE_EQ(
          use_gpu(),
          false,
          platform::errors::InvalidArgument(
              "Only one choice can be made between GPU and CustomDevice."));
-      pass_builder_.reset(new CustomDevicePassStrategy(
-          *static_cast<CustomDevicePassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<CustomDevicePassStrategy>(
+          *static_cast<CustomDevicePassStrategy *>(pass_builder_.get()));
     } else {
-      pass_builder_.reset(new CpuPassStrategy(
-          *static_cast<CpuPassStrategy *>(pass_builder_.get())));
+      pass_builder_ = std::make_unique<CpuPassStrategy>(
+          *static_cast<CpuPassStrategy *>(pass_builder_.get()));
     }
   }
...
@@ -1454,8 +1454,8 @@ void AllocatorFacade::PrepareMemoryPoolForCUDAGraph(int64_t id) {
   auto& allocator = cuda_graph_map_[id];
   auto& ref_cnt = cuda_graph_ref_cnt_[id];
   if (allocator.get() == nullptr) {
-    allocator.reset(
-        new AllocatorFacadePrivate(/*allow_free_idle_chunk=*/false));
+    allocator = std::make_unique<AllocatorFacadePrivate>(
+        /*allow_free_idle_chunk=*/false);
     VLOG(10) << "Create memory pool for CUDA Graph with memory ID " << id;
   } else {
     VLOG(10) << "Use created memory pool for CUDA Graph with memory ID " << id;
...
@@ -25,7 +25,7 @@ BufferedAllocator::BufferedAllocator(std::shared_ptr<Allocator> allocator)
       platform::errors::InvalidArgument(
           "Underlying allocator of BufferedAllocator is NULL"));
   if (underlying_allocator_->IsAllocThreadSafe()) {
-    mtx_.reset(new std::mutex());
+    mtx_ = std::make_unique<std::mutex>();
   }
 }
...
@@ -21,11 +21,11 @@ namespace allocation {
 ThreadLocalAllocatorImpl::ThreadLocalAllocatorImpl(const platform::Place& p)
     : place_(p) {
   if (platform::is_gpu_place(place_)) {
-    buddy_allocator_.reset(new memory::detail::BuddyAllocator(
+    buddy_allocator_ = std::make_unique<memory::detail::BuddyAllocator>(
         std::unique_ptr<memory::detail::SystemAllocator>(
             new memory::detail::GPUAllocator(place_.device)),
         platform::GpuMinChunkSize(),
-        platform::GpuMaxChunkSize()));
+        platform::GpuMaxChunkSize());
   } else {
     PADDLE_THROW(platform::errors::Unavailable(
         "Thread local allocator only supports CUDAPlace now."));
...
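One detail worth noting in the hunk above: only the outer `reset` call becomes `std::make_unique`. The nested `std::unique_ptr<memory::detail::SystemAllocator>(new memory::detail::GPUAllocator(...))` keeps its raw `new`, likely because modernize-make-unique only rewrites allocations whose type matches the smart pointer's element type, and here a base-class `unique_ptr` owns a derived `GPUAllocator`.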
@@ -135,7 +135,7 @@ class RecordedGpuMallocHelper {
   explicit RecordedGpuMallocHelper(int dev_id, uint64_t limit_size = 0)
       : dev_id_(dev_id), limit_size_(limit_size) {
     if (NeedRecord()) {
-      mtx_.reset(new std::mutex());
+      mtx_ = std::make_unique<std::mutex>();
     }
     if (FLAGS_enable_gpu_memory_usage_log) {
...
@@ -1048,11 +1048,11 @@ void GPUContext::ClearDnnAttr() { return impl_->ClearDnnAttr(); }
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 GPUPinnedContext::GPUPinnedContext() {
-  eigen_device_.reset(new Eigen::DefaultDevice());
+  eigen_device_ = std::make_unique<Eigen::DefaultDevice>();
 }
 GPUPinnedContext::GPUPinnedContext(GPUPinnedPlace place) : place_(place) {
-  eigen_device_.reset(new Eigen::DefaultDevice());
+  eigen_device_ = std::make_unique<Eigen::DefaultDevice>();
 }
 Eigen::DefaultDevice* GPUPinnedContext::eigen_device() const {
...
@@ -60,7 +60,7 @@ bool Stream::Init(const Place& place,
   phi::DeviceManager::SetDevice(place_);
   device_->CreateStream(this, priority, flag);
-  callback_manager_.reset(new CallbackManager(this));
+  callback_manager_ = std::make_unique<CallbackManager>(this);
   VLOG(3) << "Init Stream: " << stream_ << ", place: " << place_
           << ", priority: " << static_cast<int>(priority)
           << ", flag:" << static_cast<int>(flag);
...
@@ -47,15 +47,16 @@ void ThreadPool::Init() {
         num_threads,
         0,
         phi::errors::InvalidArgument("The number of threads is 0."));
-    threadpool_.reset(new ThreadPool(num_threads));
+    threadpool_ = std::make_unique<ThreadPool>(num_threads);
   }
 }
 ThreadPool::ThreadPool(int num_threads) : running_(true) {
   threads_.resize(num_threads);
   for (auto& thread : threads_) {
     // TODO(Yancey1989): binding the thread on the specify CPU number
-    thread.reset(new std::thread(std::bind(&ThreadPool::TaskLoop, this)));
+    thread =
+        std::make_unique<std::thread>(std::bind(&ThreadPool::TaskLoop, this));
   }
 }
@@ -111,7 +112,7 @@ ThreadPool* ThreadPoolIO::GetInstanceIO() {
 void ThreadPoolIO::InitIO() {
   if (io_threadpool_.get() == nullptr) {
     // TODO(typhoonzero1986): make this configurable
-    io_threadpool_.reset(new ThreadPool(FLAGS_io_threadpool_size));
+    io_threadpool_ = std::make_unique<ThreadPool>(FLAGS_io_threadpool_size);
   }
 }
 }  // namespace phi