Unverified commit 45d49619, authored by Ruibin Cheung, committed by GitHub

[clang-tidy] enable modernize-make-unique (#55506)

Parent cd0f1523
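This commit enables clang-tidy's `modernize-make-unique` check in `.clang-tidy` and applies its fix-its across the codebase: each `ptr.reset(new T(args))` on a `std::unique_ptr` member becomes `ptr = std::make_unique<T>(args)`. `std::make_unique` (C++14) removes the explicit `new` and keeps allocation and ownership transfer in a single expression, so an exception thrown while evaluating other arguments of an enclosing call cannot leak the allocation. A minimal sketch of the rewrite, using a hypothetical `Widget` type rather than any Paddle class:

```cpp
#include <memory>
#include <utility>

struct Widget {
  explicit Widget(int size) : size_(size) {}
  int size_;
};

int main() {
  std::unique_ptr<Widget> w;

  // Before: an explicit new handed to reset(); this is what the check flags.
  w.reset(new Widget(42));

  // After: the fix-it applied throughout this commit; requires C++14.
  w = std::make_unique<Widget>(42);

  // make_unique forwards its arguments, so moved values work as before.
  std::unique_ptr<Widget> other = std::make_unique<Widget>(std::move(*w));
  return other->size_ == 42 ? 0 : 1;
}
```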
@@ -171,7 +171,7 @@ Checks: '
-modernize-deprecated-ios-base-aliases,
-modernize-loop-convert,
-modernize-make-shared,
-modernize-make-unique,
modernize-make-unique,
-modernize-pass-by-value,
-modernize-raw-string-literal,
-modernize-redundant-void-arg,
......
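In the `Checks` list of `.clang-tidy`, a leading `-` in front of a check name disables it, so the change above simply drops the `-` from `modernize-make-unique` to turn the check on, while the neighbouring `modernize-*` checks stay disabled.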
@@ -294,7 +294,7 @@ bool OpCompat::Judge(const OpDesc& op_desc, const std::string& pass_name) {
OpCompat& OpCompatSensiblePass::AddOpCompat(OpCompat&& op_compat) {
std::string name = op_compat.Name();
op_compat_judgers_[name].reset(new OpCompat(std::move(op_compat)));
op_compat_judgers_[name] = std::make_unique<OpCompat>(std::move(op_compat));
return *(op_compat_judgers_[name]);
}
......
@@ -457,7 +457,7 @@ void OpDesc::CopyFrom(const OpDesc &op_desc) {
// The record of original_id_ is only for auto parallel.
original_id_ = op_desc.original_id_;
if (op_desc.dist_attr_) {
dist_attr_.reset(new OperatorDistAttr(*op_desc.dist_attr_));
dist_attr_ = std::make_unique<OperatorDistAttr>(*op_desc.dist_attr_);
}
need_update_ = true;
}
@@ -1145,7 +1145,7 @@ OperatorDistAttr *OpDesc::MutableDistAttr() {
if (dist_attr_) {
return dist_attr_.get();
} else {
dist_attr_.reset(new OperatorDistAttr(*this));
dist_attr_ = std::make_unique<OperatorDistAttr>(*this);
return dist_attr_.get();
}
}
......
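The `OpDesc::MutableDistAttr` hunk above is the lazy-initialization variant of the rewrite: the member is created on first use and the cached raw pointer is returned afterwards. A self-contained sketch of that idiom, with a hypothetical `DistAttr`/`Desc` pair standing in for `OperatorDistAttr`/`OpDesc`:

```cpp
#include <memory>
#include <string>

struct DistAttr {
  explicit DistAttr(std::string owner) : owner_(std::move(owner)) {}
  std::string owner_;
};

class Desc {
 public:
  // Allocate the attribute lazily; later calls return the cached object.
  DistAttr* MutableDistAttr() {
    if (!dist_attr_) {
      // was: dist_attr_.reset(new DistAttr("Desc"));
      dist_attr_ = std::make_unique<DistAttr>("Desc");
    }
    return dist_attr_.get();
  }

 private:
  std::unique_ptr<DistAttr> dist_attr_;
};

int main() {
  Desc d;
  return d.MutableDistAttr() == d.MutableDistAttr() ? 0 : 1;  // same cached pointer
}
```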
@@ -27,7 +27,7 @@ VarDesc::VarDesc(const VarDesc &other)
attrs_(other.attrs_),
original_id_(other.original_id_) {
if (other.dist_attr_) {
dist_attr_.reset(new TensorDistAttr(*other.dist_attr_));
dist_attr_ = std::make_unique<TensorDistAttr>(*other.dist_attr_);
}
need_updated_ = true;
}
@@ -442,7 +442,7 @@ TensorDistAttr *VarDesc::MutableDistAttr() {
return dist_attr_.get();
} else {
auto shape = paddle::distributed::auto_parallel::get_tensor_shape(this);
dist_attr_.reset(new TensorDistAttr(shape));
dist_attr_ = std::make_unique<TensorDistAttr>(shape);
return dist_attr_.get();
}
need_updated_ = true;
......
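The `VarDesc` copy constructor above applies the same rewrite to a deep copy of an optional member: the new object allocates its own `TensorDistAttr` only when the source actually holds one. A minimal sketch of that idiom with hypothetical `Attr`/`Var` types:

```cpp
#include <memory>

struct Attr {
  int value = 0;
};

class Var {
 public:
  Var() = default;

  // Deep-copy the optional attribute instead of sharing it or leaving it null.
  Var(const Var& other) {
    if (other.attr_) {
      // was: attr_.reset(new Attr(*other.attr_));
      attr_ = std::make_unique<Attr>(*other.attr_);
    }
  }

  Attr* attr() const { return attr_.get(); }

  void set(int v) {
    attr_ = std::make_unique<Attr>();
    attr_->value = v;
  }

 private:
  std::unique_ptr<Attr> attr_;
};

int main() {
  Var a;
  a.set(7);
  Var b(a);                              // b owns its own copy of the attribute
  return b.attr() != a.attr() ? 0 : 1;   // distinct objects, same value
}
```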
@@ -53,18 +53,18 @@ PassStrategy *AnalysisConfig::pass_builder() const {
if (!pass_builder_.get()) {
if (use_gpu_) {
LOG(INFO) << "Create GPU IR passes";
pass_builder_.reset(new GpuPassStrategy);
pass_builder_ = std::make_unique<GpuPassStrategy>();
} else if (use_xpu_) {
pass_builder_.reset(new XpuPassStrategy);
pass_builder_ = std::make_unique<XpuPassStrategy>();
} else if (use_ipu_) {
LOG(INFO) << "Create IPU IR passes";
pass_builder_.reset(new IpuPassStrategy);
pass_builder_ = std::make_unique<IpuPassStrategy>();
} else if (use_custom_device_) {
LOG(INFO) << "Create CUSTOM DEVICE IR passes";
pass_builder_.reset(new CustomDevicePassStrategy);
pass_builder_ = std::make_unique<CustomDevicePassStrategy>();
} else {
LOG(INFO) << "Create CPU IR passes";
pass_builder_.reset(new CpuPassStrategy);
pass_builder_ = std::make_unique<CpuPassStrategy>();
}
} else if (pass_builder_->use_gpu() ^ use_gpu()) {
LOG(WARNING) << "The use_gpu flag is not compatible between Config and "
@@ -577,20 +577,20 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
false,
platform::errors::InvalidArgument(
"Only one choice can be made between CPU and XPU."));
pass_builder_.reset(new GpuPassStrategy(
*static_cast<GpuPassStrategy *>(other.pass_builder())));
pass_builder_ = std::make_unique<GpuPassStrategy>(
*static_cast<GpuPassStrategy *>(other.pass_builder()));
} else if (use_ipu_) {
pass_builder_.reset(new IpuPassStrategy(
*static_cast<IpuPassStrategy *>(other.pass_builder())));
pass_builder_ = std::make_unique<IpuPassStrategy>(
*static_cast<IpuPassStrategy *>(other.pass_builder()));
} else if (use_xpu_) {
pass_builder_.reset(new XpuPassStrategy(
*static_cast<XpuPassStrategy *>(other.pass_builder())));
pass_builder_ = std::make_unique<XpuPassStrategy>(
*static_cast<XpuPassStrategy *>(other.pass_builder()));
} else if (use_custom_device_) {
pass_builder_.reset(new CustomDevicePassStrategy(
*static_cast<CustomDevicePassStrategy *>(other.pass_builder())));
pass_builder_ = std::make_unique<CustomDevicePassStrategy>(
*static_cast<CustomDevicePassStrategy *>(other.pass_builder()));
} else {
pass_builder_.reset(new CpuPassStrategy(
*static_cast<CpuPassStrategy *>(other.pass_builder())));
pass_builder_ = std::make_unique<CpuPassStrategy>(
*static_cast<CpuPassStrategy *>(other.pass_builder()));
}
#undef CP_MEMBER
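In the `AnalysisConfig` copy constructor, the rewritten lines clone a polymorphic member: the concrete strategy type is already known from the configuration flags, so the copy is built with `std::make_unique<Derived>` from a `static_cast` of the base pointer, and the resulting `std::unique_ptr<Derived>` converts implicitly to the base `std::unique_ptr`. A generic sketch of that clone-by-known-type pattern (the tiny `PassStrategy` hierarchy below is a stand-in, not Paddle's real one):

```cpp
#include <memory>

struct PassStrategy {
  virtual ~PassStrategy() = default;
  virtual bool use_gpu() const { return false; }
};

struct GpuPassStrategy : PassStrategy {
  bool use_gpu() const override { return true; }
};

struct CpuPassStrategy : PassStrategy {};

// Copy 'other' into a fresh owner, picking the dynamic type from a flag the
// caller already tracks (as AnalysisConfig does with use_gpu_ and friends).
std::unique_ptr<PassStrategy> CloneStrategy(const PassStrategy& other,
                                            bool use_gpu) {
  if (use_gpu) {
    return std::make_unique<GpuPassStrategy>(
        static_cast<const GpuPassStrategy&>(other));
  }
  return std::make_unique<CpuPassStrategy>(
      static_cast<const CpuPassStrategy&>(other));
}

int main() {
  GpuPassStrategy gpu;
  auto copy = CloneStrategy(gpu, /*use_gpu=*/true);
  return copy->use_gpu() ? 0 : 1;
}
```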
@@ -663,7 +663,7 @@ void AnalysisConfig::SetMkldnnCacheCapacity(int capacity) {
void AnalysisConfig::EnableMkldnnQuantizer() {
#ifdef PADDLE_WITH_MKLDNN
if (!mkldnn_quantizer_config_)
mkldnn_quantizer_config_.reset(new MkldnnQuantizerConfig());
mkldnn_quantizer_config_ = std::make_unique<MkldnnQuantizerConfig>();
use_mkldnn_quantizer_ = true;
#else
LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
@@ -850,54 +850,54 @@ void AnalysisConfig::Update() {
((use_ipu() ^ pass_builder_->use_ipu())) ||
((use_custom_device() ^ pass_builder_->use_custom_device()))) {
if (use_gpu()) {
pass_builder_.reset(new GpuPassStrategy);
pass_builder_ = std::make_unique<GpuPassStrategy>();
} else if (use_ipu()) {
pass_builder_.reset(new IpuPassStrategy);
pass_builder_ = std::make_unique<IpuPassStrategy>();
} else if (use_xpu()) {
PADDLE_ENFORCE_EQ(
use_gpu(),
false,
platform::errors::InvalidArgument(
"Only one choice can be made between CPU and XPU."));
pass_builder_.reset(new XpuPassStrategy);
pass_builder_ = std::make_unique<XpuPassStrategy>();
} else if (use_custom_device()) {
PADDLE_ENFORCE_EQ(
use_gpu(),
false,
platform::errors::InvalidArgument(
"Only one choice can be made between GPU and CustomDevice."));
pass_builder_.reset(new CustomDevicePassStrategy);
pass_builder_ = std::make_unique<CustomDevicePassStrategy>();
} else {
pass_builder_.reset(new CpuPassStrategy);
pass_builder_ = std::make_unique<CpuPassStrategy>();
}
} else {
if (use_gpu()) {
pass_builder_.reset(new GpuPassStrategy(
*static_cast<GpuPassStrategy *>(pass_builder_.get())));
pass_builder_ = std::make_unique<GpuPassStrategy>(
*static_cast<GpuPassStrategy *>(pass_builder_.get()));
} else if (use_ipu()) {
VLOG(1) << "IpuPassStrategy has been used.";
pass_builder_.reset(new IpuPassStrategy(
*static_cast<IpuPassStrategy *>(pass_builder_.get())));
pass_builder_ = std::make_unique<IpuPassStrategy>(
*static_cast<IpuPassStrategy *>(pass_builder_.get()));
} else if (use_xpu()) {
PADDLE_ENFORCE_EQ(
use_gpu(),
false,
platform::errors::InvalidArgument(
"Only one choice can be made between CPU and XPU."));
pass_builder_.reset(new XpuPassStrategy(
*static_cast<XpuPassStrategy *>(pass_builder_.get())));
pass_builder_ = std::make_unique<XpuPassStrategy>(
*static_cast<XpuPassStrategy *>(pass_builder_.get()));
} else if (use_custom_device()) {
PADDLE_ENFORCE_EQ(
use_gpu(),
false,
platform::errors::InvalidArgument(
"Only one choice can be made between GPU and CustomDevice."));
pass_builder_.reset(new CustomDevicePassStrategy(
*static_cast<CustomDevicePassStrategy *>(pass_builder_.get())));
pass_builder_ = std::make_unique<CustomDevicePassStrategy>(
*static_cast<CustomDevicePassStrategy *>(pass_builder_.get()));
} else {
pass_builder_.reset(new CpuPassStrategy(
*static_cast<CpuPassStrategy *>(pass_builder_.get())));
pass_builder_ = std::make_unique<CpuPassStrategy>(
*static_cast<CpuPassStrategy *>(pass_builder_.get()));
}
}
......
@@ -1454,8 +1454,8 @@ void AllocatorFacade::PrepareMemoryPoolForCUDAGraph(int64_t id) {
auto& allocator = cuda_graph_map_[id];
auto& ref_cnt = cuda_graph_ref_cnt_[id];
if (allocator.get() == nullptr) {
allocator.reset(
new AllocatorFacadePrivate(/*allow_free_idle_chunk=*/false));
allocator = std::make_unique<AllocatorFacadePrivate>(
/*allow_free_idle_chunk=*/false);
VLOG(10) << "Create memory pool for CUDA Graph with memory ID " << id;
} else {
VLOG(10) << "Use created memory pool for CUDA Graph with memory ID " << id;
......
@@ -25,7 +25,7 @@ BufferedAllocator::BufferedAllocator(std::shared_ptr<Allocator> allocator)
platform::errors::InvalidArgument(
"Underlying allocator of BufferedAllocator is NULL"));
if (underlying_allocator_->IsAllocThreadSafe()) {
mtx_.reset(new std::mutex());
mtx_ = std::make_unique<std::mutex>();
}
}
......
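In `BufferedAllocator` above (and again in `RecordedGpuMallocHelper` below), the rewritten line allocates an optional `std::mutex`: the mutex only exists when the object has to be thread safe, and a null pointer means no locking is performed. A small self-contained sketch of that conditional-mutex idiom, using a hypothetical `Counter` class:

```cpp
#include <memory>
#include <mutex>

class Counter {
 public:
  explicit Counter(bool thread_safe) {
    if (thread_safe) {
      // was: mtx_.reset(new std::mutex());
      mtx_ = std::make_unique<std::mutex>();
    }
  }

  void Add(int v) {
    if (mtx_) {  // lock only when a mutex was created
      std::lock_guard<std::mutex> guard(*mtx_);
      value_ += v;
    } else {
      value_ += v;
    }
  }

  int value() const { return value_; }

 private:
  std::unique_ptr<std::mutex> mtx_;  // null => single-threaded use
  int value_ = 0;
};

int main() {
  Counter c(/*thread_safe=*/true);
  c.Add(3);
  return c.value() == 3 ? 0 : 1;
}
```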
@@ -21,11 +21,11 @@ namespace allocation {
ThreadLocalAllocatorImpl::ThreadLocalAllocatorImpl(const platform::Place& p)
: place_(p) {
if (platform::is_gpu_place(place_)) {
buddy_allocator_.reset(new memory::detail::BuddyAllocator(
buddy_allocator_ = std::make_unique<memory::detail::BuddyAllocator>(
std::unique_ptr<memory::detail::SystemAllocator>(
new memory::detail::GPUAllocator(place_.device)),
platform::GpuMinChunkSize(),
platform::GpuMaxChunkSize()));
platform::GpuMaxChunkSize());
} else {
PADDLE_THROW(platform::errors::Unavailable(
"Thread local allocator only supports CUDAPlace now."));
......
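Note that the nested `std::unique_ptr<memory::detail::SystemAllocator>(new memory::detail::GPUAllocator(...))` argument is left untouched; only the outer `reset` call is rewritten. Presumably this is because the type being allocated (`GPUAllocator`) differs from the `unique_ptr`'s element type (`SystemAllocator`), a combination the check's fix-it does not rewrite.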
@@ -135,7 +135,7 @@ class RecordedGpuMallocHelper {
explicit RecordedGpuMallocHelper(int dev_id, uint64_t limit_size = 0)
: dev_id_(dev_id), limit_size_(limit_size) {
if (NeedRecord()) {
mtx_.reset(new std::mutex());
mtx_ = std::make_unique<std::mutex>();
}
if (FLAGS_enable_gpu_memory_usage_log) {
......
@@ -1048,11 +1048,11 @@ void GPUContext::ClearDnnAttr() { return impl_->ClearDnnAttr(); }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
GPUPinnedContext::GPUPinnedContext() {
eigen_device_.reset(new Eigen::DefaultDevice());
eigen_device_ = std::make_unique<Eigen::DefaultDevice>();
}
GPUPinnedContext::GPUPinnedContext(GPUPinnedPlace place) : place_(place) {
eigen_device_.reset(new Eigen::DefaultDevice());
eigen_device_ = std::make_unique<Eigen::DefaultDevice>();
}
Eigen::DefaultDevice* GPUPinnedContext::eigen_device() const {
......
@@ -60,7 +60,7 @@ bool Stream::Init(const Place& place,
phi::DeviceManager::SetDevice(place_);
device_->CreateStream(this, priority, flag);
callback_manager_.reset(new CallbackManager(this));
callback_manager_ = std::make_unique<CallbackManager>(this);
VLOG(3) << "Init Stream: " << stream_ << ", place: " << place_
<< ", priority: " << static_cast<int>(priority)
<< ", flag:" << static_cast<int>(flag);
......
@@ -47,15 +47,16 @@ void ThreadPool::Init() {
num_threads,
0,
phi::errors::InvalidArgument("The number of threads is 0."));
threadpool_.reset(new ThreadPool(num_threads));
threadpool_ = std::make_unique<ThreadPool>(num_threads);
}
}
ThreadPool::ThreadPool(int num_threads) : running_(true) {
threads_.resize(num_threads);
for (auto& thread : threads_) {
// TODO(Yancey1989): binding the thread on the specify CPU number
thread.reset(new std::thread(std::bind(&ThreadPool::TaskLoop, this)));
// TODO(Yancey1989): binding the thread on the specify CPU number
thread =
std::make_unique<std::thread>(std::bind(&ThreadPool::TaskLoop, this));
}
}
@@ -111,7 +112,7 @@ ThreadPool* ThreadPoolIO::GetInstanceIO() {
void ThreadPoolIO::InitIO() {
if (io_threadpool_.get() == nullptr) {
// TODO(typhoonzero1986): make this configurable
io_threadpool_.reset(new ThreadPool(FLAGS_io_threadpool_size));
io_threadpool_ = std::make_unique<ThreadPool>(FLAGS_io_threadpool_size);
}
}
} // namespace phi
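The `ThreadPool` hunks apply the rewrite to workers stored as `std::unique_ptr<std::thread>` and started from a bound member function. A stripped-down, self-contained version of that startup/join pattern (a hypothetical `MiniPool`, not Paddle's `ThreadPool`):

```cpp
#include <functional>
#include <memory>
#include <thread>
#include <vector>

class MiniPool {
 public:
  explicit MiniPool(int num_threads) {
    threads_.resize(num_threads);
    for (auto& thread : threads_) {
      // was: thread.reset(new std::thread(std::bind(&MiniPool::TaskLoop, this)));
      thread =
          std::make_unique<std::thread>(std::bind(&MiniPool::TaskLoop, this));
    }
  }

  ~MiniPool() {
    for (auto& thread : threads_) {
      if (thread && thread->joinable()) thread->join();
    }
  }

 private:
  void TaskLoop() {
    // A real pool would block on a task queue here; the sketch just returns.
  }

  std::vector<std::unique_ptr<std::thread>> threads_;
};

int main() {
  MiniPool pool(4);  // workers start, run TaskLoop, and are joined on exit
  return 0;
}
```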