Unverified commit 25f78de0, authored by Zhenghai Zhang, committed by GitHub

[clang-tidy] NO.33,64 enable `bugprone-signed-char-misuse`,`clang-analyzer-optin.portability.UnixAPI` check (#56744)

* enable the bugprone-signed-char-misuse and clang-analyzer-optin.portability.UnixAPI checks

* fix bugprone-signed-char-misuse

* fix bug
Parent 54e44751
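
Note for reviewers: `bugprone-signed-char-misuse` warns when a `signed char` value is widened to a larger integer type, because byte values above 0x7F silently arrive as negative numbers. Most `// NOLINT` markers in this diff sit on `int dev_id = place.device;`-style lines, presumably because the `device` field of Paddle's place types is an 8-bit signed integer. A minimal sketch of the pattern and its suppression (hypothetical names, not code from this PR):

    #include <cstdint>

    struct PlaceLike {
      int8_t device;  // compact device-id field; int8_t is typically a
                      // typedef of signed char, which this check targets
    };

    int DeviceId(const PlaceLike& p) {
      // Without the trailing NOLINT, clang-tidy flags this implicit
      // 'signed char' -> 'int' widening as bugprone-signed-char-misuse.
      // The comment marks the conversion as intentional, which is how
      // this commit handles each occurrence.
      int dev_id = p.device;  // NOLINT
      return dev_id;
    }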
@@ -24,7 +24,7 @@ bugprone-misplaced-widening-cast,
 -bugprone-not-null-terminated-result,
 -bugprone-parent-virtual-call,
 -bugprone-posix-return,
--bugprone-signed-char-misuse,
+bugprone-signed-char-misuse,
 -bugprone-sizeof-container,
 -bugprone-sizeof-expression,
 -bugprone-string-constructor,
@@ -91,7 +91,7 @@ clang-analyzer-optin.cplusplus.UninitializedObject,
 -clang-analyzer-optin.osx.cocoa.localizability.NonLocalizedStringChecker,
 -clang-analyzer-optin.performance.GCDAntipattern,
 -clang-analyzer-optin.performance.Padding,
--clang-analyzer-optin.portability.UnixAPI,
+clang-analyzer-optin.portability.UnixAPI,
 -clang-analyzer-osx.API,
 -clang-analyzer-osx.MIG,
 -clang-analyzer-osx.NSOrCFErrorDerefChecker,
...
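
For readers unfamiliar with the format: in `.clang-tidy`, a check whose name is prefixed with `-` in the `Checks` list is disabled, so the two hunks above enable `bugprone-signed-char-misuse` and `clang-analyzer-optin.portability.UnixAPI` simply by dropping that prefix.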
@@ -321,7 +321,7 @@ void AllReduceOpHandle::NCCLAllReduceFunc(
 void AllReduceOpHandle::SyncNCCLAllReduce() {
   if (FLAGS_sync_nccl_allreduce) {
     for (auto &p : places_) {
-      int dev_id = p.device;
+      int dev_id = p.device;  // NOLINT
       auto *nccl_ctxs =
           nccl_ctxs_->GetRunEnvNCCLCtx(run_order_, use_hierarchical_allreduce_);
       auto &nccl_ctx = nccl_ctxs->at(dev_id);
...
@@ -90,7 +90,7 @@ void BroadcastOpHandle::BroadcastOneVar(
   } else if (platform::is_gpu_place(in_tensor.place())) {
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
     VarHandle *out_handle = nullptr;
-    int root_id = in_tensor.place().device;
+    int root_id = in_tensor.place().device;  // NOLINT
     std::vector<std::function<void()>> broadcast_calls;
     int type = platform::ToNCCLDataType(
@@ -101,7 +101,7 @@ void BroadcastOpHandle::BroadcastOneVar(
       Variable *out_var = var_scopes.at(out_var_handle->scope_idx())
                               ->FindVar(out_var_handle->name());
-      int dst_id = out_var_handle->place().device;
+      int dst_id = out_var_handle->place().device;  // NOLINT
       auto &nccl_ctx = nccl_ctxs_->at(dst_id);
...
@@ -47,7 +47,7 @@ OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW {  // NOLINT
 void OpHandleBase::InitCUDA() {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   for (auto &p : dev_ctxes_) {
-    int dev_id = p.first.device;
+    int dev_id = p.first.device;  // NOLINT
     platform::SetDeviceId(dev_id);
 #ifdef PADDLE_WITH_HIP
     PADDLE_ENFORCE_GPU_SUCCESS(
@@ -61,7 +61,7 @@ void OpHandleBase::InitCUDA() {
     for (auto &out_var : outputs_) {
       auto *out_var_handle = dynamic_cast<VarHandle *>(out_var);
       if (out_var_handle) {
-        int dev_id = out_var_handle->place().device;
+        int dev_id = out_var_handle->place().device;  // NOLINT
         out_var_handle->SetGenerateEvent(events_.at(dev_id));
       }
     }
@@ -74,7 +74,7 @@ void OpHandleBase::InitCUDA() {
             Name(),
             dev_ctxes_.size()));
     auto &place = dev_ctxes_.begin()->first;
-    int dev_id = place.device;
+    int dev_id = place.device;  // NOLINT
     for (auto &out_var : outputs_) {
       auto *out_var_handle = dynamic_cast<VarHandle *>(out_var);
       if (out_var_handle) {
...
@@ -45,7 +45,7 @@ static std::vector<std::unique_ptr<ir::Graph>> SeparateMultiDevicesGraph(
   for (auto &op : op_handles) {
     auto &dev_ctx = op->DeviceContext();
     auto &p = dev_ctx.begin()->first;
-    int dev_id = p.device;
+    int dev_id = p.device;  // NOLINT
     auto &dev_dummys = graphs[dev_id]->Get<GraphDepVars>(kGraphDepVars);
     graphs[dev_id]->AddNode(graph->RemoveNode(op->Node()).release());
...
@@ -189,13 +189,13 @@ void ReduceOpHandle::RunImpl() {
         out_var_handle->place(), pre_in.dtype());
     auto out_p = out_var_handle->place();
-    int root_id = out_p.device;
+    int root_id = out_p.device;  // NOLINT
     std::vector<std::function<void()>> all_reduce_calls;
     for (size_t i = 0; i < var_scopes.size(); ++i) {
       auto &p = in_places[i];
       auto &lod_tensor = *lod_tensors[i];
-      int dev_id = p.device;
+      int dev_id = p.device;  // NOLINT
       auto &nccl_ctx = nccl_ctxs_->at(dev_id);
       void *buffer = const_cast<void *>(lod_tensor.data());
...
@@ -99,7 +99,7 @@ struct DLDeviceVisitor {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
     ::DLDevice device;
     device.device_type = kDLGPU;
-    device.device_id = place.device;
+    device.device_id = place.device;  // NOLINT
     return device;
 #else
     PADDLE_THROW(platform::errors::Unavailable(
...
@@ -80,7 +80,7 @@ void NCCLParallelContext::Init() {
   }
   BcastNCCLId(nccl_ids, 0, server_fd);
-  int gpu_id = place_.device;
+  int gpu_id = place_.device;  // NOLINT
   for (int ring_id = 0; ring_id < strategy_.nrings_; ring_id++) {
     VLOG(0) << "init nccl context nranks: " << strategy_.nranks_
             << " local rank: " << strategy_.local_rank_ << " gpu id: " << gpu_id
@@ -115,7 +115,7 @@ void NCCLParallelContext::InitWithRingID(int ring_id) {
   }
   BcastNCCLId(nccl_ids, 0, server_fd);
-  int gpu_id = place_.device;
+  int gpu_id = place_.device;  // NOLINT
   VLOG(0) << "init nccl context nranks: " << strategy_.nranks_
           << " local rank: " << strategy_.local_rank_ << " gpu id: " << gpu_id
           << " ring id: " << ring_id;
...
@@ -99,7 +99,7 @@ void XCCLParallelContext::Init() {
   }
   BcastXCCLId(xccl_ids, 0, server_fd);
-  int dev_id = place_.device;
+  int dev_id = place_.device;  // NOLINT
   for (int ring_id = 0; ring_id < strategy_.nrings_; ring_id++) {
     VLOG(0) << "init nccl context nranks: " << strategy_.nranks_
             << " local rank: " << strategy_.local_rank_ << " dev id: " << dev_id
@@ -136,7 +136,7 @@ void XCCLParallelContext::InitWithRingID(int ring_id) {
   }
   BcastXCCLId(xccl_ids, 0, server_fd);
-  int dev_id = place_.device;
+  int dev_id = place_.device;  // NOLINT
   VLOG(0) << "init xccl context nranks: " << strategy_.nranks_
           << " local rank: " << strategy_.local_rank_ << " dev id: " << dev_id
           << " ring id: " << ring_id;
...
@@ -48,7 +48,7 @@ void CUDAManagedAllocator::FreeImpl(phi::Allocation* allocation) {
 phi::Allocation* CUDAManagedAllocator::AllocateImpl(size_t size) {
   std::call_once(once_flag_, [this] { platform::SetDeviceId(place_.device); });
-  int dev_id = place_.device;
+  int dev_id = place_.device;  // NOLINT
   void* ptr;
   auto result = platform::RecordedGpuMalloc(&ptr,
                                             size,
...
@@ -45,7 +45,7 @@ CUDAVirtualMemAllocator::CUDAVirtualMemAllocator(
   // case, the allocation will be pinnded device memory local to a given device.
   prop.type = CU_MEM_ALLOCATION_TYPE_PINNED;
   prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
-  prop.location.id = place.device;
+  prop.location.id = place.device;  // NOLINT
   prop_ = prop;
   // Prepare the access descriptor array indicating where and how the backings
...
@@ -50,7 +50,7 @@ BufferedReader::BufferedReader(
   VLOG(1) << "BufferedReader";
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   if (platform::is_gpu_place(place_) && !pin_memory) {
-    int dev_idx = place_.device;
+    int dev_idx = place_.device;  // NOLINT
     compute_stream_ =
         ((phi::GPUContext *)(platform::DeviceContextPool::Instance().Get(
             place_)))
...
@@ -27,7 +27,7 @@ struct CustomDeviceEventWrapper {
         platform::errors::PreconditionNotMet(
             "Required device shall be CustomPlace, but received %d. ", place));
-    device_id_ = place.device;
+    device_id_ = place.device;  // NOLINT
     PADDLE_ENFORCE_GT(
         device_id_,
         -1,
...
@@ -27,7 +27,7 @@ struct CUDADeviceEventWrapper {
         platform::errors::PreconditionNotMet(
             "Required device shall be CUDAPlace, but received %d. ", place));
-    device_id_ = place.device;
+    device_id_ = place.device;  // NOLINT
     PADDLE_ENFORCE_GT(
         device_id_,
         -1,
...
@@ -420,7 +420,7 @@ struct iinfo {
         dtype = "int64";
         break;
       case framework::proto::VarType::INT8:
-        min = std::numeric_limits<int8_t>::min();
+        min = std::numeric_limits<int8_t>::min();  // NOLINT
         max = std::numeric_limits<int8_t>::max();
         bits = 8;
         dtype = "int8";
...
@@ -935,7 +935,7 @@ class CustomDevice : public DeviceInterface {
  private:
   inline int PlaceToIdNoCheck(const Place& place) {
-    int dev_id = place.GetDeviceId();
+    int dev_id = place.GetDeviceId();  // NOLINT
     return dev_id;
   }
...
@@ -92,7 +92,7 @@ void CrossEntropyWithSoftmaxGradCPUKernel(const CPUContext& dev_ctx,
       for (int j = 0; j < remain; j++) {  // for each sample_other_dims
         int idx = i * remain + j;  // this sample's label_idx. for 1d case,
                                    // remain=1 and j=0, so, idx = i
-        auto lbl = static_cast<int64_t>(label_data[idx]);
+        auto lbl = static_cast<int64_t>(label_data[idx]);  // NOLINT
         if (lbl == ignore_index) {
           for (int k = 0; k < axis_dim; ++k) {  // for each class id's label
             logit_grad_data[i * d + k * remain + j] = 0;
@@ -144,7 +144,7 @@ void CrossEntropyWithSoftmaxGradCPUKernel(const CPUContext& dev_ctx,
       for (int j = 0; j < remain; j++) {  // for each sample_other_dims
         int idx = i * remain + j;  // this sample's label_idx. for 1d case,
                                    // remain=1 and j=0, so, idx = i
-        auto lbl = static_cast<int64_t>(label_data[idx]);
+        auto lbl = static_cast<int64_t>(label_data[idx]);  // NOLINT
         if (lbl == ignore_index) {
           for (int k = 0; k < axis_dim; ++k) {  // for each class id's label
             logit_grad_data[i * d + k * remain + j] = 0;
...
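
The two hunks above are the same fix applied twice: the kernel is templated on the label type, and when it is instantiated with an 8-bit signed label type, even the explicit widening to `int64_t` presumably trips `bugprone-signed-char-misuse`, so the intentional casts are marked `// NOLINT` rather than changing the kernel's instantiations.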
@@ -66,8 +66,8 @@ void Array2Poly(const T* box,
   (*poly).contour =
       (phi::funcs::gpc_vertex_list*)malloc(sizeof(phi::funcs::gpc_vertex_list));
   (*poly).contour->num_vertices = static_cast<int>(pts_num);
-  (*poly).contour->vertex =
-      (phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num);
+  (*poly).contour->vertex = (phi::funcs::gpc_vertex*)malloc(
+      sizeof(phi::funcs::gpc_vertex) * pts_num);  // NOLINT
   for (size_t i = 0; i < pts_num; ++i) {
     (*poly).contour->vertex[i].x = box[2 * i];
     (*poly).contour->vertex[i].y = box[2 * i + 1];
...
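
Unlike the rest of the diff, the NOLINT in `Array2Poly` plausibly answers `clang-analyzer-optin.portability.UnixAPI`, which flags allocation calls such as `malloc(0)` whose behavior is implementation-defined, and the analyzer cannot rule out `pts_num == 0` here. A hedged sketch of the guard one could use instead of suppression (hypothetical helper, not part of this PR):

    #include <cstddef>
    #include <cstdlib>

    // Return nullptr for zero-size requests instead of relying on the
    // implementation-defined result of malloc(0), which the
    // clang-analyzer-optin.portability.UnixAPI checker warns about.
    void* checked_malloc(std::size_t n) {
      return n == 0 ? nullptr : std::malloc(n);
    }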
@@ -51,7 +51,7 @@ struct HardLabelCrossEntropyCPUFunctorImpl {
     const auto* label_data = labels_->template data<U>();
     for (int i = 0; i < batch_size; ++i) {
       for (int j = 0; j < num_remain; j++) {
-        int lbl = static_cast<int>(label_data[i * num_remain + j]);
+        int lbl = static_cast<int>(label_data[i * num_remain + j]);  // NOLINT
         if (lbl != ignore_index_) {
           PADDLE_ENFORCE_GE(
               lbl,
...