diff --git a/.clang-tidy b/.clang-tidy
index 4f548de850ceb1d58ce1ed8f667ca9f61001c7da..2724875b6ffcc8ac3ae6e20dd45a830310a101c7 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -179,7 +179,7 @@ modernize-redundant-void-arg,
 -modernize-replace-random-shuffle,
 -modernize-shrink-to-fit,
 -modernize-unary-static-assert,
--modernize-use-bool-literals,
+modernize-use-bool-literals,
 modernize-use-emplace,
 modernize-use-equals-default,
 -modernize-use-equals-delete,
diff --git a/paddle/fluid/framework/hogwild_worker.cc b/paddle/fluid/framework/hogwild_worker.cc
index 4b2d089d628b4233409d42e91285d6f4a681a001..199978a6c4f6cebdf6bc72150394e63444af26ca 100644
--- a/paddle/fluid/framework/hogwild_worker.cc
+++ b/paddle/fluid/framework/hogwild_worker.cc
@@ -213,7 +213,7 @@ void HogwildWorker::TrainFilesWithProfiler() {
 #if defined(PADDLE_WITH_GPU_GRAPH) && defined(PADDLE_WITH_HETERPS)
   device_reader_->InitGraphTrainResource();
 #endif
-  while (1) {
+  while (true) {
     cur_batch = device_reader_->Next();
 #if defined(PADDLE_WITH_GPU_GRAPH)
     if (is_multi_node) {
@@ -348,7 +348,7 @@ void HogwildWorker::TrainFiles() {
 #if defined(PADDLE_WITH_GPU_GRAPH) && defined(PADDLE_WITH_HETERPS)
   device_reader_->InitGraphTrainResource();
 #endif
-  while (1) {
+  while (true) {
     cur_batch = device_reader_->Next();
 #if defined(PADDLE_WITH_GPU_GRAPH)
     if (is_multi_node) {
diff --git a/paddle/fluid/framework/io/shell.cc b/paddle/fluid/framework/io/shell.cc
index 312122b45f3d637814504469c067c3d07cb5a949..46456df6e68f930029d42f6c760ce7c4a756931c 100644
--- a/paddle/fluid/framework/io/shell.cc
+++ b/paddle/fluid/framework/io/shell.cc
@@ -152,7 +152,7 @@ static int shell_popen_fork_internal(const char* real_cmd,
 
 static int read_from_pipe(FILE* fp, std::string* output) {
   std::array<char, 4096> buf;
-  while (1) {
+  while (true) {
     int n = fread(buf.data(), 1, 4096, fp);
     if (n <= 0) {
       break;
diff --git a/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc b/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc
index b75c3365c0529bea7694cf68bd01b75f6537906b..4a5ba86e8e0320202fa6da2b7d1864a03067830e 100644
--- a/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc
+++ b/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc
@@ -153,7 +153,7 @@ bool TransferLayoutElimPass::AllInputIsTransferlayout(
   for (auto var : op_node->inputs) {
     // If this input is a 1D persistable tensor,we allow transfer_layout not
     // appear before this var, but temporarily diasble this if.
-    if (var->Var()->Persistable() && 0) {
+    if (var->Var()->Persistable() && false) {
       auto var_dims =
           scope->FindVar(var->Name())->GetMutable<phi::DenseTensor>()->dims();
       if (var_dims.size() == 1) {
diff --git a/paddle/fluid/framework/trainer.cc b/paddle/fluid/framework/trainer.cc
index 96bf42559d117b74d4d040f39479fcc91b7f4059..280937a340a048023a6b790e93fef12d615bf305 100644
--- a/paddle/fluid/framework/trainer.cc
+++ b/paddle/fluid/framework/trainer.cc
@@ -59,7 +59,7 @@ void TrainerBase::DumpWork(int tid) {
   // GetDumpPath is implemented in each Trainer
   std::string path = GetDumpPath(tid);
   std::shared_ptr<FILE> fp = fs_open_write(path, &err_no, dump_converter_);
-  while (1) {
+  while (true) {
     std::string out_str;
     if (!queue_->Get(out_str)) {
       break;
diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc b/paddle/fluid/inference/analysis/ir_pass_manager.cc
index fdc80bb9a3eb920cc4a1e70c811874e07a6af529..b2343d130314eb69b0675a732aabe1bb7e33c008 100644
--- a/paddle/fluid/inference/analysis/ir_pass_manager.cc
+++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc
@@ -328,10 +328,10 @@ void IRPassManager::CreatePasses(Argument *argument,
                     argument->nnadapter_model_cache_token()));
     } else if (pass_name == "fc_fuse_pass") {
       pass->Set("use_gpu", new bool(argument->use_gpu()));
-      bool fc_mkldnn_pass = 0;
+      bool fc_mkldnn_pass = false;
       for (const std::string &pass_n : passes) {
         if (pass_n == "fc_mkldnn_pass") {
-          fc_mkldnn_pass = 1;
+          fc_mkldnn_pass = true;
         }
       }
       bool use_fc_padding = !fc_mkldnn_pass && argument->use_fc_padding();
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index a07de4d47aa0e1487177d7aa67c2b848078d5612..63c1035ee157736a2f657c0fadd2b141f071d6f7 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -1708,7 +1708,7 @@ CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
   // TODO(NHZlX): Should add the link to the doc of
   // paddle_infer::CreatePredictor
   if (config.glog_info_disabled()) {
-    FLAGS_logtostderr = 1;
+    FLAGS_logtostderr = true;
     FLAGS_minloglevel = 2;  // GLOG_ERROR
   }
 
diff --git a/paddle/fluid/inference/api/onnxruntime_predictor.cc b/paddle/fluid/inference/api/onnxruntime_predictor.cc
index 232595378d2e415e6b421189f7a4f9af308ea039..4f8435ca505c0e52b925a412a6e1dd3d3681c5f0 100644
--- a/paddle/fluid/inference/api/onnxruntime_predictor.cc
+++ b/paddle/fluid/inference/api/onnxruntime_predictor.cc
@@ -198,7 +198,7 @@ std::unique_ptr<PaddlePredictor>
 CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kONNXRuntime>(
     const AnalysisConfig &config) {
   if (config.glog_info_disabled()) {
-    FLAGS_logtostderr = 1;
+    FLAGS_logtostderr = true;
     FLAGS_minloglevel = 2;  // GLOG_ERROR
   }
 
diff --git a/paddle/fluid/memory/allocation/buddy_allocator.cc b/paddle/fluid/memory/allocation/buddy_allocator.cc
index 8de464754cb353b4b078088acc83c533e138a104..7c25170e56cbb18d68023f0c0914652dda714e73 100644
--- a/paddle/fluid/memory/allocation/buddy_allocator.cc
+++ b/paddle/fluid/memory/allocation/buddy_allocator.cc
@@ -282,7 +282,7 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool(
 BuddyAllocator::PoolSet::iterator BuddyAllocator::FindExistChunk(size_t size) {
   size_t index = 0;
 
-  while (1) {
+  while (true) {
     auto it = pool_.lower_bound(IndexSizeAddress(index, size, nullptr));
 
     // no match chunk memory
diff --git a/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cc b/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cc
index f2fc0ee0c98e44106b53d5e18132d72d4c3a10b7..6f2c61a5cf4701c6217b56a959bf1a78fea41a29 100644
--- a/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cc
+++ b/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cc
@@ -107,11 +107,11 @@ class EmbeddingEltWiseLayerNormOp : public framework::OperatorWithKernel {
       const framework::ExecutionContext& ctx) const override {
     auto inputs = ctx.MultiInput<phi::DenseTensor>("Embs");
     auto input_data_type = framework::proto::VarType::Type(0);
-    bool flag = 0;
+    bool flag = false;
     for (auto* input : inputs) {
       if (input->IsInitialized() && input->numel() > 0) {
         input_data_type = framework::TransToProtoVarType(input->dtype());
-        flag = 1;
+        flag = true;
         break;
       }
     }
diff --git a/paddle/fluid/operators/fused/fused_seqpool_cvm_op.cc b/paddle/fluid/operators/fused/fused_seqpool_cvm_op.cc
index 3b086dfd98cf12549e296344ddc36beba7742c94..f6923d73c732507c27753c86327d6d84683a5854 100644
--- a/paddle/fluid/operators/fused/fused_seqpool_cvm_op.cc
+++ b/paddle/fluid/operators/fused/fused_seqpool_cvm_op.cc
@@ -135,11 +135,11 @@ class FusedSeqpoolCVMOp : public framework::OperatorWithKernel {
       const framework::ExecutionContext& ctx) const override {
     auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
     auto input_data_type = framework::proto::VarType::Type(0);
-    bool flag = 0;
+    bool flag = false;
     for (auto* input : inputs) {
       if (input->IsInitialized() && input->numel() > 0) {
         input_data_type = framework::TransToProtoVarType(input->dtype());
-        flag = 1;
+        flag = true;
         break;
       }
     }
diff --git a/paddle/fluid/operators/generator/get_expected_kernel_func.cc b/paddle/fluid/operators/generator/get_expected_kernel_func.cc
index 81a2f3fb70699ecbd53b5535d1489d858b033aa7..3fe4eeec187f838c1aa6d9832bec70d4eac59fcd 100644
--- a/paddle/fluid/operators/generator/get_expected_kernel_func.cc
+++ b/paddle/fluid/operators/generator/get_expected_kernel_func.cc
@@ -91,11 +91,11 @@ phi::KernelKey GetConcatExpectedKernelType(
   (void)op_ptr;
   auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
   auto input_data_type = framework::proto::VarType::Type(0);
-  bool flag = 0;
+  bool flag = false;
   for (auto* input : inputs) {
     if (input->IsInitialized()) {
       input_data_type = framework::TransToProtoVarType(input->dtype());
-      flag = 1;
+      flag = true;
       break;
     }
   }
diff --git a/paddle/fluid/operators/partial_concat_op.cc b/paddle/fluid/operators/partial_concat_op.cc
index f2f3da9f0511f1c89c221d564db213788904a628..d5b0f5630b91111443624bf1b5fe422156ee12bb 100644
--- a/paddle/fluid/operators/partial_concat_op.cc
+++ b/paddle/fluid/operators/partial_concat_op.cc
@@ -93,11 +93,11 @@ class PartialConcatOp : public framework::OperatorWithKernel {
       const framework::ExecutionContext &ctx) const override {
     auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
     auto input_data_type = framework::proto::VarType::Type(0);
-    bool flag = 0;
+    bool flag = false;
     for (auto *input : inputs) {
       if (input->IsInitialized()) {
         input_data_type = framework::TransToProtoVarType(input->dtype());
-        flag = 1;
+        flag = true;
         break;
       }
     }
diff --git a/paddle/fluid/operators/partial_sum_op.cc b/paddle/fluid/operators/partial_sum_op.cc
index 4b130306825c678d7885716a0629feb73fd95230..0bba0381d209335aaa589be0c895270751816c84 100644
--- a/paddle/fluid/operators/partial_sum_op.cc
+++ b/paddle/fluid/operators/partial_sum_op.cc
@@ -95,11 +95,11 @@ class PartialSumOp : public framework::OperatorWithKernel {
       const framework::ExecutionContext &ctx) const override {
     auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
     auto input_data_type = framework::proto::VarType::Type(0);
-    bool flag = 0;
+    bool flag = false;
     for (auto *input : inputs) {
       if (input->IsInitialized()) {
         input_data_type = framework::TransToProtoVarType(input->dtype());
-        flag = 1;
+        flag = true;
         break;
       }
     }
diff --git a/paddle/phi/api/profiler/device_tracer.cc b/paddle/phi/api/profiler/device_tracer.cc
index da7d80501108113380bd7cf49c0e1c6c048183a2..fc8e8037bb69457cadad74fd2e7acbc15e3607a8 100644
--- a/paddle/phi/api/profiler/device_tracer.cc
+++ b/paddle/phi/api/profiler/device_tracer.cc
@@ -296,7 +296,7 @@ void CUPTIAPI bufferCompleted(CUcontext ctx,
       } else {
         CUPTI_CALL(status);
       }
-    } while (1);
+    } while (true);
 
     size_t dropped;
     CUPTI_CALL(
diff --git a/paddle/phi/kernels/funcs/gpc.cc b/paddle/phi/kernels/funcs/gpc.cc
index f7e852d53a7cfe09a59c84067fdc456540673c1e..74683f3e0f2fce6600e6f22dc57c8655ed522786 100644
--- a/paddle/phi/kernels/funcs/gpc.cc
+++ b/paddle/phi/kernels/funcs/gpc.cc
@@ -2195,7 +2195,7 @@ void gpc_tristrip_clip(gpc_op op,
                    tn->active * sizeof(gpc_vertex),
                    const_cast<char *>("tristrip creation"));
         v = 0;
-        if (0) {
+        if (false) {
          lt = tn->v[RIGHT];
          rt = tn->v[LEFT];
        } else {