未验证 提交 2c307457 编写于 作者: C cyberslack_lee 提交者: GitHub

[clang-tidy] No.31 enable modernize-use-bool-literals (#56216)

上级 0cae0151
...@@ -179,7 +179,7 @@ modernize-redundant-void-arg, ...@@ -179,7 +179,7 @@ modernize-redundant-void-arg,
-modernize-replace-random-shuffle, -modernize-replace-random-shuffle,
-modernize-shrink-to-fit, -modernize-shrink-to-fit,
-modernize-unary-static-assert, -modernize-unary-static-assert,
-modernize-use-bool-literals, modernize-use-bool-literals,
modernize-use-emplace, modernize-use-emplace,
modernize-use-equals-default, modernize-use-equals-default,
-modernize-use-equals-delete, -modernize-use-equals-delete,
......
...@@ -213,7 +213,7 @@ void HogwildWorker::TrainFilesWithProfiler() { ...@@ -213,7 +213,7 @@ void HogwildWorker::TrainFilesWithProfiler() {
#if defined(PADDLE_WITH_GPU_GRAPH) && defined(PADDLE_WITH_HETERPS) #if defined(PADDLE_WITH_GPU_GRAPH) && defined(PADDLE_WITH_HETERPS)
device_reader_->InitGraphTrainResource(); device_reader_->InitGraphTrainResource();
#endif #endif
while (1) { while (true) {
cur_batch = device_reader_->Next(); cur_batch = device_reader_->Next();
#if defined(PADDLE_WITH_GPU_GRAPH) #if defined(PADDLE_WITH_GPU_GRAPH)
if (is_multi_node) { if (is_multi_node) {
...@@ -348,7 +348,7 @@ void HogwildWorker::TrainFiles() { ...@@ -348,7 +348,7 @@ void HogwildWorker::TrainFiles() {
#if defined(PADDLE_WITH_GPU_GRAPH) && defined(PADDLE_WITH_HETERPS) #if defined(PADDLE_WITH_GPU_GRAPH) && defined(PADDLE_WITH_HETERPS)
device_reader_->InitGraphTrainResource(); device_reader_->InitGraphTrainResource();
#endif #endif
while (1) { while (true) {
cur_batch = device_reader_->Next(); cur_batch = device_reader_->Next();
#if defined(PADDLE_WITH_GPU_GRAPH) #if defined(PADDLE_WITH_GPU_GRAPH)
if (is_multi_node) { if (is_multi_node) {
......
...@@ -152,7 +152,7 @@ static int shell_popen_fork_internal(const char* real_cmd, ...@@ -152,7 +152,7 @@ static int shell_popen_fork_internal(const char* real_cmd,
static int read_from_pipe(FILE* fp, std::string* output) { static int read_from_pipe(FILE* fp, std::string* output) {
std::array<char, 4096> buf; std::array<char, 4096> buf;
while (1) { while (true) {
int n = fread(buf.data(), 1, 4096, fp); int n = fread(buf.data(), 1, 4096, fp);
if (n <= 0) { if (n <= 0) {
break; break;
......
...@@ -153,7 +153,7 @@ bool TransferLayoutElimPass::AllInputIsTransferlayout( ...@@ -153,7 +153,7 @@ bool TransferLayoutElimPass::AllInputIsTransferlayout(
for (auto var : op_node->inputs) { for (auto var : op_node->inputs) {
// If this input is a 1D persistable tensor, we allow transfer_layout not // If this input is a 1D persistable tensor, we allow transfer_layout not
// appear before this var, but temporarily disable this if. // appear before this var, but temporarily disable this if.
if (var->Var()->Persistable() && 0) { if (var->Var()->Persistable() && false) {
auto var_dims = auto var_dims =
scope->FindVar(var->Name())->GetMutable<phi::DenseTensor>()->dims(); scope->FindVar(var->Name())->GetMutable<phi::DenseTensor>()->dims();
if (var_dims.size() == 1) { if (var_dims.size() == 1) {
......
...@@ -59,7 +59,7 @@ void TrainerBase::DumpWork(int tid) { ...@@ -59,7 +59,7 @@ void TrainerBase::DumpWork(int tid) {
// GetDumpPath is implemented in each Trainer // GetDumpPath is implemented in each Trainer
std::string path = GetDumpPath(tid); std::string path = GetDumpPath(tid);
std::shared_ptr<FILE> fp = fs_open_write(path, &err_no, dump_converter_); std::shared_ptr<FILE> fp = fs_open_write(path, &err_no, dump_converter_);
while (1) { while (true) {
std::string out_str; std::string out_str;
if (!queue_->Get(out_str)) { if (!queue_->Get(out_str)) {
break; break;
......
...@@ -328,10 +328,10 @@ void IRPassManager::CreatePasses(Argument *argument, ...@@ -328,10 +328,10 @@ void IRPassManager::CreatePasses(Argument *argument,
argument->nnadapter_model_cache_token())); argument->nnadapter_model_cache_token()));
} else if (pass_name == "fc_fuse_pass") { } else if (pass_name == "fc_fuse_pass") {
pass->Set("use_gpu", new bool(argument->use_gpu())); pass->Set("use_gpu", new bool(argument->use_gpu()));
bool fc_mkldnn_pass = 0; bool fc_mkldnn_pass = false;
for (const std::string &pass_n : passes) { for (const std::string &pass_n : passes) {
if (pass_n == "fc_mkldnn_pass") { if (pass_n == "fc_mkldnn_pass") {
fc_mkldnn_pass = 1; fc_mkldnn_pass = true;
} }
} }
bool use_fc_padding = !fc_mkldnn_pass && argument->use_fc_padding(); bool use_fc_padding = !fc_mkldnn_pass && argument->use_fc_padding();
......
...@@ -1708,7 +1708,7 @@ CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>( ...@@ -1708,7 +1708,7 @@ CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
// TODO(NHZlX): Should add the link to the doc of // TODO(NHZlX): Should add the link to the doc of
// paddle_infer::CreatePredictor<paddle_infer::Config> // paddle_infer::CreatePredictor<paddle_infer::Config>
if (config.glog_info_disabled()) { if (config.glog_info_disabled()) {
FLAGS_logtostderr = 1; FLAGS_logtostderr = true;
FLAGS_minloglevel = 2; // GLOG_ERROR FLAGS_minloglevel = 2; // GLOG_ERROR
} }
......
...@@ -198,7 +198,7 @@ std::unique_ptr<PaddlePredictor> ...@@ -198,7 +198,7 @@ std::unique_ptr<PaddlePredictor>
CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kONNXRuntime>( CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kONNXRuntime>(
const AnalysisConfig &config) { const AnalysisConfig &config) {
if (config.glog_info_disabled()) { if (config.glog_info_disabled()) {
FLAGS_logtostderr = 1; FLAGS_logtostderr = true;
FLAGS_minloglevel = 2; // GLOG_ERROR FLAGS_minloglevel = 2; // GLOG_ERROR
} }
......
...@@ -282,7 +282,7 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool( ...@@ -282,7 +282,7 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool(
BuddyAllocator::PoolSet::iterator BuddyAllocator::FindExistChunk(size_t size) { BuddyAllocator::PoolSet::iterator BuddyAllocator::FindExistChunk(size_t size) {
size_t index = 0; size_t index = 0;
while (1) { while (true) {
auto it = pool_.lower_bound(IndexSizeAddress(index, size, nullptr)); auto it = pool_.lower_bound(IndexSizeAddress(index, size, nullptr));
// no match chunk memory // no match chunk memory
......
...@@ -107,11 +107,11 @@ class EmbeddingEltWiseLayerNormOp : public framework::OperatorWithKernel { ...@@ -107,11 +107,11 @@ class EmbeddingEltWiseLayerNormOp : public framework::OperatorWithKernel {
const framework::ExecutionContext& ctx) const override { const framework::ExecutionContext& ctx) const override {
auto inputs = ctx.MultiInput<phi::DenseTensor>("Embs"); auto inputs = ctx.MultiInput<phi::DenseTensor>("Embs");
auto input_data_type = framework::proto::VarType::Type(0); auto input_data_type = framework::proto::VarType::Type(0);
bool flag = 0; bool flag = false;
for (auto* input : inputs) { for (auto* input : inputs) {
if (input->IsInitialized() && input->numel() > 0) { if (input->IsInitialized() && input->numel() > 0) {
input_data_type = framework::TransToProtoVarType(input->dtype()); input_data_type = framework::TransToProtoVarType(input->dtype());
flag = 1; flag = true;
break; break;
} }
} }
......
...@@ -135,11 +135,11 @@ class FusedSeqpoolCVMOp : public framework::OperatorWithKernel { ...@@ -135,11 +135,11 @@ class FusedSeqpoolCVMOp : public framework::OperatorWithKernel {
const framework::ExecutionContext& ctx) const override { const framework::ExecutionContext& ctx) const override {
auto inputs = ctx.MultiInput<phi::DenseTensor>("X"); auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
auto input_data_type = framework::proto::VarType::Type(0); auto input_data_type = framework::proto::VarType::Type(0);
bool flag = 0; bool flag = false;
for (auto* input : inputs) { for (auto* input : inputs) {
if (input->IsInitialized() && input->numel() > 0) { if (input->IsInitialized() && input->numel() > 0) {
input_data_type = framework::TransToProtoVarType(input->dtype()); input_data_type = framework::TransToProtoVarType(input->dtype());
flag = 1; flag = true;
break; break;
} }
} }
......
...@@ -91,11 +91,11 @@ phi::KernelKey GetConcatExpectedKernelType( ...@@ -91,11 +91,11 @@ phi::KernelKey GetConcatExpectedKernelType(
(void)op_ptr; (void)op_ptr;
auto inputs = ctx.MultiInput<phi::DenseTensor>("X"); auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
auto input_data_type = framework::proto::VarType::Type(0); auto input_data_type = framework::proto::VarType::Type(0);
bool flag = 0; bool flag = false;
for (auto* input : inputs) { for (auto* input : inputs) {
if (input->IsInitialized()) { if (input->IsInitialized()) {
input_data_type = framework::TransToProtoVarType(input->dtype()); input_data_type = framework::TransToProtoVarType(input->dtype());
flag = 1; flag = true;
break; break;
} }
} }
......
...@@ -93,11 +93,11 @@ class PartialConcatOp : public framework::OperatorWithKernel { ...@@ -93,11 +93,11 @@ class PartialConcatOp : public framework::OperatorWithKernel {
const framework::ExecutionContext &ctx) const override { const framework::ExecutionContext &ctx) const override {
auto inputs = ctx.MultiInput<phi::DenseTensor>("X"); auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
auto input_data_type = framework::proto::VarType::Type(0); auto input_data_type = framework::proto::VarType::Type(0);
bool flag = 0; bool flag = false;
for (auto *input : inputs) { for (auto *input : inputs) {
if (input->IsInitialized()) { if (input->IsInitialized()) {
input_data_type = framework::TransToProtoVarType(input->dtype()); input_data_type = framework::TransToProtoVarType(input->dtype());
flag = 1; flag = true;
break; break;
} }
} }
......
...@@ -95,11 +95,11 @@ class PartialSumOp : public framework::OperatorWithKernel { ...@@ -95,11 +95,11 @@ class PartialSumOp : public framework::OperatorWithKernel {
const framework::ExecutionContext &ctx) const override { const framework::ExecutionContext &ctx) const override {
auto inputs = ctx.MultiInput<phi::DenseTensor>("X"); auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
auto input_data_type = framework::proto::VarType::Type(0); auto input_data_type = framework::proto::VarType::Type(0);
bool flag = 0; bool flag = false;
for (auto *input : inputs) { for (auto *input : inputs) {
if (input->IsInitialized()) { if (input->IsInitialized()) {
input_data_type = framework::TransToProtoVarType(input->dtype()); input_data_type = framework::TransToProtoVarType(input->dtype());
flag = 1; flag = true;
break; break;
} }
} }
......
...@@ -296,7 +296,7 @@ void CUPTIAPI bufferCompleted(CUcontext ctx, ...@@ -296,7 +296,7 @@ void CUPTIAPI bufferCompleted(CUcontext ctx,
} else { } else {
CUPTI_CALL(status); CUPTI_CALL(status);
} }
} while (1); } while (true);
size_t dropped; size_t dropped;
CUPTI_CALL( CUPTI_CALL(
......
...@@ -2195,7 +2195,7 @@ void gpc_tristrip_clip(gpc_op op, ...@@ -2195,7 +2195,7 @@ void gpc_tristrip_clip(gpc_op op,
tn->active * sizeof(gpc_vertex), tn->active * sizeof(gpc_vertex),
const_cast<char *>("tristrip creation")); const_cast<char *>("tristrip creation"));
v = 0; v = 0;
if (0) { if (false) {
lt = tn->v[RIGHT]; lt = tn->v[RIGHT];
rt = tn->v[LEFT]; rt = tn->v[LEFT];
} else { } else {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册