未验证 提交 2c307457 编写于 作者: C cyberslack_lee 提交者: GitHub

[clang-tidy] No.31 enable modernize-use-bool-literals (#56216)

上级 0cae0151
......@@ -179,7 +179,7 @@ modernize-redundant-void-arg,
-modernize-replace-random-shuffle,
-modernize-shrink-to-fit,
-modernize-unary-static-assert,
-modernize-use-bool-literals,
modernize-use-bool-literals,
modernize-use-emplace,
modernize-use-equals-default,
-modernize-use-equals-delete,
......
......@@ -213,7 +213,7 @@ void HogwildWorker::TrainFilesWithProfiler() {
#if defined(PADDLE_WITH_GPU_GRAPH) && defined(PADDLE_WITH_HETERPS)
device_reader_->InitGraphTrainResource();
#endif
while (1) {
while (true) {
cur_batch = device_reader_->Next();
#if defined(PADDLE_WITH_GPU_GRAPH)
if (is_multi_node) {
......@@ -348,7 +348,7 @@ void HogwildWorker::TrainFiles() {
#if defined(PADDLE_WITH_GPU_GRAPH) && defined(PADDLE_WITH_HETERPS)
device_reader_->InitGraphTrainResource();
#endif
while (1) {
while (true) {
cur_batch = device_reader_->Next();
#if defined(PADDLE_WITH_GPU_GRAPH)
if (is_multi_node) {
......
......@@ -152,7 +152,7 @@ static int shell_popen_fork_internal(const char* real_cmd,
static int read_from_pipe(FILE* fp, std::string* output) {
std::array<char, 4096> buf;
while (1) {
while (true) {
int n = fread(buf.data(), 1, 4096, fp);
if (n <= 0) {
break;
......
......@@ -153,7 +153,7 @@ bool TransferLayoutElimPass::AllInputIsTransferlayout(
for (auto var : op_node->inputs) {
    // If this input is a 1D persistable tensor, we allow transfer_layout to
    // be absent before this var, but temporarily disable this branch.
if (var->Var()->Persistable() && 0) {
if (var->Var()->Persistable() && false) {
auto var_dims =
scope->FindVar(var->Name())->GetMutable<phi::DenseTensor>()->dims();
if (var_dims.size() == 1) {
......
......@@ -59,7 +59,7 @@ void TrainerBase::DumpWork(int tid) {
// GetDumpPath is implemented in each Trainer
std::string path = GetDumpPath(tid);
std::shared_ptr<FILE> fp = fs_open_write(path, &err_no, dump_converter_);
while (1) {
while (true) {
std::string out_str;
if (!queue_->Get(out_str)) {
break;
......
......@@ -328,10 +328,10 @@ void IRPassManager::CreatePasses(Argument *argument,
argument->nnadapter_model_cache_token()));
} else if (pass_name == "fc_fuse_pass") {
pass->Set("use_gpu", new bool(argument->use_gpu()));
bool fc_mkldnn_pass = 0;
bool fc_mkldnn_pass = false;
for (const std::string &pass_n : passes) {
if (pass_n == "fc_mkldnn_pass") {
fc_mkldnn_pass = 1;
fc_mkldnn_pass = true;
}
}
bool use_fc_padding = !fc_mkldnn_pass && argument->use_fc_padding();
......
......@@ -1708,7 +1708,7 @@ CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
// TODO(NHZlX): Should add the link to the doc of
// paddle_infer::CreatePredictor<paddle_infer::Config>
if (config.glog_info_disabled()) {
FLAGS_logtostderr = 1;
FLAGS_logtostderr = true;
FLAGS_minloglevel = 2; // GLOG_ERROR
}
......
......@@ -198,7 +198,7 @@ std::unique_ptr<PaddlePredictor>
CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kONNXRuntime>(
const AnalysisConfig &config) {
if (config.glog_info_disabled()) {
FLAGS_logtostderr = 1;
FLAGS_logtostderr = true;
FLAGS_minloglevel = 2; // GLOG_ERROR
}
......
......@@ -282,7 +282,7 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool(
BuddyAllocator::PoolSet::iterator BuddyAllocator::FindExistChunk(size_t size) {
size_t index = 0;
while (1) {
while (true) {
auto it = pool_.lower_bound(IndexSizeAddress(index, size, nullptr));
// no match chunk memory
......
......@@ -107,11 +107,11 @@ class EmbeddingEltWiseLayerNormOp : public framework::OperatorWithKernel {
const framework::ExecutionContext& ctx) const override {
auto inputs = ctx.MultiInput<phi::DenseTensor>("Embs");
auto input_data_type = framework::proto::VarType::Type(0);
bool flag = 0;
bool flag = false;
for (auto* input : inputs) {
if (input->IsInitialized() && input->numel() > 0) {
input_data_type = framework::TransToProtoVarType(input->dtype());
flag = 1;
flag = true;
break;
}
}
......
......@@ -135,11 +135,11 @@ class FusedSeqpoolCVMOp : public framework::OperatorWithKernel {
const framework::ExecutionContext& ctx) const override {
auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
auto input_data_type = framework::proto::VarType::Type(0);
bool flag = 0;
bool flag = false;
for (auto* input : inputs) {
if (input->IsInitialized() && input->numel() > 0) {
input_data_type = framework::TransToProtoVarType(input->dtype());
flag = 1;
flag = true;
break;
}
}
......
......@@ -91,11 +91,11 @@ phi::KernelKey GetConcatExpectedKernelType(
(void)op_ptr;
auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
auto input_data_type = framework::proto::VarType::Type(0);
bool flag = 0;
bool flag = false;
for (auto* input : inputs) {
if (input->IsInitialized()) {
input_data_type = framework::TransToProtoVarType(input->dtype());
flag = 1;
flag = true;
break;
}
}
......
......@@ -93,11 +93,11 @@ class PartialConcatOp : public framework::OperatorWithKernel {
const framework::ExecutionContext &ctx) const override {
auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
auto input_data_type = framework::proto::VarType::Type(0);
bool flag = 0;
bool flag = false;
for (auto *input : inputs) {
if (input->IsInitialized()) {
input_data_type = framework::TransToProtoVarType(input->dtype());
flag = 1;
flag = true;
break;
}
}
......
......@@ -95,11 +95,11 @@ class PartialSumOp : public framework::OperatorWithKernel {
const framework::ExecutionContext &ctx) const override {
auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
auto input_data_type = framework::proto::VarType::Type(0);
bool flag = 0;
bool flag = false;
for (auto *input : inputs) {
if (input->IsInitialized()) {
input_data_type = framework::TransToProtoVarType(input->dtype());
flag = 1;
flag = true;
break;
}
}
......
......@@ -296,7 +296,7 @@ void CUPTIAPI bufferCompleted(CUcontext ctx,
} else {
CUPTI_CALL(status);
}
} while (1);
} while (true);
size_t dropped;
CUPTI_CALL(
......
......@@ -2195,7 +2195,7 @@ void gpc_tristrip_clip(gpc_op op,
tn->active * sizeof(gpc_vertex),
const_cast<char *>("tristrip creation"));
v = 0;
if (0) {
if (false) {
lt = tn->v[RIGHT];
rt = tn->v[LEFT];
} else {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册