From b918100a02a48bb47b606f652c959f200989727d Mon Sep 17 00:00:00 2001
From: a_weng <54014960+Sahala08@users.noreply.github.com>
Date: Sat, 1 Jul 2023 20:44:52 +0800
Subject: [PATCH] [CodeStyle][CINN] fix cinn cpplint codestyle about
 `[readability/todo]`, `[readability/check]` (#55022)

---
 .../auto_gen_rule/multi_level_tiling.cc            |  2 +-
 .../auto_gen_rule/multi_level_tiling_test.cc       |  2 +-
 paddle/cinn/common/cas.cc                          |  2 +-
 paddle/cinn/frontend/op_mappers/paddle/argsort.cc  |  8 ++++----
 paddle/cinn/hlir/op/contrib/argmax.cc              |  8 ++++----
 paddle/cinn/hlir/op/contrib/argmin.cc              |  8 ++++----
 paddle/cinn/hlir/op/contrib/sort.cc                | 13 +++++++------
 paddle/cinn/hlir/op/nn.cc                          |  5 +++--
 paddle/cinn/hlir/pass/reduce_split_pass.cc         |  4 ++--
 paddle/cinn/hlir/pe/nn.cc                          |  4 ++--
 paddle/cinn/ir/schedule_desc_test.cc               |  3 ++-
 paddle/cinn/optim/vectorize_loops.cc               |  2 +-
 paddle/cinn/runtime/tiny_runtime.cc                |  4 ++--
 paddle/cinn/utils/random_engine.h                  |  2 +-
 test/cpp/cinn/concrete_program_builder.h           |  8 ++++----
 15 files changed, 39 insertions(+), 36 deletions(-)

diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling.cc b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling.cc
index c35bed808a8..459031266a7 100644
--- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling.cc
+++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling.cc
@@ -153,7 +153,7 @@ void MultiLevelTiling::ApplyTiling(ir::IRSchedule* ir_schedule,
       idx = &r_indices_;
     } else {
       idx = &s_indices_;
-    }  // TODO: support more iterator variable types
+    }  // TODO(zhhsplendid): support more iterator variable types
 
     int extent = ir_for->extent.as_int32();  // maybe int64?
 
diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling_test.cc b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling_test.cc
index b0214310fe1..10c0ccc7348 100644
--- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling_test.cc
+++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling_test.cc
@@ -148,7 +148,7 @@ TEST(MultiLevelTile, SimpleLoops) {
   test_func(&new_states[0]->ir_schedule);
 }
 
-// TODO: fix in future
+// TODO(SunNy820828449): fix in future
 /*
 TEST(MulitLevelTile, MatrixMultiply) {
   srand(0);
diff --git a/paddle/cinn/common/cas.cc b/paddle/cinn/common/cas.cc
index 9b8f1cf0f55..ab23d3eb1e6 100644
--- a/paddle/cinn/common/cas.cc
+++ b/paddle/cinn/common/cas.cc
@@ -2190,7 +2190,7 @@ Expr CasSimplifyMutator::SimplifyFracOp(Expr expr) {
   };
 
   {
-    // TODO : fix in future.
+    // TODO(SunNy820828449): fix in future.
     // std::vector<Expr> a_args, b_args;
     // if (ap)
     //   a_args = ap->operands();
diff --git a/paddle/cinn/frontend/op_mappers/paddle/argsort.cc b/paddle/cinn/frontend/op_mappers/paddle/argsort.cc
index fb2e76d01ec..0f7c7c624ab 100644
--- a/paddle/cinn/frontend/op_mappers/paddle/argsort.cc
+++ b/paddle/cinn/frontend/op_mappers/paddle/argsort.cc
@@ -45,10 +45,10 @@ void ArgsortOpMapper(const paddle::cpp::OpDesc& op_desc,
   ctx.AddVar(indices_name, idx);
   ctx.AddVarModelToProgram(indices_name, idx->id);
 
-  // TODO: return the sorted tensor here. Now out[1] is a temporary tensor.
-  // this is because output 'Out' is never uesd in Paddle API, but CINN need to
-  // return 2 output vars to meet the op defination, this should be resolved
-  // after sort op restructured.
+  // TODO(lanxianghit): return the sorted tensor here. Now out[1] is a temporary
+  // tensor. this is because output 'Out' is never uesd in Paddle API, but CINN
+  // need to return 2 output vars to meet the op defination, this should be
+  // resolved after sort op restructured.
   ctx.AddVar(out_name, out[1]);
   ctx.AddVarModelToProgram(out_name, out[1]->id);
 }
diff --git a/paddle/cinn/hlir/op/contrib/argmax.cc b/paddle/cinn/hlir/op/contrib/argmax.cc
index 50ec423d794..72e005a2787 100644
--- a/paddle/cinn/hlir/op/contrib/argmax.cc
+++ b/paddle/cinn/hlir/op/contrib/argmax.cc
@@ -157,9 +157,9 @@ std::shared_ptr<OpStrategy> StrategyForArgmax(
     ir::IRSchedule ir_sch(mod_expr);
     ir_sch.MergeExprs();
     auto blocks = ir_sch.GetAllBlocks();
-    // TODO: It needs to be rewritten according to the reduction_max operator to
-    // improve performance. Do not use local variables, because the size will
-    // exceed the limit.
+    // TODO(zhhsplendid): It needs to be rewritten according to the
+    // reduction_max operator to improve performance. Do not use local
+    // variables, because the size will exceed the limit.
     ir_sch.SetBuffer(blocks[0], "local");
     ir_sch.SetBuffer(blocks[1], "local");
 
@@ -184,7 +184,7 @@ std::shared_ptr<OpStrategy> StrategyForArgmax(
 std::vector InferShapeForArgmax(
     const std::vector &inputs_shape,
     const framework::AttrMapType &attrs) {
-  CHECK(inputs_shape.size() == 1UL);
+  CHECK_EQ(inputs_shape.size(), 1UL);
   auto ndim = inputs_shape[0].size();
   CHECK_GT(ndim, 0) << "tensor's dim must be more than 0";
   int axis;
diff --git a/paddle/cinn/hlir/op/contrib/argmin.cc b/paddle/cinn/hlir/op/contrib/argmin.cc
index 057daa68b29..a849982df66 100644
--- a/paddle/cinn/hlir/op/contrib/argmin.cc
+++ b/paddle/cinn/hlir/op/contrib/argmin.cc
@@ -155,9 +155,9 @@ std::shared_ptr<OpStrategy> StrategyForArgmin(
     ir::IRSchedule ir_sch(mod_expr);
     ir_sch.MergeExprs();
     auto blocks = ir_sch.GetAllBlocks();
-    // TODO: It needs to be rewritten according to the reduction_min operator to
-    // improve performance. Do not use local variables, because the size will
-    // exceed the limit.
+    // TODO(zhhsplendid): It needs to be rewritten according to the
+    // reduction_min operator to improve performance. Do not use local
+    // variables, because the size will exceed the limit.
     ir_sch.SetBuffer(blocks[0], "local");
     ir_sch.SetBuffer(blocks[1], "local");
     int64_t prod_size = std::accumulate(output_shapes[0].begin(),
                                         output_shapes[0].end(),
                                         1,
@@ -181,7 +181,7 @@ std::shared_ptr<OpStrategy> StrategyForArgmin(
 std::vector InferShapeForArgmin(
     const std::vector &inputs_shape,
     const framework::AttrMapType &attrs) {
-  CHECK(inputs_shape.size() == 1UL);
+  CHECK_EQ(inputs_shape.size(), 1UL);
   auto ndim = inputs_shape[0].size();
   CHECK_GT(ndim, 0) << "tensor's dim must be more than 0";
   int axis;
diff --git a/paddle/cinn/hlir/op/contrib/sort.cc b/paddle/cinn/hlir/op/contrib/sort.cc
index c5239b8f409..eff362affee 100644
--- a/paddle/cinn/hlir/op/contrib/sort.cc
+++ b/paddle/cinn/hlir/op/contrib/sort.cc
@@ -213,8 +213,8 @@ std::shared_ptr<OpStrategy> StrategyForSort(
     ir::IRSchedule ir_sch(mod_expr);
     ir_sch.MergeExprs();
     auto blocks = ir_sch.GetAllBlocks();
-    // TODO: remove external calls, do not use local variables, because
-    // the size will exceed the limit.
+    // TODO(Shixiaowei02): remove external calls, do not use local variables,
+    // because the size will exceed the limit.
     ir_sch.SetBuffer(blocks[0], "local");
     ir_sch.SetBuffer(blocks[1], "local");
 
@@ -307,10 +307,11 @@ std::shared_ptr<OpStrategy> StrategyForArgSort(
     ir::IRSchedule ir_sch(mod_expr);
     ir_sch.MergeExprs();
     auto blocks = ir_sch.GetAllBlocks();
-    // TODO: remove external calls, do not use local variables, because
-    // the size will exceed the limit.
-    // TODO: There is a bug, setting buffer to "local" here will cause the var
-    // declared twice at CodeGen. ir_sch.SetBuffer(blocks[0], "local");
+    // TODO(Shixiaowei02): remove external calls, do not use local variables,
+    // because the size will exceed the limit.
+    // TODO(lanxianghit): There is a bug, setting buffer to "local" here will
+    // cause the var declared twice at CodeGen. ir_sch.SetBuffer(blocks[0],
+    // "local");
     int64_t prod_size = std::accumulate(output_shapes[0].begin(),
                                         output_shapes[0].end(),
                                         1,
diff --git a/paddle/cinn/hlir/op/nn.cc b/paddle/cinn/hlir/op/nn.cc
index 0f3c93d3124..757d9788561 100644
--- a/paddle/cinn/hlir/op/nn.cc
+++ b/paddle/cinn/hlir/op/nn.cc
@@ -1716,7 +1716,7 @@ std::shared_ptr<OpStrategy> StrategyForPool2d(
       *ret = CINNValuePack{res};
     } else {
       CINNValuePack arg_pack = args[0];
-      CHECK(arg_pack.size() == 3UL);
+      CHECK_EQ(arg_pack.size(), 3UL);
       Expr out = arg_pack[0];
       Expr reduce = arg_pack[1];
       CHECK(out.as_tensor() && reduce.as_tensor());
@@ -1834,7 +1834,8 @@ std::shared_ptr<OpStrategy> StrategyForPool2d(
   bool use_warp_reduce = false;
   if (global_pooling && data_format == "NCHW" &&
       target.arch == Target::Arch::NVGPU) {
-    // TODO 32 may not be the exact number, try also 16 or 8 or other number
+    // TODO(hp03): 32 may not be the exact number, try also 16 or 8 or other
+    // number
     // we choose 32 to make sure all the threads in a warp has work to do,
     if ((A_tensor->shape[2].as_int32() * A_tensor->shape[3].as_int32()) >= 32) {
       use_warp_reduce = true;
diff --git a/paddle/cinn/hlir/pass/reduce_split_pass.cc b/paddle/cinn/hlir/pass/reduce_split_pass.cc
index 39cf77f7ab3..bfcfde59ba0 100644
--- a/paddle/cinn/hlir/pass/reduce_split_pass.cc
+++ b/paddle/cinn/hlir/pass/reduce_split_pass.cc
@@ -103,7 +103,7 @@ class ReduceSplitPass {
       auto in_shape = shape_dict.at(in->id());
       auto out_shape = shape_dict.at(out->id());
       // all preceding reduced
-      CHECK(in_shape.size() > 1);
+      CHECK_GT(in_shape.size(), 1);
       // [NHWC]->[C], only the last dim kept
       bool all_preceding_dim_reduced = true;
       for (auto i = 0; i < in_shape.size() - 1; ++i) {
@@ -122,7 +122,7 @@ class ReduceSplitPass {
           in_shape.begin(), in_shape.end(), 1, std::multiplies<int>());
       int reduce_numel = std::accumulate(
           in_shape.begin(), in_shape.end() - 1, 1, std::multiplies<int>());
-      CHECK(reduce_numel > 0);
+      CHECK_GT(reduce_numel, 0);
       // if the numel is not large enough, it is no need to split
      // if loop times is too large with reduce optimize
       int size = std::accumulate(
diff --git a/paddle/cinn/hlir/pe/nn.cc b/paddle/cinn/hlir/pe/nn.cc
index faf459800d5..152eb1bc384 100644
--- a/paddle/cinn/hlir/pe/nn.cc
+++ b/paddle/cinn/hlir/pe/nn.cc
@@ -1414,8 +1414,8 @@ std::vector Pool1d(const Tensor &tensor,
 std::vector GlobalPool2d(const Tensor &tensor,
                          const std::string &pool_type,
                          const std::string &output_name) {
-  // TODO 1. check warp shuffle is supported!
-  // TODO 2. using `cub` with NVRTC
+  // TODO(hp03): 1. check warp shuffle is supported!
+  // TODO(hp03): 2. using `cub` with NVRTC
   Expr extend = tensor->shape[2] * tensor->shape[3];
   if (pool_type == "max") {
     auto temp = Compute(
diff --git a/paddle/cinn/ir/schedule_desc_test.cc b/paddle/cinn/ir/schedule_desc_test.cc
index 805d79230c6..efbfe0603dd 100644
--- a/paddle/cinn/ir/schedule_desc_test.cc
+++ b/paddle/cinn/ir/schedule_desc_test.cc
@@ -293,7 +293,8 @@ TEST_F(TestScheduleDesc, StepKind_GetBlock) {
   CheckTracingOutputs({block_b}, trace);
   CheckTracingOutputs({block_b}, ir_sch.GetTraceDesc());
 }
-// TODO: fix in future, as fix split var name, this case some problem.
+// TODO(SunNy820828449): fix in future, as fix split var name, this case some
+// problem.
 /*
 TEST_F(TestScheduleDesc, StepKind_Split) {
   lowered_funcs = LowerCompute({32, 32, 32}, target);
diff --git a/paddle/cinn/optim/vectorize_loops.cc b/paddle/cinn/optim/vectorize_loops.cc
index eb75b457e9a..4a4cbfcef64 100644
--- a/paddle/cinn/optim/vectorize_loops.cc
+++ b/paddle/cinn/optim/vectorize_loops.cc
@@ -730,7 +730,7 @@ struct VectorizeLoops_ : public IRMutator {
 
     if (forloop->is_vectorized()) {
       Context::info_rgt().Get<int>("vectorized_forloop_count")++;
-      CHECK(forloop->vectorize_info().factor > 0);
+      CHECK_GT(forloop->vectorize_info().factor, 0);
       CHECK(is_zero(forloop->min));
       Expr for_extent = common::AutoSimplify(forloop->extent);
 
diff --git a/paddle/cinn/runtime/tiny_runtime.cc b/paddle/cinn/runtime/tiny_runtime.cc
index efb4c2075ec..65bb07759c0 100644
--- a/paddle/cinn/runtime/tiny_runtime.cc
+++ b/paddle/cinn/runtime/tiny_runtime.cc
@@ -58,10 +58,10 @@ void *load_program(const char *paramfile) {
   fclose(f);
 
   if (std::string(buf, buf + 4) != "CINN") {
-    // TODO LOG fatal
+    // TODO(hp03): LOG fatal
     return nullptr;
   }
-  // TODO check param file version
+  // TODO(hp03): check param file version
   ctx->major_v = *(int *)(buf + 4);
   ctx->minor_v = *(int *)(buf + 8);
 
diff --git a/paddle/cinn/utils/random_engine.h b/paddle/cinn/utils/random_engine.h
index 15797052a8c..b94dcda043e 100644
--- a/paddle/cinn/utils/random_engine.h
+++ b/paddle/cinn/utils/random_engine.h
@@ -109,7 +109,7 @@ double SampleUniformDouble(double min,
 template <typename T>
 int SampleDiscreteFromDistribution(const std::vector<T>& weights,
                                    LinearRandomEngine::StateType* rand_seed) {
-  CHECK(weights.size() > 0);
+  CHECK_GT(weights.size(), 0);
   LinearRandomEngine engine(rand_seed);
   std::discrete_distribution<int> dist(weights.begin(), weights.end());
   return dist(engine);
diff --git a/test/cpp/cinn/concrete_program_builder.h b/test/cpp/cinn/concrete_program_builder.h
index 2aa688cd84f..8da4bdab927 100644
--- a/test/cpp/cinn/concrete_program_builder.h
+++ b/test/cpp/cinn/concrete_program_builder.h
@@ -27,7 +27,7 @@ class BiasBnReLUBuilder : public ProgramBuilder {
   BiasBnReLUBuilder() : ProgramBuilder("bias_bn_relu_builder") {}
   frontend::Program Build(const std::vector& inputs_varinfo,
                           const utils::AttributeMap& attrs = {}) {
-    CHECK(inputs_varinfo.size() == 4);
+    CHECK_EQ(inputs_varinfo.size(), 4);
     auto conv_output = builder_.CreateInput(
         inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
     auto bias = builder_.CreateInput(
@@ -55,7 +55,7 @@ class ExpTwoConsumersOpBuilder : public ProgramBuilder {
   ExpTwoConsumersOpBuilder() : ProgramBuilder("exp_two_consumers_builder") {}
   frontend::Program Build(const std::vector& inputs_varinfo,
                           const utils::AttributeMap& attrs = {}) {
-    CHECK(inputs_varinfo.size() == 1);
+    CHECK_EQ(inputs_varinfo.size(), 1);
    auto x = builder_.CreateInput(
         inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
     auto exp_x = builder_.Exp(x);
@@ -76,7 +76,7 @@ class GatherAddSubBuilder : public ProgramBuilder {
   GatherAddSubBuilder() : ProgramBuilder("gather_add_sub_builder") {}
   frontend::Program Build(const std::vector& inputs_varinfo,
                           const utils::AttributeMap& attrs = {}) {
-    CHECK(inputs_varinfo.size() == 2);
+    CHECK_EQ(inputs_varinfo.size(), 2);
     auto x = builder_.CreateInput(
         inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
     auto y = builder_.CreateInput(
@@ -102,7 +102,7 @@ class FillConstantAddBuilder : public ProgramBuilder {
   FillConstantAddBuilder() : ProgramBuilder("fill_constant_add_builder") {}
   frontend::Program Build(const std::vector& inputs_varinfo,
                           const utils::AttributeMap& attrs = {}) {
-    CHECK(inputs_varinfo.size() == 1);
+    CHECK_EQ(inputs_varinfo.size(), 1);
     auto x = builder_.CreateInput(
         inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
     auto fill_constant =
--
GitLab