diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling.cc b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling.cc
index c35bed808a8e0b2ad0ebbd7dee4d0e550e8fe1f7..459031266a7140abd69cd357237cd22fbd8f68ad 100644
--- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling.cc
+++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling.cc
@@ -153,7 +153,7 @@ void MultiLevelTiling::ApplyTiling(ir::IRSchedule* ir_schedule,
     idx = &r_indices_;
   } else {
     idx = &s_indices_;
-  }  // TODO: support more iterator variable types
+  }  // TODO(zhhsplendid): support more iterator variable types

   int extent = ir_for->extent.as_int32();  // maybe int64?

diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling_test.cc b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling_test.cc
index b0214310fe102686080d145b72e37dd5aac278a7..10c0ccc73489b263f7ef99cea4e6ae75beb47192 100644
--- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling_test.cc
+++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/multi_level_tiling_test.cc
@@ -148,7 +148,7 @@ TEST(MultiLevelTile, SimpleLoops) {
   test_func(&new_states[0]->ir_schedule);
 }

-// TODO: fix in future
+// TODO(SunNy820828449): fix in future
 /*
 TEST(MulitLevelTile, MatrixMultiply) {
   srand(0);
diff --git a/paddle/cinn/common/cas.cc b/paddle/cinn/common/cas.cc
index 9b8f1cf0f551b9df7b398b1636f9022b53991ec9..ab23d3eb1e6c4b29eb7c9c2a0d9c069d62dc265c 100644
--- a/paddle/cinn/common/cas.cc
+++ b/paddle/cinn/common/cas.cc
@@ -2190,7 +2190,7 @@ Expr CasSimplifyMutator::SimplifyFracOp(Expr expr) {
   };

   {
-    // TODO : fix in future.
+    // TODO(SunNy820828449): fix in future.
     // std::vector a_args, b_args;
     // if (ap)
     //   a_args = ap->operands();
diff --git a/paddle/cinn/frontend/op_mappers/paddle/argsort.cc b/paddle/cinn/frontend/op_mappers/paddle/argsort.cc
index fb2e76d01ecd90f40019e7273b981b7c76e85d81..0f7c7c624aba873d99f8f25815a91f4ba3029b90 100644
--- a/paddle/cinn/frontend/op_mappers/paddle/argsort.cc
+++ b/paddle/cinn/frontend/op_mappers/paddle/argsort.cc
@@ -45,10 +45,10 @@ void ArgsortOpMapper(const paddle::cpp::OpDesc& op_desc,
   ctx.AddVar(indices_name, idx);
   ctx.AddVarModelToProgram(indices_name, idx->id);

-  // TODO: return the sorted tensor here. Now out[1] is a temporary tensor.
-  // this is because output 'Out' is never uesd in Paddle API, but CINN need to
-  // return 2 output vars to meet the op defination, this should be resolved
-  // after sort op restructured.
+  // TODO(lanxianghit): return the sorted tensor here. Now out[1] is a temporary
+  // tensor. this is because output 'Out' is never uesd in Paddle API, but CINN
+  // need to return 2 output vars to meet the op defination, this should be
+  // resolved after sort op restructured.
   ctx.AddVar(out_name, out[1]);
   ctx.AddVarModelToProgram(out_name, out[1]->id);
 }
diff --git a/paddle/cinn/hlir/op/contrib/argmax.cc b/paddle/cinn/hlir/op/contrib/argmax.cc
index 50ec423d7946213a2caaa7c1bec39da657fa93e9..72e005a27874ac473ab9bc6c811f2deb956774d5 100644
--- a/paddle/cinn/hlir/op/contrib/argmax.cc
+++ b/paddle/cinn/hlir/op/contrib/argmax.cc
@@ -157,9 +157,9 @@ std::shared_ptr StrategyForArgmax(
       ir::IRSchedule ir_sch(mod_expr);
       ir_sch.MergeExprs();
       auto blocks = ir_sch.GetAllBlocks();
-      // TODO: It needs to be rewritten according to the reduction_max operator to
-      // improve performance. Do not use local variables, because the size will
-      // exceed the limit.
+      // TODO(zhhsplendid): It needs to be rewritten according to the
+      // reduction_max operator to improve performance. Do not use local
+      // variables, because the size will exceed the limit.
       ir_sch.SetBuffer(blocks[0], "local");
       ir_sch.SetBuffer(blocks[1], "local");

@@ -184,7 +184,7 @@ std::shared_ptr StrategyForArgmax(
 std::vector InferShapeForArgmax(
     const std::vector &inputs_shape,
     const framework::AttrMapType &attrs) {
-  CHECK(inputs_shape.size() == 1UL);
+  CHECK_EQ(inputs_shape.size(), 1UL);
   auto ndim = inputs_shape[0].size();
   CHECK_GT(ndim, 0) << "tensor's dim must be more than 0";
   int axis;
diff --git a/paddle/cinn/hlir/op/contrib/argmin.cc b/paddle/cinn/hlir/op/contrib/argmin.cc
index 057daa68b295178dbd61005019afdcd712a2eb4b..a849982df6620807463e7ebcdc99bfefa404fdd0 100644
--- a/paddle/cinn/hlir/op/contrib/argmin.cc
+++ b/paddle/cinn/hlir/op/contrib/argmin.cc
@@ -155,9 +155,9 @@ std::shared_ptr StrategyForArgmin(
      ir::IRSchedule ir_sch(mod_expr);
      ir_sch.MergeExprs();
      auto blocks = ir_sch.GetAllBlocks();
-      // TODO: It needs to be rewritten according to the reduction_min operator to
-      // improve performance. Do not use local variables, because the size will
-      // exceed the limit.
+      // TODO(zhhsplendid): It needs to be rewritten according to the
+      // reduction_min operator to improve performance. Do not use local
+      // variables, because the size will exceed the limit.
      ir_sch.SetBuffer(blocks[0], "local");
      ir_sch.SetBuffer(blocks[1], "local");
      int64_t prod_size = std::accumulate(output_shapes[0].begin(),
@@ -181,7 +181,7 @@ std::shared_ptr StrategyForArgmin(
 std::vector InferShapeForArgmin(
     const std::vector &inputs_shape,
     const framework::AttrMapType &attrs) {
-  CHECK(inputs_shape.size() == 1UL);
+  CHECK_EQ(inputs_shape.size(), 1UL);
   auto ndim = inputs_shape[0].size();
   CHECK_GT(ndim, 0) << "tensor's dim must be more than 0";
   int axis;
diff --git a/paddle/cinn/hlir/op/contrib/sort.cc b/paddle/cinn/hlir/op/contrib/sort.cc
index c5239b8f409d7d4abc09277ccd4c0da02392bcd1..eff362affee3dbe3b1a56aac4ecfd5e8f9b4cbd6 100644
--- a/paddle/cinn/hlir/op/contrib/sort.cc
+++ b/paddle/cinn/hlir/op/contrib/sort.cc
@@ -213,8 +213,8 @@ std::shared_ptr StrategyForSort(
      ir::IRSchedule ir_sch(mod_expr);
      ir_sch.MergeExprs();
      auto blocks = ir_sch.GetAllBlocks();
-      // TODO: remove external calls, do not use local variables, because
-      // the size will exceed the limit.
+      // TODO(Shixiaowei02): remove external calls, do not use local variables,
+      // because the size will exceed the limit.
      ir_sch.SetBuffer(blocks[0], "local");
      ir_sch.SetBuffer(blocks[1], "local");

@@ -307,10 +307,11 @@ std::shared_ptr StrategyForArgSort(
      ir::IRSchedule ir_sch(mod_expr);
      ir_sch.MergeExprs();
      auto blocks = ir_sch.GetAllBlocks();
-      // TODO: remove external calls, do not use local variables, because
-      // the size will exceed the limit.
-      // TODO: There is a bug, setting buffer to "local" here will cause the var
-      // declared twice at CodeGen. ir_sch.SetBuffer(blocks[0], "local");
+      // TODO(Shixiaowei02): remove external calls, do not use local variables,
+      // because the size will exceed the limit.
+      // TODO(lanxianghit): There is a bug, setting buffer to "local" here will
+      // cause the var declared twice at CodeGen. ir_sch.SetBuffer(blocks[0],
+      // "local");
      int64_t prod_size = std::accumulate(output_shapes[0].begin(),
                                          output_shapes[0].end(),
                                          1,
diff --git a/paddle/cinn/hlir/op/nn.cc b/paddle/cinn/hlir/op/nn.cc
index 0f3c93d3124c75db4cdb68efd223ef66a83cf646..757d9788561b7fa0d2e980166c0093291575446f 100644
--- a/paddle/cinn/hlir/op/nn.cc
+++ b/paddle/cinn/hlir/op/nn.cc
@@ -1716,7 +1716,7 @@ std::shared_ptr StrategyForPool2d(
       *ret = CINNValuePack{res};
     } else {
       CINNValuePack arg_pack = args[0];
-      CHECK(arg_pack.size() == 3UL);
+      CHECK_EQ(arg_pack.size(), 3UL);
       Expr out = arg_pack[0];
       Expr reduce = arg_pack[1];
       CHECK(out.as_tensor() && reduce.as_tensor());
@@ -1834,7 +1834,8 @@ std::shared_ptr StrategyForPool2d(
   bool use_warp_reduce = false;
   if (global_pooling && data_format == "NCHW" &&
       target.arch == Target::Arch::NVGPU) {
-    // TODO 32 may not be the exact number, try also 16 or 8 or other number
+    // TODO(hp03): 32 may not be the exact number, try also 16 or 8 or other
+    // number
     // we choose 32 to make sure all the threads in a warp has work to do,
     if ((A_tensor->shape[2].as_int32() * A_tensor->shape[3].as_int32()) >= 32) {
       use_warp_reduce = true;
diff --git a/paddle/cinn/hlir/pass/reduce_split_pass.cc b/paddle/cinn/hlir/pass/reduce_split_pass.cc
index 39cf77f7ab32ed4210d12f757e1e19e5e9053de9..bfcfde59ba0426d1c830edcd6341b6e5db41c74d 100644
--- a/paddle/cinn/hlir/pass/reduce_split_pass.cc
+++ b/paddle/cinn/hlir/pass/reduce_split_pass.cc
@@ -103,7 +103,7 @@ class ReduceSplitPass {
      auto in_shape = shape_dict.at(in->id());
      auto out_shape = shape_dict.at(out->id());
      // all preceding reduced
-      CHECK(in_shape.size() > 1);
+      CHECK_GT(in_shape.size(), 1);
      // [NHWC]->[C], only the last dim kept
      bool all_preceding_dim_reduced = true;
      for (auto i = 0; i < in_shape.size() - 1; ++i) {
@@ -122,7 +122,7 @@ class ReduceSplitPass {
          in_shape.begin(), in_shape.end(), 1, std::multiplies());
      int reduce_numel = std::accumulate(
          in_shape.begin(), in_shape.end() - 1, 1, std::multiplies());
-      CHECK(reduce_numel > 0);
+      CHECK_GT(reduce_numel, 0);
      // if the numel is not large enough, it is no need to split
      // if loop times is too large with reduce optimize
      int size = std::accumulate(
diff --git a/paddle/cinn/hlir/pe/nn.cc b/paddle/cinn/hlir/pe/nn.cc
index faf459800d5236661537a6a5002b98c5205b0594..152eb1bc3847b097a0da7272f8a0376608e139f2 100644
--- a/paddle/cinn/hlir/pe/nn.cc
+++ b/paddle/cinn/hlir/pe/nn.cc
@@ -1414,8 +1414,8 @@ std::vector Pool1d(const Tensor &tensor,
 std::vector GlobalPool2d(const Tensor &tensor,
                          const std::string &pool_type,
                          const std::string &output_name) {
-  // TODO 1. check warp shuffle is supported!
-  // TODO 2. using `cub` with NVRTC
+  // TODO(hp03): 1. check warp shuffle is supported!
+  // TODO(hp03): 2. using `cub` with NVRTC
   Expr extend = tensor->shape[2] * tensor->shape[3];
   if (pool_type == "max") {
     auto temp = Compute(
diff --git a/paddle/cinn/ir/schedule_desc_test.cc b/paddle/cinn/ir/schedule_desc_test.cc
index 805d79230c6844775ad30e81e6f4f97969448077..efbfe0603dd54910a7ae5a10ab17ad309059f6d6 100644
--- a/paddle/cinn/ir/schedule_desc_test.cc
+++ b/paddle/cinn/ir/schedule_desc_test.cc
@@ -293,7 +293,8 @@ TEST_F(TestScheduleDesc, StepKind_GetBlock) {
   CheckTracingOutputs({block_b}, trace);
   CheckTracingOutputs({block_b}, ir_sch.GetTraceDesc());
 }
-// TODO: fix in future, as fix split var name, this case some problem.
+// TODO(SunNy820828449): fix in future, as fix split var name, this case some
+// problem.
 /*
 TEST_F(TestScheduleDesc, StepKind_Split) {
   lowered_funcs = LowerCompute({32, 32, 32}, target);
diff --git a/paddle/cinn/optim/vectorize_loops.cc b/paddle/cinn/optim/vectorize_loops.cc
index eb75b457e9a4fa8b84bc6a4bd31fe1614d996e15..4a4cbfcef6481eb300b8d602fedac3eab238164b 100644
--- a/paddle/cinn/optim/vectorize_loops.cc
+++ b/paddle/cinn/optim/vectorize_loops.cc
@@ -730,7 +730,7 @@ struct VectorizeLoops_ : public IRMutator {
     if (forloop->is_vectorized()) {
       Context::info_rgt().Get("vectorized_forloop_count")++;

-      CHECK(forloop->vectorize_info().factor > 0);
+      CHECK_GT(forloop->vectorize_info().factor, 0);
       CHECK(is_zero(forloop->min));
       Expr for_extent = common::AutoSimplify(forloop->extent);
diff --git a/paddle/cinn/runtime/tiny_runtime.cc b/paddle/cinn/runtime/tiny_runtime.cc
index efb4c2075ec0a9819d4628f3ad9dda203b696c98..65bb07759c0342c415aa745914804948c534afbb 100644
--- a/paddle/cinn/runtime/tiny_runtime.cc
+++ b/paddle/cinn/runtime/tiny_runtime.cc
@@ -58,10 +58,10 @@ void *load_program(const char *paramfile) {
   fclose(f);

   if (std::string(buf, buf + 4) != "CINN") {
-    // TODO LOG fatal
+    // TODO(hp03): LOG fatal
     return nullptr;
   }
-  // TODO check param file version
+  // TODO(hp03): check param file version
   ctx->major_v = *(int *)(buf + 4);
   ctx->minor_v = *(int *)(buf + 8);
diff --git a/paddle/cinn/utils/random_engine.h b/paddle/cinn/utils/random_engine.h
index 15797052a8cfb304ccb6600a2673d0b43387b315..b94dcda043e7d79edc180225d6eeb3e7e665adbb 100644
--- a/paddle/cinn/utils/random_engine.h
+++ b/paddle/cinn/utils/random_engine.h
@@ -109,7 +109,7 @@ double SampleUniformDouble(double min,
 template
 int SampleDiscreteFromDistribution(const std::vector& weights,
                                    LinearRandomEngine::StateType* rand_seed) {
-  CHECK(weights.size() > 0);
+  CHECK_GT(weights.size(), 0);
   LinearRandomEngine engine(rand_seed);
   std::discrete_distribution dist(weights.begin(), weights.end());
   return dist(engine);
diff --git a/test/cpp/cinn/concrete_program_builder.h b/test/cpp/cinn/concrete_program_builder.h
index 2aa688cd84f173c2c10d532f3882c6be79618f07..8da4bdab927c9a1114d795e7a450ba89e121406f 100644
--- a/test/cpp/cinn/concrete_program_builder.h
+++ b/test/cpp/cinn/concrete_program_builder.h
@@ -27,7 +27,7 @@ class BiasBnReLUBuilder : public ProgramBuilder {
   BiasBnReLUBuilder() : ProgramBuilder("bias_bn_relu_builder") {}
   frontend::Program Build(const std::vector& inputs_varinfo,
                           const utils::AttributeMap& attrs = {}) {
-    CHECK(inputs_varinfo.size() == 4);
+    CHECK_EQ(inputs_varinfo.size(), 4);
     auto conv_output = builder_.CreateInput(
         inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
     auto bias = builder_.CreateInput(
@@ -55,7 +55,7 @@ class ExpTwoConsumersOpBuilder : public ProgramBuilder {
   ExpTwoConsumersOpBuilder() : ProgramBuilder("exp_two_consumers_builder") {}
   frontend::Program Build(const std::vector& inputs_varinfo,
                           const utils::AttributeMap& attrs = {}) {
-    CHECK(inputs_varinfo.size() == 1);
+    CHECK_EQ(inputs_varinfo.size(), 1);
     auto x = builder_.CreateInput(
         inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
     auto exp_x = builder_.Exp(x);
@@ -76,7 +76,7 @@ class GatherAddSubBuilder : public ProgramBuilder {
   GatherAddSubBuilder() : ProgramBuilder("gather_add_sub_builder") {}
   frontend::Program Build(const std::vector& inputs_varinfo,
                           const utils::AttributeMap& attrs = {}) {
-    CHECK(inputs_varinfo.size() == 2);
+    CHECK_EQ(inputs_varinfo.size(), 2);
     auto x = builder_.CreateInput(
         inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
     auto y = builder_.CreateInput(
@@ -102,7 +102,7 @@ class FillConstantAddBuilder : public ProgramBuilder {
   FillConstantAddBuilder() : ProgramBuilder("fill_constant_add_builder") {}
   frontend::Program Build(const std::vector& inputs_varinfo,
                           const utils::AttributeMap& attrs = {}) {
-    CHECK(inputs_varinfo.size() == 1);
+    CHECK_EQ(inputs_varinfo.size(), 1);
     auto x = builder_.CreateInput(
         inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
     auto fill_constant =