Unverified commit b918100a, authored by a_weng, committed by GitHub


[CodeStyle][CINN] fix cinn cpplint codestyle about `[readability/todo]`, `[readability/check]` (#55022)
Parent 3e4c4d91
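For readers unfamiliar with these two cpplint rules, here is a minimal sketch of the before/after pattern this diff applies throughout. It is not part of the commit: the function, shapes, and owner name are illustrative, and the `CHECK*` macros are assumed to come from glog as used elsewhere in CINN.

```cpp
#include <vector>

#include "glog/logging.h"  // assumption: CHECK/CHECK_EQ/CHECK_GT come from glog

void CheckInputs(const std::vector<std::vector<int>>& inputs_shape) {
  // [readability/check]: prefer the comparison-specific macros over a
  // comparison wrapped in CHECK(), so a failure prints both operands.
  //   before: CHECK(inputs_shape.size() == 1UL);
  CHECK_EQ(inputs_shape.size(), 1UL);
  //   before: CHECK(inputs_shape[0].size() > 0);
  CHECK_GT(inputs_shape[0].size(), 0UL);

  // [readability/todo]: a TODO comment must name an owner in parentheses,
  // followed by a colon and a space.
  //   before: // TODO: support more input ranks
  // TODO(someone): support more input ranks
}
```

CHECK_EQ and friends are preferred because on failure they report the values of both operands, whereas a plain CHECK(a == b) only reports that the expression was false.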
@@ -153,7 +153,7 @@ void MultiLevelTiling::ApplyTiling(ir::IRSchedule* ir_schedule,
idx = &r_indices_;
} else {
idx = &s_indices_;
} // TODO: support more iterator variable types
} // TODO(zhhsplendid): support more iterator variable types
int extent = ir_for->extent.as_int32(); // maybe int64?
......
@@ -148,7 +148,7 @@ TEST(MultiLevelTile, SimpleLoops) {
test_func(&new_states[0]->ir_schedule);
}
// TODO: fix in future
// TODO(SunNy820828449): fix in future
/*
TEST(MulitLevelTile, MatrixMultiply) {
srand(0);
......
@@ -2190,7 +2190,7 @@ Expr CasSimplifyMutator::SimplifyFracOp(Expr expr) {
};
{
// TODO : fix in future.
// TODO(SunNy820828449): fix in future.
// std::vector<Expr> a_args, b_args;
// if (ap)
// a_args = ap->operands();
......
@@ -45,10 +45,10 @@ void ArgsortOpMapper(const paddle::cpp::OpDesc& op_desc,
ctx.AddVar(indices_name, idx);
ctx.AddVarModelToProgram(indices_name, idx->id);
// TODO: return the sorted tensor here. Now out[1] is a temporary tensor.
// this is because output 'Out' is never used in Paddle API, but CINN needs to
// return 2 output vars to meet the op definition, this should be resolved
// after sort op restructured.
// TODO(lanxianghit): return the sorted tensor here. Now out[1] is a temporary
// tensor. this is because output 'Out' is never used in Paddle API, but CINN
// needs to return 2 output vars to meet the op definition, this should be
// resolved after sort op restructured.
ctx.AddVar(out_name, out[1]);
ctx.AddVarModelToProgram(out_name, out[1]->id);
}
......
@@ -157,9 +157,9 @@ std::shared_ptr<framework::OpStrategy> StrategyForArgmax(
ir::IRSchedule ir_sch(mod_expr);
ir_sch.MergeExprs();
auto blocks = ir_sch.GetAllBlocks();
// TODO: It needs to be rewritten according to the reduction_max operator to
// improve performance. Do not use local variables, because the size will
// exceed the limit.
// TODO(zhhsplendid): It needs to be rewritten according to the
// reduction_max operator to improve performance. Do not use local
// variables, because the size will exceed the limit.
ir_sch.SetBuffer(blocks[0], "local");
ir_sch.SetBuffer(blocks[1], "local");
@@ -184,7 +184,7 @@ std::shared_ptr<framework::OpStrategy> StrategyForArgmax(
std::vector<shape_t> InferShapeForArgmax(
const std::vector<shape_t> &inputs_shape,
const framework::AttrMapType &attrs) {
CHECK(inputs_shape.size() == 1UL);
CHECK_EQ(inputs_shape.size(), 1UL);
auto ndim = inputs_shape[0].size();
CHECK_GT(ndim, 0) << "tensor's dim must be more than 0";
int axis;
......
@@ -155,9 +155,9 @@ std::shared_ptr<framework::OpStrategy> StrategyForArgmin(
ir::IRSchedule ir_sch(mod_expr);
ir_sch.MergeExprs();
auto blocks = ir_sch.GetAllBlocks();
// TODO: It needs to be rewritten according to the reduction_min operator to
// improve performance. Do not use local variables, because the size will
// exceed the limit.
// TODO(zhhsplendid): It needs to be rewritten according to the
// reduction_min operator to improve performance. Do not use local
// variables, because the size will exceed the limit.
ir_sch.SetBuffer(blocks[0], "local");
ir_sch.SetBuffer(blocks[1], "local");
int64_t prod_size = std::accumulate(output_shapes[0].begin(),
@@ -181,7 +181,7 @@ std::shared_ptr<framework::OpStrategy> StrategyForArgmin(
std::vector<shape_t> InferShapeForArgmin(
const std::vector<shape_t> &inputs_shape,
const framework::AttrMapType &attrs) {
CHECK(inputs_shape.size() == 1UL);
CHECK_EQ(inputs_shape.size(), 1UL);
auto ndim = inputs_shape[0].size();
CHECK_GT(ndim, 0) << "tensor's dim must be more than 0";
int axis;
......
@@ -213,8 +213,8 @@ std::shared_ptr<framework::OpStrategy> StrategyForSort(
ir::IRSchedule ir_sch(mod_expr);
ir_sch.MergeExprs();
auto blocks = ir_sch.GetAllBlocks();
// TODO: remove external calls, do not use local variables, because
// the size will exceed the limit.
// TODO(Shixiaowei02): remove external calls, do not use local variables,
// because the size will exceed the limit.
ir_sch.SetBuffer(blocks[0], "local");
ir_sch.SetBuffer(blocks[1], "local");
@@ -307,10 +307,11 @@ std::shared_ptr<framework::OpStrategy> StrategyForArgSort(
ir::IRSchedule ir_sch(mod_expr);
ir_sch.MergeExprs();
auto blocks = ir_sch.GetAllBlocks();
// TODO: remove external calls, do not use local variables, because
// the size will exceed the limit.
// TODO: There is a bug, setting buffer to "local" here will cause the var
// declared twice at CodeGen. ir_sch.SetBuffer(blocks[0], "local");
// TODO(Shixiaowei02): remove external calls, do not use local variables,
// because the size will exceed the limit.
// TODO(lanxianghit): There is a bug, setting buffer to "local" here will
// cause the var declared twice at CodeGen. ir_sch.SetBuffer(blocks[0],
// "local");
int64_t prod_size = std::accumulate(output_shapes[0].begin(),
output_shapes[0].end(),
1,
......
@@ -1716,7 +1716,7 @@ std::shared_ptr<OpStrategy> StrategyForPool2d(
*ret = CINNValuePack{res};
} else {
CINNValuePack arg_pack = args[0];
CHECK(arg_pack.size() == 3UL);
CHECK_EQ(arg_pack.size(), 3UL);
Expr out = arg_pack[0];
Expr reduce = arg_pack[1];
CHECK(out.as_tensor() && reduce.as_tensor());
@@ -1834,7 +1834,8 @@ std::shared_ptr<OpStrategy> StrategyForPool2d(
bool use_warp_reduce = false;
if (global_pooling && data_format == "NCHW" &&
target.arch == Target::Arch::NVGPU) {
// TODO 32 may not be the exact number, try also 16 or 8 or other number
// TODO(hp03): 32 may not be the exact number, try also 16 or 8 or other
// number
// we choose 32 to make sure all the threads in a warp has work to do,
if ((A_tensor->shape[2].as_int32() * A_tensor->shape[3].as_int32()) >= 32) {
use_warp_reduce = true;
......
@@ -103,7 +103,7 @@ class ReduceSplitPass {
auto in_shape = shape_dict.at(in->id());
auto out_shape = shape_dict.at(out->id());
// all preceding reduced
CHECK(in_shape.size() > 1);
CHECK_GT(in_shape.size(), 1);
// [NHWC]->[C], only the last dim kept
bool all_preceding_dim_reduced = true;
for (auto i = 0; i < in_shape.size() - 1; ++i) {
@@ -122,7 +122,7 @@ class ReduceSplitPass {
in_shape.begin(), in_shape.end(), 1, std::multiplies<int>());
int reduce_numel = std::accumulate(
in_shape.begin(), in_shape.end() - 1, 1, std::multiplies<int>());
CHECK(reduce_numel > 0);
CHECK_GT(reduce_numel, 0);
// if the numel is not large enough, it is no need to split
// if loop times is too large with reduce optimize
int size = std::accumulate(
......
@@ -1414,8 +1414,8 @@ std::vector<Tensor> Pool1d(const Tensor &tensor,
std::vector<Tensor> GlobalPool2d(const Tensor &tensor,
const std::string &pool_type,
const std::string &output_name) {
// TODO 1. check warp shuffle is supported!
// TODO 2. using `cub` with NVRTC
// TODO(hp03): 1. check warp shuffle is supported!
// TODO(hp03): 2. using `cub` with NVRTC
Expr extend = tensor->shape[2] * tensor->shape[3];
if (pool_type == "max") {
auto temp = Compute(
......
@@ -293,7 +293,8 @@ TEST_F(TestScheduleDesc, StepKind_GetBlock) {
CheckTracingOutputs({block_b}, trace);
CheckTracingOutputs({block_b}, ir_sch.GetTraceDesc());
}
// TODO: fix in future, as fix split var name, this case some problem.
// TODO(SunNy820828449): fix in future, as fix split var name, this case some
// problem.
/*
TEST_F(TestScheduleDesc, StepKind_Split) {
lowered_funcs = LowerCompute({32, 32, 32}, target);
......
@@ -730,7 +730,7 @@ struct VectorizeLoops_ : public IRMutator<Expr *> {
if (forloop->is_vectorized()) {
Context::info_rgt().Get<int>("vectorized_forloop_count")++;
CHECK(forloop->vectorize_info().factor > 0);
CHECK_GT(forloop->vectorize_info().factor, 0);
CHECK(is_zero(forloop->min));
Expr for_extent = common::AutoSimplify(forloop->extent);
......
@@ -58,10 +58,10 @@ void *load_program(const char *paramfile) {
fclose(f);
if (std::string(buf, buf + 4) != "CINN") {
// TODO LOG fatal
// TODO(hp03): LOG fatal
return nullptr;
}
// TODO check param file version
// TODO(hp03): check param file version
ctx->major_v = *(int *)(buf + 4);
ctx->minor_v = *(int *)(buf + 8);
......
@@ -109,7 +109,7 @@ double SampleUniformDouble(double min,
template <typename T>
int SampleDiscreteFromDistribution(const std::vector<T>& weights,
LinearRandomEngine::StateType* rand_seed) {
CHECK(weights.size() > 0);
CHECK_GT(weights.size(), 0);
LinearRandomEngine engine(rand_seed);
std::discrete_distribution<int> dist(weights.begin(), weights.end());
return dist(engine);
......
@@ -27,7 +27,7 @@ class BiasBnReLUBuilder : public ProgramBuilder {
BiasBnReLUBuilder() : ProgramBuilder("bias_bn_relu_builder") {}
frontend::Program Build(const std::vector<VariableInfo>& inputs_varinfo,
const utils::AttributeMap& attrs = {}) {
CHECK(inputs_varinfo.size() == 4);
CHECK_EQ(inputs_varinfo.size(), 4);
auto conv_output = builder_.CreateInput(
inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
auto bias = builder_.CreateInput(
@@ -55,7 +55,7 @@ class ExpTwoConsumersOpBuilder : public ProgramBuilder {
ExpTwoConsumersOpBuilder() : ProgramBuilder("exp_two_consumers_builder") {}
frontend::Program Build(const std::vector<VariableInfo>& inputs_varinfo,
const utils::AttributeMap& attrs = {}) {
CHECK(inputs_varinfo.size() == 1);
CHECK_EQ(inputs_varinfo.size(), 1);
auto x = builder_.CreateInput(
inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
auto exp_x = builder_.Exp(x);
@@ -76,7 +76,7 @@ class GatherAddSubBuilder : public ProgramBuilder {
GatherAddSubBuilder() : ProgramBuilder("gather_add_sub_builder") {}
frontend::Program Build(const std::vector<VariableInfo>& inputs_varinfo,
const utils::AttributeMap& attrs = {}) {
CHECK(inputs_varinfo.size() == 2);
CHECK_EQ(inputs_varinfo.size(), 2);
auto x = builder_.CreateInput(
inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
auto y = builder_.CreateInput(
@@ -102,7 +102,7 @@ class FillConstantAddBuilder : public ProgramBuilder {
FillConstantAddBuilder() : ProgramBuilder("fill_constant_add_builder") {}
frontend::Program Build(const std::vector<VariableInfo>& inputs_varinfo,
const utils::AttributeMap& attrs = {}) {
CHECK(inputs_varinfo.size() == 1);
CHECK_EQ(inputs_varinfo.size(), 1);
auto x = builder_.CreateInput(
inputs_varinfo[0].type, inputs_varinfo[0].shape, inputs_varinfo[0].id);
auto fill_constant =
......