From ef19521c82db48e9ce209ed734b5b5f3486b5859 Mon Sep 17 00:00:00 2001 From: Difer <707065510@qq.com> Date: Mon, 3 Jul 2023 11:33:48 +0800 Subject: [PATCH] [CodeStyle][CINN] fix cinn codestyle cpplint `[runtime/explicit]` (#55036) * fix cinn cpplint [runtime/explicit] * mark shared nolint * add a nolint and fix typo raito -> ratio --------- Co-authored-by: SigureMo --- paddle/cinn/auto_schedule/cost_model/feature.h | 2 +- paddle/cinn/auto_schedule/database/database.h | 2 +- paddle/cinn/auto_schedule/measure/simple_builder.h | 2 +- paddle/cinn/auto_schedule/measure/simple_runner.h | 2 +- .../search_space/auto_gen_rule/auto_bind.h | 2 +- .../search_space/auto_gen_rule/auto_gen_rule.h | 2 +- .../search_space/auto_gen_rule/auto_unroll.h | 2 +- .../search_space/auto_gen_rule/skip_rule.h | 2 +- .../cinn/auto_schedule/search_space/search_state.cc | 2 +- .../search_strategy/evolutionary_search_test.cc | 3 ++- paddle/cinn/auto_schedule/task/task_optimizer.h | 2 +- paddle/cinn/auto_schedule/task/tune_task.h | 2 +- paddle/cinn/common/shared.h | 2 +- paddle/cinn/frontend/pass/pass_test_helper.h | 2 +- paddle/cinn/frontend/program_pass.h | 2 +- paddle/cinn/hlir/framework/op_lowering_util.cc | 2 +- paddle/cinn/hlir/pass/check_fusion_accuracy_pass.cc | 4 ++-- paddle/cinn/hlir/pass/constant_folding_pass.cc | 2 +- paddle/cinn/hlir/pass/custom_call_pass.cc | 2 +- paddle/cinn/hlir/pass/dce_pass.cc | 2 +- paddle/cinn/hlir/pass/dense_merge_pass.cc | 3 ++- paddle/cinn/hlir/pass/fusion_helper_base.h | 2 +- paddle/cinn/hlir/pass/fusion_merge_pass.cc | 2 +- paddle/cinn/hlir/pass/op_fusion_pass.cc | 2 +- paddle/cinn/hlir/pass/single_group_optimize_pass.cc | 2 +- paddle/cinn/hlir/pe/schedule.h | 2 +- paddle/cinn/ir/collect_ir_nodes.cc | 4 ++-- paddle/cinn/ir/function_base.h | 2 +- paddle/cinn/ir/ir_schedule.cc | 5 +++-- paddle/cinn/ir/ir_schedule_util.h | 7 ++++--- paddle/cinn/ir/schedule_desc.h | 4 ++-- paddle/cinn/optim/buffer_assign.cc | 5 +++-- paddle/cinn/optim/ir_simplify.cc | 4 ++-- 
paddle/cinn/optim/replace_var_with_expr.cc | 2 +- paddle/cinn/poly/dim.h | 2 +- paddle/cinn/runtime/buffer.h | 2 +- paddle/cinn/runtime/cuda/cuda_util.cc | 2 +- paddle/cinn/utils/event.cc | 8 ++++---- paddle/cinn/utils/event.h | 12 ++++++------ paddle/cinn/utils/profiler.h | 3 ++- paddle/cinn/utils/random_engine.h | 2 +- paddle/cinn/utils/sized_multi_set.h | 2 +- test/cpp/cinn/program_builder.h | 2 +- 43 files changed, 65 insertions(+), 59 deletions(-) diff --git a/paddle/cinn/auto_schedule/cost_model/feature.h b/paddle/cinn/auto_schedule/cost_model/feature.h index 8b1b59d92c6..52e43d8ed80 100644 --- a/paddle/cinn/auto_schedule/cost_model/feature.h +++ b/paddle/cinn/auto_schedule/cost_model/feature.h @@ -134,7 +134,7 @@ class Feature { public: Feature(); - Feature(const common::Target& target); + explicit Feature(const common::Target& target); // Convert the various-length loop block features to fixed-size vector std::vector ToFixedSizeVector(); diff --git a/paddle/cinn/auto_schedule/database/database.h b/paddle/cinn/auto_schedule/database/database.h index 2893daa9e4a..dc82475bfd3 100644 --- a/paddle/cinn/auto_schedule/database/database.h +++ b/paddle/cinn/auto_schedule/database/database.h @@ -34,7 +34,7 @@ struct TuningRecord { double execution_cost; // unit: us TuningRecord() = default; - TuningRecord(const proto::TuningRecord& record) + explicit TuningRecord(const proto::TuningRecord& record) : task_key(record.task_key()), predicted_cost(record.predicted_cost()), trace(record.trace()), diff --git a/paddle/cinn/auto_schedule/measure/simple_builder.h b/paddle/cinn/auto_schedule/measure/simple_builder.h index ca098d0ef6f..4a15a1349b0 100644 --- a/paddle/cinn/auto_schedule/measure/simple_builder.h +++ b/paddle/cinn/auto_schedule/measure/simple_builder.h @@ -24,7 +24,7 @@ namespace auto_schedule { // the input schedule as executable objects class SimpleBuilder : public ScheduleBuilder { public: - SimpleBuilder(hlir::framework::GraphCompiler* graph_compiler); + 
explicit SimpleBuilder(hlir::framework::GraphCompiler* graph_compiler); // Build and pack the result BuildResult Build(const MeasureInput& input) override; diff --git a/paddle/cinn/auto_schedule/measure/simple_runner.h b/paddle/cinn/auto_schedule/measure/simple_runner.h index d466c71b447..9926f45792d 100644 --- a/paddle/cinn/auto_schedule/measure/simple_runner.h +++ b/paddle/cinn/auto_schedule/measure/simple_runner.h @@ -24,7 +24,7 @@ namespace auto_schedule { // kernels and count the elapsed time as the measurement of performance class SimpleRunner : public ScheduleRunner { public: - SimpleRunner(int repeat_times); + explicit SimpleRunner(int repeat_times); MeasureResult Run(const MeasureInput& input, const BuildResult& build_result) override; diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_bind.h b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_bind.h index e4dfb59e09f..9793e5a5ef3 100644 --- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_bind.h +++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_bind.h @@ -27,7 +27,7 @@ namespace auto_schedule { // Auto bind GPU index(BlockIdx, ThreadIdx) to the loops around the block class AutoBind : public AutoGenRule { public: - AutoBind(const common::Target& target) : AutoGenRule(target) {} + explicit AutoBind(const common::Target& target) : AutoGenRule(target) {} ~AutoBind() = default; RuleApplyType Init(ir::IRSchedule* init_schedule) override; diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_gen_rule.h b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_gen_rule.h index 6b74861637c..bf6d3abbf9d 100644 --- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_gen_rule.h +++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_gen_rule.h @@ -45,7 +45,7 @@ enum class RuleApplyType : int { */ class AutoGenRule { public: - AutoGenRule(const common::Target& target); + explicit AutoGenRule(const common::Target& target); 
~AutoGenRule() = default; // Initialize the AutoGenRule, it must be called before further actions. diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_unroll.h b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_unroll.h index ee2f2f1ea42..517b5ee5b9b 100644 --- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_unroll.h +++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_unroll.h @@ -31,7 +31,7 @@ namespace auto_schedule { // based on actual situation. class AutoUnroll : public AutoGenRule { public: - AutoUnroll(const common::Target& target) : AutoGenRule(target) {} + explicit AutoUnroll(const common::Target& target) : AutoGenRule(target) {} ~AutoUnroll() = default; RuleApplyType Init(ir::IRSchedule* init_schedule) override; diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/skip_rule.h b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/skip_rule.h index 41564a5202b..c4280b5569f 100644 --- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/skip_rule.h +++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/skip_rule.h @@ -25,7 +25,7 @@ namespace auto_schedule { class SkipRule : public AutoGenRule { public: - SkipRule(const common::Target& target); + explicit SkipRule(const common::Target& target); ~SkipRule() = default; RuleApplyType Init(ir::IRSchedule* init_schedule) override; diff --git a/paddle/cinn/auto_schedule/search_space/search_state.cc b/paddle/cinn/auto_schedule/search_space/search_state.cc index 5812a6e936a..973270f493e 100644 --- a/paddle/cinn/auto_schedule/search_space/search_state.cc +++ b/paddle/cinn/auto_schedule/search_space/search_state.cc @@ -91,7 +91,7 @@ class DfsWithExprsFields : public ir::IRVisitor { // Generate a reduce hash of a AST tree by combining hash of each AST node class IrNodesStructuralHash : public DfsWithExprsFields { public: - IrNodesStructuralHash(size_t init_key) : hash_key_(init_key) {} + explicit IrNodesStructuralHash(size_t init_key) : 
hash_key_(init_key) {} size_t operator()(const Expr* expr) { Visit(expr); return hash_key_; diff --git a/paddle/cinn/auto_schedule/search_strategy/evolutionary_search_test.cc b/paddle/cinn/auto_schedule/search_strategy/evolutionary_search_test.cc index 23743384c71..ab1abc0c477 100644 --- a/paddle/cinn/auto_schedule/search_strategy/evolutionary_search_test.cc +++ b/paddle/cinn/auto_schedule/search_strategy/evolutionary_search_test.cc @@ -64,7 +64,8 @@ std::vector CreateTasks(const frontend::Program& program, */ class MockSearchSpace : public SearchSpace { public: - MockSearchSpace(const TuneTask& tune_task) : SearchSpace(tune_task) {} + explicit MockSearchSpace(const TuneTask& tune_task) + : SearchSpace(tune_task) {} int GetMinExprValue() const { return min_expr_value_; } diff --git a/paddle/cinn/auto_schedule/task/task_optimizer.h b/paddle/cinn/auto_schedule/task/task_optimizer.h index 849a8f423bc..ac49686463f 100644 --- a/paddle/cinn/auto_schedule/task/task_optimizer.h +++ b/paddle/cinn/auto_schedule/task/task_optimizer.h @@ -45,7 +45,7 @@ class TaskOptimizer { std::string from; double cost; FunctionGroup functions; - Result(const std::string& from_type) + explicit Result(const std::string& from_type) : from(from_type), cost(std::numeric_limits::max()) {} }; diff --git a/paddle/cinn/auto_schedule/task/tune_task.h b/paddle/cinn/auto_schedule/task/tune_task.h index 2921f41a0f5..5c3403d4278 100644 --- a/paddle/cinn/auto_schedule/task/tune_task.h +++ b/paddle/cinn/auto_schedule/task/tune_task.h @@ -36,7 +36,7 @@ namespace auto_schedule { class TuneTask { public: TuneTask() = default; - TuneTask(std::shared_ptr group) + explicit TuneTask(std::shared_ptr group) : subgraph(group) {} // Initialize a task void Initialize( diff --git a/paddle/cinn/common/shared.h b/paddle/cinn/common/shared.h index 6c2e042ca63..cf810f14c59 100644 --- a/paddle/cinn/common/shared.h +++ b/paddle/cinn/common/shared.h @@ -55,7 +55,7 @@ struct Shared { using object_ptr = T*; Shared() = default; - 
Shared(T* p) : p_(p) { + Shared(T* p) : p_(p) { // NOLINT if (p) IncRef(p); } Shared(const Shared& other) : p_(other.p_) { IncRef(p_); } diff --git a/paddle/cinn/frontend/pass/pass_test_helper.h b/paddle/cinn/frontend/pass/pass_test_helper.h index 5c57835e1f1..e56746a8636 100644 --- a/paddle/cinn/frontend/pass/pass_test_helper.h +++ b/paddle/cinn/frontend/pass/pass_test_helper.h @@ -143,7 +143,7 @@ inline std::vector RunProgram( struct OptimizeConfig { struct PassGroup; - OptimizeConfig(const PassGroup& program_passes) + explicit OptimizeConfig(const PassGroup& program_passes) : program_passes{program_passes} { if (FLAGS_cinn_use_op_fusion) { graph_passes = {{"OpFusionPass", "FusionMergePass"}, diff --git a/paddle/cinn/frontend/program_pass.h b/paddle/cinn/frontend/program_pass.h index 4e54c70fc5e..ecdb23ef2e1 100755 --- a/paddle/cinn/frontend/program_pass.h +++ b/paddle/cinn/frontend/program_pass.h @@ -28,7 +28,7 @@ namespace frontend { class ProgramPass { public: - ProgramPass(const std::string& name) : name_(name) {} + explicit ProgramPass(const std::string& name) : name_(name) {} /** * \brief Apply a sequence of passes on a program. diff --git a/paddle/cinn/hlir/framework/op_lowering_util.cc b/paddle/cinn/hlir/framework/op_lowering_util.cc index 807d70eb864..88963f5b989 100644 --- a/paddle/cinn/hlir/framework/op_lowering_util.cc +++ b/paddle/cinn/hlir/framework/op_lowering_util.cc @@ -1185,7 +1185,7 @@ void LoopAssignReduce( // The struct used to remove the original block in ComputeAt. 
class RemoveExpr : public ir::IRMutator<> { public: - RemoveExpr(const Expr& target) : target_(target) {} + explicit RemoveExpr(const Expr& target) : target_(target) {} void operator()(Expr* expr) { IRMutator::Visit(expr, expr); } diff --git a/paddle/cinn/hlir/pass/check_fusion_accuracy_pass.cc b/paddle/cinn/hlir/pass/check_fusion_accuracy_pass.cc index 3573adb8101..52a5d128860 100644 --- a/paddle/cinn/hlir/pass/check_fusion_accuracy_pass.cc +++ b/paddle/cinn/hlir/pass/check_fusion_accuracy_pass.cc @@ -50,7 +50,7 @@ using DtypeDict = absl::flat_hash_map; namespace utils { class AssertMsg { public: - AssertMsg(int group_id) : group_id_(group_id) {} + explicit AssertMsg(int group_id) : group_id_(group_id) {} void SetMsg(const std::string& title, const std::string& msg) { msg_info_[title] = msg; @@ -80,7 +80,7 @@ class AssertMsg { class CheckFusionAccuracyPass { public: - CheckFusionAccuracyPass(Graph* graph) + explicit CheckFusionAccuracyPass(Graph* graph) : graph_(graph), shape_dict_(graph_->GetMutableAttrs("infershape")), dtype_dict_(graph_->GetMutableAttrs("inferdtype")) {} diff --git a/paddle/cinn/hlir/pass/constant_folding_pass.cc b/paddle/cinn/hlir/pass/constant_folding_pass.cc index e0396b13739..50a76f54cb3 100644 --- a/paddle/cinn/hlir/pass/constant_folding_pass.cc +++ b/paddle/cinn/hlir/pass/constant_folding_pass.cc @@ -35,7 +35,7 @@ using AlterFunction = // class ConstantFoldingPassHelper : public FusionHelperBase { public: - ConstantFoldingPassHelper(Graph* graph) + explicit ConstantFoldingPassHelper(Graph* graph) : FusionHelperBase(graph), graph_(graph) { RegisterAlterFunction(); } diff --git a/paddle/cinn/hlir/pass/custom_call_pass.cc b/paddle/cinn/hlir/pass/custom_call_pass.cc index 2d47a211c6b..99906837881 100644 --- a/paddle/cinn/hlir/pass/custom_call_pass.cc +++ b/paddle/cinn/hlir/pass/custom_call_pass.cc @@ -30,7 +30,7 @@ using framework::NodeData; class GraphAlterHelper { public: - GraphAlterHelper(Graph* graph) : graph_(graph) { + explicit 
GraphAlterHelper(Graph* graph) : graph_(graph) { if (!FLAGS_cinn_custom_call_deny_ops.empty()) { auto splited_names = cinn::utils::Split(FLAGS_cinn_custom_call_deny_ops, ";"); diff --git a/paddle/cinn/hlir/pass/dce_pass.cc b/paddle/cinn/hlir/pass/dce_pass.cc index 32e6e952ed6..fd439c1e97c 100644 --- a/paddle/cinn/hlir/pass/dce_pass.cc +++ b/paddle/cinn/hlir/pass/dce_pass.cc @@ -38,7 +38,7 @@ using ConditionFunction = class DceHelper : public FusionHelperBase { public: - DceHelper(Graph* graph) : FusionHelperBase(graph), graph_(graph) {} + explicit DceHelper(Graph* graph) : FusionHelperBase(graph), graph_(graph) {} void operator()() { if (output_nodes_set_.empty()) { diff --git a/paddle/cinn/hlir/pass/dense_merge_pass.cc b/paddle/cinn/hlir/pass/dense_merge_pass.cc index 3ffd7fb369e..c8433f3a85f 100644 --- a/paddle/cinn/hlir/pass/dense_merge_pass.cc +++ b/paddle/cinn/hlir/pass/dense_merge_pass.cc @@ -31,7 +31,8 @@ using framework::NodeAttr; class DenseMergePassHelper : public FusionHelperBase { public: - DenseMergePassHelper(Graph* graph) : FusionHelperBase(graph), graph_(graph) {} + explicit DenseMergePassHelper(Graph* graph) + : FusionHelperBase(graph), graph_(graph) {} void operator()() { auto nodes_inorder = std::get<0>(graph_->topological_order()); diff --git a/paddle/cinn/hlir/pass/fusion_helper_base.h b/paddle/cinn/hlir/pass/fusion_helper_base.h index c15abaa8b9a..d3c9e5c0755 100644 --- a/paddle/cinn/hlir/pass/fusion_helper_base.h +++ b/paddle/cinn/hlir/pass/fusion_helper_base.h @@ -37,7 +37,7 @@ using framework::shape_t; class FusionHelperBase { public: - FusionHelperBase(const framework::Graph* graph) + explicit FusionHelperBase(const framework::Graph* graph) : shape_dict_(graph->GetAttrs>( "infershape")), target_(graph->target_) { diff --git a/paddle/cinn/hlir/pass/fusion_merge_pass.cc b/paddle/cinn/hlir/pass/fusion_merge_pass.cc index af286547037..dc09bd5c7b5 100644 --- a/paddle/cinn/hlir/pass/fusion_merge_pass.cc +++ 
b/paddle/cinn/hlir/pass/fusion_merge_pass.cc @@ -44,7 +44,7 @@ using ConditionFunction = std::functionfusion_groups; // init fusion relation. InitFusionRelation(); diff --git a/paddle/cinn/hlir/pass/op_fusion_pass.cc b/paddle/cinn/hlir/pass/op_fusion_pass.cc index 6648cb03613..302c9b71b5a 100644 --- a/paddle/cinn/hlir/pass/op_fusion_pass.cc +++ b/paddle/cinn/hlir/pass/op_fusion_pass.cc @@ -40,7 +40,7 @@ using ConditionFunction = // code generation. class OpFusionPassHelper : public FusionHelperBase { public: - OpFusionPassHelper(const Graph* graph) : FusionHelperBase(graph) { + explicit OpFusionPassHelper(const Graph* graph) : FusionHelperBase(graph) { // init fusion relation InitFusionRelation(); // filter node data, create group for each node diff --git a/paddle/cinn/hlir/pass/single_group_optimize_pass.cc b/paddle/cinn/hlir/pass/single_group_optimize_pass.cc index c79dbfcb5ef..1f8982192cd 100644 --- a/paddle/cinn/hlir/pass/single_group_optimize_pass.cc +++ b/paddle/cinn/hlir/pass/single_group_optimize_pass.cc @@ -46,7 +46,7 @@ bool IsValueZero(cinn::utils::Attribute value) { class SingleGroupOptimizePass { public: - SingleGroupOptimizePass(Graph* graph); + explicit SingleGroupOptimizePass(Graph* graph); std::vector> Apply(); diff --git a/paddle/cinn/hlir/pe/schedule.h b/paddle/cinn/hlir/pe/schedule.h index 3c05084335c..c22c8fbb7a9 100644 --- a/paddle/cinn/hlir/pe/schedule.h +++ b/paddle/cinn/hlir/pe/schedule.h @@ -54,7 +54,7 @@ class ScheduleParam { int Count(const std::string &key) { return param_data.count(key); } private: - ScheduleParam(common::Target::Arch arch); + explicit ScheduleParam(common::Target::Arch arch); absl::flat_hash_map>> param_data; diff --git a/paddle/cinn/ir/collect_ir_nodes.cc b/paddle/cinn/ir/collect_ir_nodes.cc index afbf99e59d9..4c00ac975cc 100644 --- a/paddle/cinn/ir/collect_ir_nodes.cc +++ b/paddle/cinn/ir/collect_ir_nodes.cc @@ -151,7 +151,7 @@ std::set CollectLoadTensors(Expr x, struct Mutator : public ir::IRMutator { std::function 
teller; std::set exprs; - Mutator(std::function&& teller) + explicit Mutator(std::function&& teller) : teller(std::move(teller)) {} void operator()(const Expr* expr) { @@ -175,7 +175,7 @@ std::set CollectStoreTensors(Expr x, struct Mutator : public ir::IRMutator { std::function teller; std::set exprs; - Mutator(std::function&& teller) + explicit Mutator(std::function&& teller) : teller(std::move(teller)) {} void operator()(const Expr* expr) { diff --git a/paddle/cinn/ir/function_base.h b/paddle/cinn/ir/function_base.h index dd4be902da2..78cef3d5066 100644 --- a/paddle/cinn/ir/function_base.h +++ b/paddle/cinn/ir/function_base.h @@ -27,7 +27,7 @@ class FunctionBase : public IrNode { class FunctionRef : public IrNodeRef { public: FunctionRef() = default; - FunctionRef(IrNode* n) : IrNodeRef(n) {} + explicit FunctionRef(IrNode* n) : IrNodeRef(n) {} }; } // namespace ir diff --git a/paddle/cinn/ir/ir_schedule.cc b/paddle/cinn/ir/ir_schedule.cc index 0214c83bb3c..04041c9d8c3 100644 --- a/paddle/cinn/ir/ir_schedule.cc +++ b/paddle/cinn/ir/ir_schedule.cc @@ -1216,7 +1216,7 @@ struct LoopReconstructor : public ir::IRMutator<> { struct FixLocalBufferSize : public ir::IRMutator<> { public: - FixLocalBufferSize(const std::string& tensor_name) + explicit FixLocalBufferSize(const std::string& tensor_name) : tensor_name_(tensor_name) {} void operator()(Expr* expr) { IRMutator::Visit(expr, expr); } @@ -1697,7 +1697,8 @@ void ScheduleImpl::ReverseComputeInline(const Expr& schedule_block) { struct FindBlockParent : public ir::IRMutator<> { public: - FindBlockParent(const std::string& block_name) : block_name_(block_name) {} + explicit FindBlockParent(const std::string& block_name) + : block_name_(block_name) {} void operator()(Expr* expr) { IRMutator::Visit(expr, expr); } diff --git a/paddle/cinn/ir/ir_schedule_util.h b/paddle/cinn/ir/ir_schedule_util.h index 802a134d23b..0107054e413 100644 --- a/paddle/cinn/ir/ir_schedule_util.h +++ b/paddle/cinn/ir/ir_schedule_util.h @@ -45,7 
+45,8 @@ struct CompVar { }; struct MappingVarToExprMutator : public ir::IRMutator<> { - MappingVarToExprMutator(const std::map& replacing_map) + explicit MappingVarToExprMutator( + const std::map& replacing_map) : replacing_map_(replacing_map) {} void operator()(Expr* expr) { IRMutator::Visit(expr, expr); } @@ -62,7 +63,7 @@ struct MappingVarToExprMutator : public ir::IRMutator<> { }; struct FindLoopsVisitor { - FindLoopsVisitor(const Expr& block) : block_(block) {} + explicit FindLoopsVisitor(const Expr& block) : block_(block) {} std::vector operator()(const Expr* expr) { CHECK(block_.As()); @@ -112,7 +113,7 @@ struct FindLoopsVisitor { Tensor GetTensor(const Expr& block); struct FindBlocksVisitor { - FindBlocksVisitor(const std::string& block_name = "") + explicit FindBlocksVisitor(const std::string& block_name = "") : block_name_(block_name) {} std::vector operator()(const Expr* expr) { diff --git a/paddle/cinn/ir/schedule_desc.h b/paddle/cinn/ir/schedule_desc.h index 9cac7ac8781..c166479e695 100644 --- a/paddle/cinn/ir/schedule_desc.h +++ b/paddle/cinn/ir/schedule_desc.h @@ -66,9 +66,9 @@ class ScheduleDesc { ScheduleDesc() = default; - ScheduleDesc(const std::vector& steps) : steps_(steps) {} + explicit ScheduleDesc(const std::vector& steps) : steps_(steps) {} - ScheduleDesc(std::vector&& steps) : steps_(steps) {} + explicit ScheduleDesc(std::vector&& steps) : steps_(steps) {} // Append a new step void Append(Step&& step); diff --git a/paddle/cinn/optim/buffer_assign.cc b/paddle/cinn/optim/buffer_assign.cc index f5f0e47a68f..dcfd6d8b8fb 100644 --- a/paddle/cinn/optim/buffer_assign.cc +++ b/paddle/cinn/optim/buffer_assign.cc @@ -26,7 +26,7 @@ namespace optim { namespace { struct BufferUFNode : public common::UnionFindNode { - BufferUFNode(const std::string& x) : tensor_name(x) {} + explicit BufferUFNode(const std::string& x) : tensor_name(x) {} const char* type_info() const override { return __type_info__; } @@ -38,7 +38,8 @@ const char* 
BufferUFNode::__type_info__ = "BufferUFNode"; struct IRReplaceTensorMutator : ir::IRMutator<> { const std::map& tensor_map; - IRReplaceTensorMutator(const std::map& tensor_map) + explicit IRReplaceTensorMutator( + const std::map& tensor_map) : tensor_map(tensor_map) {} void operator()(Expr* expr) { ir::IRMutator<>::Visit(expr, expr); } diff --git a/paddle/cinn/optim/ir_simplify.cc b/paddle/cinn/optim/ir_simplify.cc index a101482b64f..48645690f9d 100644 --- a/paddle/cinn/optim/ir_simplify.cc +++ b/paddle/cinn/optim/ir_simplify.cc @@ -261,7 +261,7 @@ struct ReplaceFracWithDivMutator : public ir::IRMutator<> { }; struct SimplifyBlocksMutator : public ir::IRMutator<> { - explicit SimplifyBlocksMutator() {} + SimplifyBlocksMutator() {} void operator()(Expr* x) { ir::IRMutator::Visit(x, x); } @@ -320,7 +320,7 @@ struct SimplifyBlocksMutator : public ir::IRMutator<> { struct SimplifyForLoopsMutator : public ir::IRMutator<> { absl::flat_hash_map var_intervals; - explicit SimplifyForLoopsMutator() {} + SimplifyForLoopsMutator() {} void operator()(Expr* x) { ir::IRMutator::Visit(x, x); } diff --git a/paddle/cinn/optim/replace_var_with_expr.cc b/paddle/cinn/optim/replace_var_with_expr.cc index 10dc22c80b0..f994eeb2e2a 100644 --- a/paddle/cinn/optim/replace_var_with_expr.cc +++ b/paddle/cinn/optim/replace_var_with_expr.cc @@ -116,7 +116,7 @@ void ReplaceVarWithExpr(Expr* source, } struct CollectTensorIndexMutator : public ir::IRMutator<> { - CollectTensorIndexMutator(const std::string& tensor_name) + explicit CollectTensorIndexMutator(const std::string& tensor_name) : tensor_name_(tensor_name) {} std::vector> operator()(Expr* expr) { diff --git a/paddle/cinn/poly/dim.h b/paddle/cinn/poly/dim.h index 6b197eaf214..5ae7ee7a897 100644 --- a/paddle/cinn/poly/dim.h +++ b/paddle/cinn/poly/dim.h @@ -46,7 +46,7 @@ struct Dim { value_t upper_bound; //! Construct a parameter. - Dim(std::string id) : id(std::move(id)) {} + explicit Dim(std::string id) : id(std::move(id)) {} //! 
Construct a dimension with integer range. Dim(std::string id, uint32_t lower_bound, uint32_t upper_bound) diff --git a/paddle/cinn/runtime/buffer.h b/paddle/cinn/runtime/buffer.h index c3eb5c43b58..b211389c6dc 100755 --- a/paddle/cinn/runtime/buffer.h +++ b/paddle/cinn/runtime/buffer.h @@ -64,7 +64,7 @@ struct Shape { template class Buffer { public: - Buffer(const Shape& shape) : shape_(shape) {} + explicit Buffer(const Shape& shape) : shape_(shape) {} //! Allocate the memory in host device. void AllocHost() { diff --git a/paddle/cinn/runtime/cuda/cuda_util.cc b/paddle/cinn/runtime/cuda/cuda_util.cc index 331e6786895..6fb82ccb8a0 100644 --- a/paddle/cinn/runtime/cuda/cuda_util.cc +++ b/paddle/cinn/runtime/cuda/cuda_util.cc @@ -1972,7 +1972,7 @@ class CurandGenerator { CURAND_CALL(curandCreateGenerator(&generator_, CURAND_RNG_PSEUDO_DEFAULT)); } - CurandGenerator(curandRngType rng_type) { + explicit CurandGenerator(curandRngType rng_type) { CURAND_CALL(curandCreateGenerator(&generator_, rng_type)); } diff --git a/paddle/cinn/utils/event.cc b/paddle/cinn/utils/event.cc index 6f319bf5e44..ca06ae73c67 100644 --- a/paddle/cinn/utils/event.cc +++ b/paddle/cinn/utils/event.cc @@ -76,9 +76,9 @@ std::string Summary::Format(const std::vector &events) { } // Calculate Ratio for (auto &item : items) { - item.sub_raito = + item.sub_ratio = item.info.duration_ / category_cost[item.info.type_] * 100.0; - item.total_raito = item.info.duration_ / total_cost * 100.0; + item.total_ratio = item.info.duration_ / total_cost * 100.0; } std::sort(items.begin(), items.end()); @@ -113,8 +113,8 @@ std::string Summary::AsStr(const std::vector &items, int data_width) { std::vector infos = {EventTypeToString(item.info.type_), item.info.annotation_, std::to_string(item.info.duration_), - item.sub_raito.ToStr(), - item.total_raito.ToStr()}; + item.sub_ratio.ToStr(), + item.total_ratio.ToStr()}; idx = 0; for (auto &info : infos) { pad_size = widths[idx] > info.size() ? 
widths[idx] - info.size() : 1; diff --git a/paddle/cinn/utils/event.h b/paddle/cinn/utils/event.h index a87dcf4828e..5d7b8113a1d 100644 --- a/paddle/cinn/utils/event.h +++ b/paddle/cinn/utils/event.h @@ -67,20 +67,20 @@ struct HostEvent { class Summary { public: - struct Raito { + struct Ratio { double value; - Raito(double val) : value(val) {} + Ratio(double val) : value(val) {} // NOLINT std::string ToStr() const { return std::to_string(value); } }; struct Item { HostEvent info; - Raito sub_raito{0.0}; // percentage of EventType - Raito total_raito{0.0}; // precentage of total process + Ratio sub_ratio{0.0}; // percentage of EventType + Ratio total_ratio{0.0}; // percentage of total process - Item(const HostEvent& e) : info(e) {} + explicit Item(const HostEvent& e) : info(e) {} bool operator<(const Item& other) const { - return total_raito.value > other.total_raito.value; + return total_ratio.value > other.total_ratio.value; } }; diff --git a/paddle/cinn/utils/profiler.h b/paddle/cinn/utils/profiler.h index 369e9749e33..e629948e304 100644 --- a/paddle/cinn/utils/profiler.h +++ b/paddle/cinn/utils/profiler.h @@ -66,7 +66,8 @@ class RecordEvent { using CallBack = std::function; public: - RecordEvent(const std::string& name, EventType type = EventType::kOrdinary); + explicit RecordEvent(const std::string& name, + EventType type = EventType::kOrdinary); void End(); diff --git a/paddle/cinn/utils/random_engine.h b/paddle/cinn/utils/random_engine.h index b94dcda043e..05162b288c7 100644 --- a/paddle/cinn/utils/random_engine.h +++ b/paddle/cinn/utils/random_engine.h @@ -49,7 +49,7 @@ class LinearRandomEngine { static constexpr StateType modulus = 2147483647; // Construct a linear random engine with a random state pointer - LinearRandomEngine(StateType* state) : state_(state) {} + explicit LinearRandomEngine(StateType* state) : state_(state) {} // operator() is needed by std::xxx_distribution result_type operator()() { return Next(); } diff --git 
a/paddle/cinn/utils/sized_multi_set.h b/paddle/cinn/utils/sized_multi_set.h index f6a35f3627f..d36fb7a0192 100644 --- a/paddle/cinn/utils/sized_multi_set.h +++ b/paddle/cinn/utils/sized_multi_set.h @@ -37,7 +37,7 @@ template > class SizedMultiSet { public: - SizedMultiSet(size_t capacity, bool pop_max_when_full = true) + explicit SizedMultiSet(size_t capacity, bool pop_max_when_full = true) : capacity_(capacity), pop_max_when_full_(pop_max_when_full) {} void Push(const T& data) { diff --git a/test/cpp/cinn/program_builder.h b/test/cpp/cinn/program_builder.h index dde7093b1f7..febfe36d8e8 100644 --- a/test/cpp/cinn/program_builder.h +++ b/test/cpp/cinn/program_builder.h @@ -40,7 +40,7 @@ struct VariableInfo { // variables and attributes class ProgramBuilder { public: - ProgramBuilder(const std::string& name) : builder_(name) {} + explicit ProgramBuilder(const std::string& name) : builder_(name) {} /* * \brief Build a frontend::Program with the input variables info and -- GitLab