diff --git a/paddle/cinn/auto_schedule/cost_model/feature.h b/paddle/cinn/auto_schedule/cost_model/feature.h index 8b1b59d92c6e38514236210fae3299e993c2878b..52e43d8ed80b2bcd53cab7af85f99b7e153f4e32 100644 --- a/paddle/cinn/auto_schedule/cost_model/feature.h +++ b/paddle/cinn/auto_schedule/cost_model/feature.h @@ -134,7 +134,7 @@ class Feature { public: Feature(); - Feature(const common::Target& target); + explicit Feature(const common::Target& target); // Convert the various-length loop block features to fixed-size vector std::vector ToFixedSizeVector(); diff --git a/paddle/cinn/auto_schedule/database/database.h b/paddle/cinn/auto_schedule/database/database.h index 2893daa9e4a2a5910a6c1e56f1ec5ac4edc88a5a..dc82475bfd37a1f649ff55d2d47d1eb1da67d0bc 100644 --- a/paddle/cinn/auto_schedule/database/database.h +++ b/paddle/cinn/auto_schedule/database/database.h @@ -34,7 +34,7 @@ struct TuningRecord { double execution_cost; // unit: us TuningRecord() = default; - TuningRecord(const proto::TuningRecord& record) + explicit TuningRecord(const proto::TuningRecord& record) : task_key(record.task_key()), predicted_cost(record.predicted_cost()), trace(record.trace()), diff --git a/paddle/cinn/auto_schedule/measure/simple_builder.h b/paddle/cinn/auto_schedule/measure/simple_builder.h index ca098d0ef6ffc9835848f19307ea22476ec1ddc8..4a15a1349b07f75c6f2af51590f5de49e6516928 100644 --- a/paddle/cinn/auto_schedule/measure/simple_builder.h +++ b/paddle/cinn/auto_schedule/measure/simple_builder.h @@ -24,7 +24,7 @@ namespace auto_schedule { // the input schedule as executable objects class SimpleBuilder : public ScheduleBuilder { public: - SimpleBuilder(hlir::framework::GraphCompiler* graph_compiler); + explicit SimpleBuilder(hlir::framework::GraphCompiler* graph_compiler); // Build and pack the result BuildResult Build(const MeasureInput& input) override; diff --git a/paddle/cinn/auto_schedule/measure/simple_runner.h b/paddle/cinn/auto_schedule/measure/simple_runner.h index 
d466c71b447d8ac542f85f35d6abc99270dc6fab..9926f45792db2e504ab943cfe47a372204fcd3f5 100644 --- a/paddle/cinn/auto_schedule/measure/simple_runner.h +++ b/paddle/cinn/auto_schedule/measure/simple_runner.h @@ -24,7 +24,7 @@ namespace auto_schedule { // kernels and count the elapsed time as the measurement of performance class SimpleRunner : public ScheduleRunner { public: - SimpleRunner(int repeat_times); + explicit SimpleRunner(int repeat_times); MeasureResult Run(const MeasureInput& input, const BuildResult& build_result) override; diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_bind.h b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_bind.h index e4dfb59e09ff079bc418247a168ce3a3b4b40e21..9793e5a5ef3aceeed1e7af5162a2c051b0c03ab6 100644 --- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_bind.h +++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_bind.h @@ -27,7 +27,7 @@ namespace auto_schedule { // Auto bind GPU index(BlockIdx, ThreadIdx) to the loops around the block class AutoBind : public AutoGenRule { public: - AutoBind(const common::Target& target) : AutoGenRule(target) {} + explicit AutoBind(const common::Target& target) : AutoGenRule(target) {} ~AutoBind() = default; RuleApplyType Init(ir::IRSchedule* init_schedule) override; diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_gen_rule.h b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_gen_rule.h index 6b74861637c61fbcc80250029fc2c3dbb0ca4f44..bf6d3abbf9d6ddeccd4a18fc4cf4b462e26b33d2 100644 --- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_gen_rule.h +++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_gen_rule.h @@ -45,7 +45,7 @@ enum class RuleApplyType : int { */ class AutoGenRule { public: - AutoGenRule(const common::Target& target); + explicit AutoGenRule(const common::Target& target); ~AutoGenRule() = default; // Initialize the AutoGenRule, it must be called before further actions. 
diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_unroll.h b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_unroll.h index ee2f2f1ea42ac6736820f57139bb9d291ea8151b..517b5ee5b9b2c3f59155d29cb624717babb865d2 100644 --- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_unroll.h +++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_unroll.h @@ -31,7 +31,7 @@ namespace auto_schedule { // based on actual situation. class AutoUnroll : public AutoGenRule { public: - AutoUnroll(const common::Target& target) : AutoGenRule(target) {} + explicit AutoUnroll(const common::Target& target) : AutoGenRule(target) {} ~AutoUnroll() = default; RuleApplyType Init(ir::IRSchedule* init_schedule) override; diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/skip_rule.h b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/skip_rule.h index 41564a5202b704e8cb7561913213b835241feab3..c4280b5569f7ef627e25a6a3a0efe15358a60c6a 100644 --- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/skip_rule.h +++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/skip_rule.h @@ -25,7 +25,7 @@ namespace auto_schedule { class SkipRule : public AutoGenRule { public: - SkipRule(const common::Target& target); + explicit SkipRule(const common::Target& target); ~SkipRule() = default; RuleApplyType Init(ir::IRSchedule* init_schedule) override; diff --git a/paddle/cinn/auto_schedule/search_space/search_state.cc b/paddle/cinn/auto_schedule/search_space/search_state.cc index 5812a6e936a8c3b30fef6af49da7937358b7092c..973270f493eede57d2b25ca902df6b4cbe694b60 100644 --- a/paddle/cinn/auto_schedule/search_space/search_state.cc +++ b/paddle/cinn/auto_schedule/search_space/search_state.cc @@ -91,7 +91,7 @@ class DfsWithExprsFields : public ir::IRVisitor { // Generate a reduce hash of a AST tree by combining hash of each AST node class IrNodesStructuralHash : public DfsWithExprsFields { public: - IrNodesStructuralHash(size_t init_key) : 
hash_key_(init_key) {} + explicit IrNodesStructuralHash(size_t init_key) : hash_key_(init_key) {} size_t operator()(const Expr* expr) { Visit(expr); return hash_key_; diff --git a/paddle/cinn/auto_schedule/search_strategy/evolutionary_search_test.cc b/paddle/cinn/auto_schedule/search_strategy/evolutionary_search_test.cc index 23743384c71f3a3419bbe448a1803c15176f57c0..ab1abc0c4773c9e5c8ffa0358c81ce0b6b1d8ab1 100644 --- a/paddle/cinn/auto_schedule/search_strategy/evolutionary_search_test.cc +++ b/paddle/cinn/auto_schedule/search_strategy/evolutionary_search_test.cc @@ -64,7 +64,8 @@ std::vector CreateTasks(const frontend::Program& program, */ class MockSearchSpace : public SearchSpace { public: - MockSearchSpace(const TuneTask& tune_task) : SearchSpace(tune_task) {} + explicit MockSearchSpace(const TuneTask& tune_task) + : SearchSpace(tune_task) {} int GetMinExprValue() const { return min_expr_value_; } diff --git a/paddle/cinn/auto_schedule/task/task_optimizer.h b/paddle/cinn/auto_schedule/task/task_optimizer.h index 849a8f423bcb98e3f2674bf9ccd555eb9af7619a..ac49686463f1914af39c509f13b5fe5ab16373f5 100644 --- a/paddle/cinn/auto_schedule/task/task_optimizer.h +++ b/paddle/cinn/auto_schedule/task/task_optimizer.h @@ -45,7 +45,7 @@ class TaskOptimizer { std::string from; double cost; FunctionGroup functions; - Result(const std::string& from_type) + explicit Result(const std::string& from_type) : from(from_type), cost(std::numeric_limits::max()) {} }; diff --git a/paddle/cinn/auto_schedule/task/tune_task.h b/paddle/cinn/auto_schedule/task/tune_task.h index 2921f41a0f5fda85d1ddc0a56e99d8a8f6eed470..5c3403d427804b1160ecbaa74af318c8745ca63f 100644 --- a/paddle/cinn/auto_schedule/task/tune_task.h +++ b/paddle/cinn/auto_schedule/task/tune_task.h @@ -36,7 +36,7 @@ namespace auto_schedule { class TuneTask { public: TuneTask() = default; - TuneTask(std::shared_ptr group) + explicit TuneTask(std::shared_ptr group) : subgraph(group) {} // Initialize a task void Initialize( diff 
--git a/paddle/cinn/common/shared.h b/paddle/cinn/common/shared.h index 6c2e042ca6364595cc866a335f8cab804ceb3514..cf810f14c5988dca082f214642dd8c38f9c5ac6e 100644 --- a/paddle/cinn/common/shared.h +++ b/paddle/cinn/common/shared.h @@ -55,7 +55,7 @@ struct Shared { using object_ptr = T*; Shared() = default; - Shared(T* p) : p_(p) { + Shared(T* p) : p_(p) { // NOLINT if (p) IncRef(p); } Shared(const Shared& other) : p_(other.p_) { IncRef(p_); } diff --git a/paddle/cinn/frontend/pass/pass_test_helper.h b/paddle/cinn/frontend/pass/pass_test_helper.h index 5c57835e1f16172ba495261e72918277105b6113..e56746a86368aa77c559495f8328b4254117a785 100644 --- a/paddle/cinn/frontend/pass/pass_test_helper.h +++ b/paddle/cinn/frontend/pass/pass_test_helper.h @@ -143,7 +143,7 @@ inline std::vector RunProgram( struct OptimizeConfig { struct PassGroup; - OptimizeConfig(const PassGroup& program_passes) + explicit OptimizeConfig(const PassGroup& program_passes) : program_passes{program_passes} { if (FLAGS_cinn_use_op_fusion) { graph_passes = {{"OpFusionPass", "FusionMergePass"}, diff --git a/paddle/cinn/frontend/program_pass.h b/paddle/cinn/frontend/program_pass.h index 4e54c70fc5ee43ee7cf1f04dbbcf9410951c9331..ecdb23ef2e1f076a865dc66a306f3498d861446a 100755 --- a/paddle/cinn/frontend/program_pass.h +++ b/paddle/cinn/frontend/program_pass.h @@ -28,7 +28,7 @@ namespace frontend { class ProgramPass { public: - ProgramPass(const std::string& name) : name_(name) {} + explicit ProgramPass(const std::string& name) : name_(name) {} /** * \brief Apply a sequence of passes on a program. diff --git a/paddle/cinn/hlir/framework/op_lowering_util.cc b/paddle/cinn/hlir/framework/op_lowering_util.cc index 807d70eb864d0ae50fdfb5c7078e2b30ec33f2ad..88963f5b989f7bbb6ef1bb6adafc4910469b304c 100644 --- a/paddle/cinn/hlir/framework/op_lowering_util.cc +++ b/paddle/cinn/hlir/framework/op_lowering_util.cc @@ -1185,7 +1185,7 @@ void LoopAssignReduce( // The struct used to remove the original block in ComputeAt. 
class RemoveExpr : public ir::IRMutator<> { public: - RemoveExpr(const Expr& target) : target_(target) {} + explicit RemoveExpr(const Expr& target) : target_(target) {} void operator()(Expr* expr) { IRMutator::Visit(expr, expr); } diff --git a/paddle/cinn/hlir/pass/check_fusion_accuracy_pass.cc b/paddle/cinn/hlir/pass/check_fusion_accuracy_pass.cc index 3573adb8101823881ea38c81a878637cd5de4bc2..52a5d128860d21e1780f273ab04ede3d116907de 100644 --- a/paddle/cinn/hlir/pass/check_fusion_accuracy_pass.cc +++ b/paddle/cinn/hlir/pass/check_fusion_accuracy_pass.cc @@ -50,7 +50,7 @@ using DtypeDict = absl::flat_hash_map; namespace utils { class AssertMsg { public: - AssertMsg(int group_id) : group_id_(group_id) {} + explicit AssertMsg(int group_id) : group_id_(group_id) {} void SetMsg(const std::string& title, const std::string& msg) { msg_info_[title] = msg; @@ -80,7 +80,7 @@ class AssertMsg { class CheckFusionAccuracyPass { public: - CheckFusionAccuracyPass(Graph* graph) + explicit CheckFusionAccuracyPass(Graph* graph) : graph_(graph), shape_dict_(graph_->GetMutableAttrs("infershape")), dtype_dict_(graph_->GetMutableAttrs("inferdtype")) {} diff --git a/paddle/cinn/hlir/pass/constant_folding_pass.cc b/paddle/cinn/hlir/pass/constant_folding_pass.cc index e0396b137395ddd5bc8b7cc0467dc324c7af3f0b..50a76f54cb312e9e3a6832b196b91d97f1463424 100644 --- a/paddle/cinn/hlir/pass/constant_folding_pass.cc +++ b/paddle/cinn/hlir/pass/constant_folding_pass.cc @@ -35,7 +35,7 @@ using AlterFunction = // class ConstantFoldingPassHelper : public FusionHelperBase { public: - ConstantFoldingPassHelper(Graph* graph) + explicit ConstantFoldingPassHelper(Graph* graph) : FusionHelperBase(graph), graph_(graph) { RegisterAlterFunction(); } diff --git a/paddle/cinn/hlir/pass/custom_call_pass.cc b/paddle/cinn/hlir/pass/custom_call_pass.cc index 2d47a211c6b14c22cdd0dc49e020eb79bdd81e39..9990683788165004b1e1f4562bf455816b4cf69b 100644 --- a/paddle/cinn/hlir/pass/custom_call_pass.cc +++ 
b/paddle/cinn/hlir/pass/custom_call_pass.cc @@ -30,7 +30,7 @@ using framework::NodeData; class GraphAlterHelper { public: - GraphAlterHelper(Graph* graph) : graph_(graph) { + explicit GraphAlterHelper(Graph* graph) : graph_(graph) { if (!FLAGS_cinn_custom_call_deny_ops.empty()) { auto splited_names = cinn::utils::Split(FLAGS_cinn_custom_call_deny_ops, ";"); diff --git a/paddle/cinn/hlir/pass/dce_pass.cc b/paddle/cinn/hlir/pass/dce_pass.cc index 32e6e952ed6a8291a1205bb153d5b384e91f2cf6..fd439c1e97cfc7d4dad3d02eaad3938ec056e078 100644 --- a/paddle/cinn/hlir/pass/dce_pass.cc +++ b/paddle/cinn/hlir/pass/dce_pass.cc @@ -38,7 +38,7 @@ using ConditionFunction = class DceHelper : public FusionHelperBase { public: - DceHelper(Graph* graph) : FusionHelperBase(graph), graph_(graph) {} + explicit DceHelper(Graph* graph) : FusionHelperBase(graph), graph_(graph) {} void operator()() { if (output_nodes_set_.empty()) { diff --git a/paddle/cinn/hlir/pass/dense_merge_pass.cc b/paddle/cinn/hlir/pass/dense_merge_pass.cc index 3ffd7fb369e71c2e441ba7e74dd5bdbdefadc6ef..c8433f3a85fc7f281b7061d0d338322d3becd512 100644 --- a/paddle/cinn/hlir/pass/dense_merge_pass.cc +++ b/paddle/cinn/hlir/pass/dense_merge_pass.cc @@ -31,7 +31,8 @@ using framework::NodeAttr; class DenseMergePassHelper : public FusionHelperBase { public: - DenseMergePassHelper(Graph* graph) : FusionHelperBase(graph), graph_(graph) {} + explicit DenseMergePassHelper(Graph* graph) + : FusionHelperBase(graph), graph_(graph) {} void operator()() { auto nodes_inorder = std::get<0>(graph_->topological_order()); diff --git a/paddle/cinn/hlir/pass/fusion_helper_base.h b/paddle/cinn/hlir/pass/fusion_helper_base.h index c15abaa8b9a19a19b1d581e42df9cbd957558943..d3c9e5c075529a27cc924cff3cc53b72dea4b895 100644 --- a/paddle/cinn/hlir/pass/fusion_helper_base.h +++ b/paddle/cinn/hlir/pass/fusion_helper_base.h @@ -37,7 +37,7 @@ using framework::shape_t; class FusionHelperBase { public: - FusionHelperBase(const framework::Graph* graph) + 
explicit FusionHelperBase(const framework::Graph* graph) : shape_dict_(graph->GetAttrs>( "infershape")), target_(graph->target_) { diff --git a/paddle/cinn/hlir/pass/fusion_merge_pass.cc b/paddle/cinn/hlir/pass/fusion_merge_pass.cc index af28654703702378844dd5832affdf60bce00e86..dc09bd5c7b5723c14c86d9b00b0e6d79777e6737 100644 --- a/paddle/cinn/hlir/pass/fusion_merge_pass.cc +++ b/paddle/cinn/hlir/pass/fusion_merge_pass.cc @@ -44,7 +44,7 @@ using ConditionFunction = std::functionfusion_groups; // init fusion relation. InitFusionRelation(); diff --git a/paddle/cinn/hlir/pass/op_fusion_pass.cc b/paddle/cinn/hlir/pass/op_fusion_pass.cc index 6648cb036131c24e8bfce0f54cba014d6263e2af..302c9b71b5a9d9912a61f42ad437cdc3f20fa184 100644 --- a/paddle/cinn/hlir/pass/op_fusion_pass.cc +++ b/paddle/cinn/hlir/pass/op_fusion_pass.cc @@ -40,7 +40,7 @@ using ConditionFunction = // code generation. class OpFusionPassHelper : public FusionHelperBase { public: - OpFusionPassHelper(const Graph* graph) : FusionHelperBase(graph) { + explicit OpFusionPassHelper(const Graph* graph) : FusionHelperBase(graph) { // init fusion relation InitFusionRelation(); // filter node data, create group for each node diff --git a/paddle/cinn/hlir/pass/single_group_optimize_pass.cc b/paddle/cinn/hlir/pass/single_group_optimize_pass.cc index c79dbfcb5ef2497657fab53c20252e5bfe56c554..1f8982192cddc561f72c770e693769bba6655c13 100644 --- a/paddle/cinn/hlir/pass/single_group_optimize_pass.cc +++ b/paddle/cinn/hlir/pass/single_group_optimize_pass.cc @@ -46,7 +46,7 @@ bool IsValueZero(cinn::utils::Attribute value) { class SingleGroupOptimizePass { public: - SingleGroupOptimizePass(Graph* graph); + explicit SingleGroupOptimizePass(Graph* graph); std::vector> Apply(); diff --git a/paddle/cinn/hlir/pe/schedule.h b/paddle/cinn/hlir/pe/schedule.h index 3c05084335c8d4c30c534b01fb95cb87a6bd6eec..c22c8fbb7a93a449c161c9c1c1bdaea15b6e04c6 100644 --- a/paddle/cinn/hlir/pe/schedule.h +++ b/paddle/cinn/hlir/pe/schedule.h @@ 
-54,7 +54,7 @@ class ScheduleParam { int Count(const std::string &key) { return param_data.count(key); } private: - ScheduleParam(common::Target::Arch arch); + explicit ScheduleParam(common::Target::Arch arch); absl::flat_hash_map>> param_data; diff --git a/paddle/cinn/ir/collect_ir_nodes.cc b/paddle/cinn/ir/collect_ir_nodes.cc index afbf99e59d9c1371439f33fc4be0f33244129d61..4c00ac975cc29944d4d33e1cbf362419c93a5a80 100644 --- a/paddle/cinn/ir/collect_ir_nodes.cc +++ b/paddle/cinn/ir/collect_ir_nodes.cc @@ -151,7 +151,7 @@ std::set CollectLoadTensors(Expr x, struct Mutator : public ir::IRMutator { std::function teller; std::set exprs; - Mutator(std::function&& teller) + explicit Mutator(std::function&& teller) : teller(std::move(teller)) {} void operator()(const Expr* expr) { @@ -175,7 +175,7 @@ std::set CollectStoreTensors(Expr x, struct Mutator : public ir::IRMutator { std::function teller; std::set exprs; - Mutator(std::function&& teller) + explicit Mutator(std::function&& teller) : teller(std::move(teller)) {} void operator()(const Expr* expr) { diff --git a/paddle/cinn/ir/function_base.h b/paddle/cinn/ir/function_base.h index dd4be902da2000b290f36cc7a58394aa0024a0ab..78cef3d506687f6761b07aa66fed3acfcef50395 100644 --- a/paddle/cinn/ir/function_base.h +++ b/paddle/cinn/ir/function_base.h @@ -27,7 +27,7 @@ class FunctionBase : public IrNode { class FunctionRef : public IrNodeRef { public: FunctionRef() = default; - FunctionRef(IrNode* n) : IrNodeRef(n) {} + explicit FunctionRef(IrNode* n) : IrNodeRef(n) {} }; } // namespace ir diff --git a/paddle/cinn/ir/ir_schedule.cc b/paddle/cinn/ir/ir_schedule.cc index 0214c83bb3cd72e775e5bbb424166ba95b352374..04041c9d8c39c3bb85bf8d6185db9620df859cef 100644 --- a/paddle/cinn/ir/ir_schedule.cc +++ b/paddle/cinn/ir/ir_schedule.cc @@ -1216,7 +1216,7 @@ struct LoopReconstructor : public ir::IRMutator<> { struct FixLocalBufferSize : public ir::IRMutator<> { public: - FixLocalBufferSize(const std::string& tensor_name) + explicit 
FixLocalBufferSize(const std::string& tensor_name) : tensor_name_(tensor_name) {} void operator()(Expr* expr) { IRMutator::Visit(expr, expr); } @@ -1697,7 +1697,8 @@ void ScheduleImpl::ReverseComputeInline(const Expr& schedule_block) { struct FindBlockParent : public ir::IRMutator<> { public: - FindBlockParent(const std::string& block_name) : block_name_(block_name) {} + explicit FindBlockParent(const std::string& block_name) + : block_name_(block_name) {} void operator()(Expr* expr) { IRMutator::Visit(expr, expr); } diff --git a/paddle/cinn/ir/ir_schedule_util.h b/paddle/cinn/ir/ir_schedule_util.h index 802a134d23bc4cccd1e442284e0cbe194e4a420e..0107054e413767e5dc7895bf624963947a238dce 100644 --- a/paddle/cinn/ir/ir_schedule_util.h +++ b/paddle/cinn/ir/ir_schedule_util.h @@ -45,7 +45,8 @@ struct CompVar { }; struct MappingVarToExprMutator : public ir::IRMutator<> { - MappingVarToExprMutator(const std::map& replacing_map) + explicit MappingVarToExprMutator( + const std::map& replacing_map) : replacing_map_(replacing_map) {} void operator()(Expr* expr) { IRMutator::Visit(expr, expr); } @@ -62,7 +63,7 @@ struct MappingVarToExprMutator : public ir::IRMutator<> { }; struct FindLoopsVisitor { - FindLoopsVisitor(const Expr& block) : block_(block) {} + explicit FindLoopsVisitor(const Expr& block) : block_(block) {} std::vector operator()(const Expr* expr) { CHECK(block_.As()); @@ -112,7 +113,7 @@ struct FindLoopsVisitor { Tensor GetTensor(const Expr& block); struct FindBlocksVisitor { - FindBlocksVisitor(const std::string& block_name = "") + explicit FindBlocksVisitor(const std::string& block_name = "") : block_name_(block_name) {} std::vector operator()(const Expr* expr) { diff --git a/paddle/cinn/ir/schedule_desc.h b/paddle/cinn/ir/schedule_desc.h index 9cac7ac87816df2dfa9a5b2539cdd6efeea10d96..c166479e695d53a29c05658c534d99dcc5fea55e 100644 --- a/paddle/cinn/ir/schedule_desc.h +++ b/paddle/cinn/ir/schedule_desc.h @@ -66,9 +66,9 @@ class ScheduleDesc { ScheduleDesc() = 
default; - ScheduleDesc(const std::vector& steps) : steps_(steps) {} + explicit ScheduleDesc(const std::vector& steps) : steps_(steps) {} - ScheduleDesc(std::vector&& steps) : steps_(steps) {} + explicit ScheduleDesc(std::vector&& steps) : steps_(steps) {} // Append a new step void Append(Step&& step); diff --git a/paddle/cinn/optim/buffer_assign.cc b/paddle/cinn/optim/buffer_assign.cc index f5f0e47a68fee28a6e26d56f1bcbe4a6ba0d4eab..dcfd6d8b8fb23d2d1b051c704f6514d1723777ec 100644 --- a/paddle/cinn/optim/buffer_assign.cc +++ b/paddle/cinn/optim/buffer_assign.cc @@ -26,7 +26,7 @@ namespace optim { namespace { struct BufferUFNode : public common::UnionFindNode { - BufferUFNode(const std::string& x) : tensor_name(x) {} + explicit BufferUFNode(const std::string& x) : tensor_name(x) {} const char* type_info() const override { return __type_info__; } @@ -38,7 +38,8 @@ const char* BufferUFNode::__type_info__ = "BufferUFNode"; struct IRReplaceTensorMutator : ir::IRMutator<> { const std::map& tensor_map; - IRReplaceTensorMutator(const std::map& tensor_map) + explicit IRReplaceTensorMutator( + const std::map& tensor_map) : tensor_map(tensor_map) {} void operator()(Expr* expr) { ir::IRMutator<>::Visit(expr, expr); } diff --git a/paddle/cinn/optim/ir_simplify.cc b/paddle/cinn/optim/ir_simplify.cc index a101482b64f7282ec229b55c039758ae93ff41a0..48645690f9de7ac42457cd471e398e817052574d 100644 --- a/paddle/cinn/optim/ir_simplify.cc +++ b/paddle/cinn/optim/ir_simplify.cc @@ -261,7 +261,7 @@ struct ReplaceFracWithDivMutator : public ir::IRMutator<> { }; struct SimplifyBlocksMutator : public ir::IRMutator<> { - explicit SimplifyBlocksMutator() {} + SimplifyBlocksMutator() {} void operator()(Expr* x) { ir::IRMutator::Visit(x, x); } @@ -320,7 +320,7 @@ struct SimplifyBlocksMutator : public ir::IRMutator<> { struct SimplifyForLoopsMutator : public ir::IRMutator<> { absl::flat_hash_map var_intervals; - explicit SimplifyForLoopsMutator() {} + SimplifyForLoopsMutator() {} void 
operator()(Expr* x) { ir::IRMutator::Visit(x, x); } diff --git a/paddle/cinn/optim/replace_var_with_expr.cc b/paddle/cinn/optim/replace_var_with_expr.cc index 10dc22c80b0974f304117190bb1160fa88e19669..f994eeb2e2a552f129c0282880a0880e6feec40d 100644 --- a/paddle/cinn/optim/replace_var_with_expr.cc +++ b/paddle/cinn/optim/replace_var_with_expr.cc @@ -116,7 +116,7 @@ void ReplaceVarWithExpr(Expr* source, } struct CollectTensorIndexMutator : public ir::IRMutator<> { - CollectTensorIndexMutator(const std::string& tensor_name) + explicit CollectTensorIndexMutator(const std::string& tensor_name) : tensor_name_(tensor_name) {} std::vector> operator()(Expr* expr) { diff --git a/paddle/cinn/poly/dim.h b/paddle/cinn/poly/dim.h index 6b197eaf214ff26b6a3c2e0c68d2f158d2d5e495..5ae7ee7a897d6f85894689be3e82fd76b443f34e 100644 --- a/paddle/cinn/poly/dim.h +++ b/paddle/cinn/poly/dim.h @@ -46,7 +46,7 @@ struct Dim { value_t upper_bound; //! Construct a parameter. - Dim(std::string id) : id(std::move(id)) {} + explicit Dim(std::string id) : id(std::move(id)) {} //! Construct a dimension with integer range. Dim(std::string id, uint32_t lower_bound, uint32_t upper_bound) diff --git a/paddle/cinn/runtime/buffer.h b/paddle/cinn/runtime/buffer.h index c3eb5c43b58e387dc0b63d26e187bde0643ee55b..b211389c6dccec964dd19b631d6658f32e34616c 100755 --- a/paddle/cinn/runtime/buffer.h +++ b/paddle/cinn/runtime/buffer.h @@ -64,7 +64,7 @@ struct Shape { template class Buffer { public: - Buffer(const Shape& shape) : shape_(shape) {} + explicit Buffer(const Shape& shape) : shape_(shape) {} //! Allocate the memory in host device. 
void AllocHost() { diff --git a/paddle/cinn/runtime/cuda/cuda_util.cc b/paddle/cinn/runtime/cuda/cuda_util.cc index 331e67868954dc8d4ea756d2f6ee5c4dcd234549..6fb82ccb8a05ad12a82559bdda7f2990d945f0b6 100644 --- a/paddle/cinn/runtime/cuda/cuda_util.cc +++ b/paddle/cinn/runtime/cuda/cuda_util.cc @@ -1972,7 +1972,7 @@ class CurandGenerator { CURAND_CALL(curandCreateGenerator(&generator_, CURAND_RNG_PSEUDO_DEFAULT)); } - CurandGenerator(curandRngType rng_type) { + explicit CurandGenerator(curandRngType rng_type) { CURAND_CALL(curandCreateGenerator(&generator_, rng_type)); } diff --git a/paddle/cinn/utils/event.cc b/paddle/cinn/utils/event.cc index 6f319bf5e44a272d5ebc7d7dba2b94e580046ccb..ca06ae73c67666951755f8985fd59bace7d2dedc 100644 --- a/paddle/cinn/utils/event.cc +++ b/paddle/cinn/utils/event.cc @@ -76,9 +76,9 @@ std::string Summary::Format(const std::vector &events) { } // Calculate Ratio for (auto &item : items) { - item.sub_raito = + item.sub_ratio = item.info.duration_ / category_cost[item.info.type_] * 100.0; - item.total_raito = item.info.duration_ / total_cost * 100.0; + item.total_ratio = item.info.duration_ / total_cost * 100.0; } std::sort(items.begin(), items.end()); @@ -113,8 +113,8 @@ std::string Summary::AsStr(const std::vector &items, int data_width) { std::vector infos = {EventTypeToString(item.info.type_), item.info.annotation_, std::to_string(item.info.duration_), - item.sub_raito.ToStr(), - item.total_raito.ToStr()}; + item.sub_ratio.ToStr(), + item.total_ratio.ToStr()}; idx = 0; for (auto &info : infos) { pad_size = widths[idx] > info.size() ? 
widths[idx] - info.size() : 1; diff --git a/paddle/cinn/utils/event.h b/paddle/cinn/utils/event.h index a87dcf4828e0e9f322ba91f4ab7dbed27a87f37b..5d7b8113a1d8b40da83ce466faedcbd53011ea09 100644 --- a/paddle/cinn/utils/event.h +++ b/paddle/cinn/utils/event.h @@ -67,20 +67,20 @@ struct HostEvent { class Summary { public: - struct Raito { + struct Ratio { double value; - Raito(double val) : value(val) {} + Ratio(double val) : value(val) {} // NOLINT std::string ToStr() const { return std::to_string(value); } }; struct Item { HostEvent info; - Raito sub_raito{0.0}; // percentage of EventType - Raito total_raito{0.0}; // precentage of total process + Ratio sub_ratio{0.0}; // percentage of EventType + Ratio total_ratio{0.0}; // percentage of total process - Item(const HostEvent& e) : info(e) {} + explicit Item(const HostEvent& e) : info(e) {} bool operator<(const Item& other) const { - return total_raito.value > other.total_raito.value; + return total_ratio.value > other.total_ratio.value; } }; diff --git a/paddle/cinn/utils/profiler.h b/paddle/cinn/utils/profiler.h index 369e9749e33fcc5aaa4d0d41a6a22c57e879a8f8..e629948e3042d20aa99cdae14cddafafec0cf6ea 100644 --- a/paddle/cinn/utils/profiler.h +++ b/paddle/cinn/utils/profiler.h @@ -66,7 +66,8 @@ class RecordEvent { using CallBack = std::function; public: - RecordEvent(const std::string& name, EventType type = EventType::kOrdinary); + explicit RecordEvent(const std::string& name, + EventType type = EventType::kOrdinary); void End(); diff --git a/paddle/cinn/utils/random_engine.h b/paddle/cinn/utils/random_engine.h index b94dcda043e7d79edc180225d6eeb3e7e665adbb..05162b288c7818e433992eace9406f42c5622fbf 100644 --- a/paddle/cinn/utils/random_engine.h +++ b/paddle/cinn/utils/random_engine.h @@ -49,7 +49,7 @@ class LinearRandomEngine { static constexpr StateType modulus = 2147483647; // Construct a linear random engine with a random state pointer - LinearRandomEngine(StateType* state) : state_(state) {} + explicit 
LinearRandomEngine(StateType* state) : state_(state) {} // operator() is needed by std::xxx_distribution result_type operator()() { return Next(); } diff --git a/paddle/cinn/utils/sized_multi_set.h b/paddle/cinn/utils/sized_multi_set.h index f6a35f3627f86e046d4858120d0e85ca1aa9bcea..d36fb7a01920bdb197e4779d9e3313fa98ce97bc 100644 --- a/paddle/cinn/utils/sized_multi_set.h +++ b/paddle/cinn/utils/sized_multi_set.h @@ -37,7 +37,7 @@ template > class SizedMultiSet { public: - SizedMultiSet(size_t capacity, bool pop_max_when_full = true) + explicit SizedMultiSet(size_t capacity, bool pop_max_when_full = true) : capacity_(capacity), pop_max_when_full_(pop_max_when_full) {} void Push(const T& data) { diff --git a/test/cpp/cinn/program_builder.h b/test/cpp/cinn/program_builder.h index dde7093b1f7b78c4a7ca395a5b5a22e35406a9a3..febfe36d8e8bcf05b42e89051a63352a8bb74406 100644 --- a/test/cpp/cinn/program_builder.h +++ b/test/cpp/cinn/program_builder.h @@ -40,7 +40,7 @@ struct VariableInfo { // variables and attributes class ProgramBuilder { public: - ProgramBuilder(const std::string& name) : builder_(name) {} + explicit ProgramBuilder(const std::string& name) : builder_(name) {} /* * \brief Build a frontend::Program with the input variables info and