Unverified commit ef19521c authored by Difer, committed by GitHub

[CodeStyle][CINN] fix cinn codestyle cpplint `[runtime/explicit]` (#55036)

* fix cinn cpplint [runtime/explicit]

* mark shared nolint

* add a nolint and fix typo raito -> ratio

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent 16a58283
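For context, a minimal sketch (hypothetical `Buffer`/`Consume` names, not taken from the diff below) of what cpplint's `[runtime/explicit]` check guards against: a single-argument constructor left non-explicit lets the compiler convert arguments implicitly, so the fix throughout this commit is to mark such constructors `explicit`.

```cpp
// Hypothetical example, not part of the CINN codebase.
#include <cstddef>
#include <vector>

class Buffer {
 public:
  // With `explicit`, `Buffer b = 128;` or `Consume(128)` no longer compiles;
  // construction has to be spelled out intentionally.
  explicit Buffer(std::size_t size) : data_(size) {}
  std::size_t size() const { return data_.size(); }

 private:
  std::vector<char> data_;
};

void Consume(const Buffer& buffer) { (void)buffer.size(); }

int main() {
  Buffer b(128);    // OK: deliberate construction
  Consume(b);       // OK
  // Consume(128);  // would have compiled via an implicit conversion without `explicit`
  return 0;
}
```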
......@@ -134,7 +134,7 @@ class Feature {
public:
Feature();
Feature(const common::Target& target);
explicit Feature(const common::Target& target);
// Convert the various-length loop block features to fixed-size vector
std::vector<float> ToFixedSizeVector();
......
......@@ -34,7 +34,7 @@ struct TuningRecord {
double execution_cost; // unit: us
TuningRecord() = default;
TuningRecord(const proto::TuningRecord& record)
explicit TuningRecord(const proto::TuningRecord& record)
: task_key(record.task_key()),
predicted_cost(record.predicted_cost()),
trace(record.trace()),
......
......@@ -24,7 +24,7 @@ namespace auto_schedule {
// the input schedule as executable objects
class SimpleBuilder : public ScheduleBuilder {
public:
SimpleBuilder(hlir::framework::GraphCompiler* graph_compiler);
explicit SimpleBuilder(hlir::framework::GraphCompiler* graph_compiler);
// Build and pack the result
BuildResult Build(const MeasureInput& input) override;
......
......@@ -24,7 +24,7 @@ namespace auto_schedule {
// kernels and count the elapsed time as the measurement of performance
class SimpleRunner : public ScheduleRunner {
public:
SimpleRunner(int repeat_times);
explicit SimpleRunner(int repeat_times);
MeasureResult Run(const MeasureInput& input,
const BuildResult& build_result) override;
......
......@@ -27,7 +27,7 @@ namespace auto_schedule {
// Auto bind GPU index(BlockIdx, ThreadIdx) to the loops around the block
class AutoBind : public AutoGenRule {
public:
AutoBind(const common::Target& target) : AutoGenRule(target) {}
explicit AutoBind(const common::Target& target) : AutoGenRule(target) {}
~AutoBind() = default;
RuleApplyType Init(ir::IRSchedule* init_schedule) override;
......
......@@ -45,7 +45,7 @@ enum class RuleApplyType : int {
*/
class AutoGenRule {
public:
AutoGenRule(const common::Target& target);
explicit AutoGenRule(const common::Target& target);
~AutoGenRule() = default;
// Initialize the AutoGenRule, it must be called before further actions.
......
......@@ -31,7 +31,7 @@ namespace auto_schedule {
// based on actual situation.
class AutoUnroll : public AutoGenRule {
public:
AutoUnroll(const common::Target& target) : AutoGenRule(target) {}
explicit AutoUnroll(const common::Target& target) : AutoGenRule(target) {}
~AutoUnroll() = default;
RuleApplyType Init(ir::IRSchedule* init_schedule) override;
......
......@@ -25,7 +25,7 @@ namespace auto_schedule {
class SkipRule : public AutoGenRule {
public:
SkipRule(const common::Target& target);
explicit SkipRule(const common::Target& target);
~SkipRule() = default;
RuleApplyType Init(ir::IRSchedule* init_schedule) override;
......
......@@ -91,7 +91,7 @@ class DfsWithExprsFields : public ir::IRVisitor {
// Generate a reduce hash of a AST tree by combining hash of each AST node
class IrNodesStructuralHash : public DfsWithExprsFields {
public:
IrNodesStructuralHash(size_t init_key) : hash_key_(init_key) {}
explicit IrNodesStructuralHash(size_t init_key) : hash_key_(init_key) {}
size_t operator()(const Expr* expr) {
Visit(expr);
return hash_key_;
......
......@@ -64,7 +64,8 @@ std::vector<TuneTask> CreateTasks(const frontend::Program& program,
*/
class MockSearchSpace : public SearchSpace {
public:
MockSearchSpace(const TuneTask& tune_task) : SearchSpace(tune_task) {}
explicit MockSearchSpace(const TuneTask& tune_task)
: SearchSpace(tune_task) {}
int GetMinExprValue() const { return min_expr_value_; }
......
......@@ -45,7 +45,7 @@ class TaskOptimizer {
std::string from;
double cost;
FunctionGroup functions;
Result(const std::string& from_type)
explicit Result(const std::string& from_type)
: from(from_type), cost(std::numeric_limits<double>::max()) {}
};
......
......@@ -36,7 +36,7 @@ namespace auto_schedule {
class TuneTask {
public:
TuneTask() = default;
TuneTask(std::shared_ptr<hlir::framework::Graph::Group> group)
explicit TuneTask(std::shared_ptr<hlir::framework::Graph::Group> group)
: subgraph(group) {}
// Initialize a task
void Initialize(
......
......@@ -55,7 +55,7 @@ struct Shared {
using object_ptr = T*;
Shared() = default;
Shared(T* p) : p_(p) {
Shared(T* p) : p_(p) { // NOLINT
if (p) IncRef(p);
}
Shared(const Shared& other) : p_(other.p_) { IncRef(p_); }
......
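The `Shared<T>` constructor above is intentionally left implicit and suppressed with `// NOLINT` rather than marked `explicit` ("mark shared nolint" in the commit message), since raw pointers are meant to convert to `Shared<T>` transparently. A hedged sketch of that pattern, using hypothetical `Widget`/`Handle`/`Use` names in place of the real types:

```cpp
// Hypothetical example showing the NOLINT suppression pattern.
struct Widget {
  int value = 0;
};

template <typename T>
struct Handle {
  Handle(T* p) : p_(p) {}  // NOLINT -- implicit conversion from T* is by design
  T* p_ = nullptr;
};

void Use(Handle<Widget> handle) { handle.p_->value += 1; }

int main() {
  Widget w;
  Use(&w);  // relies on the implicit Widget* -> Handle<Widget> conversion
  return 0;
}
```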
......@@ -143,7 +143,7 @@ inline std::vector<float> RunProgram(
struct OptimizeConfig {
struct PassGroup;
OptimizeConfig(const PassGroup& program_passes)
explicit OptimizeConfig(const PassGroup& program_passes)
: program_passes{program_passes} {
if (FLAGS_cinn_use_op_fusion) {
graph_passes = {{"OpFusionPass", "FusionMergePass"},
......
......@@ -28,7 +28,7 @@ namespace frontend {
class ProgramPass {
public:
ProgramPass(const std::string& name) : name_(name) {}
explicit ProgramPass(const std::string& name) : name_(name) {}
/**
* \brief Apply a sequence of passes on a program.
......
......@@ -1185,7 +1185,7 @@ void LoopAssignReduce(
// The struct used to remove the original block in ComputeAt.
class RemoveExpr : public ir::IRMutator<> {
public:
RemoveExpr(const Expr& target) : target_(target) {}
explicit RemoveExpr(const Expr& target) : target_(target) {}
void operator()(Expr* expr) { IRMutator::Visit(expr, expr); }
......
......@@ -50,7 +50,7 @@ using DtypeDict = absl::flat_hash_map<std::string, common::Type>;
namespace utils {
class AssertMsg {
public:
AssertMsg(int group_id) : group_id_(group_id) {}
explicit AssertMsg(int group_id) : group_id_(group_id) {}
void SetMsg(const std::string& title, const std::string& msg) {
msg_info_[title] = msg;
......@@ -80,7 +80,7 @@ class AssertMsg {
class CheckFusionAccuracyPass {
public:
CheckFusionAccuracyPass(Graph* graph)
explicit CheckFusionAccuracyPass(Graph* graph)
: graph_(graph),
shape_dict_(graph_->GetMutableAttrs<ShapeDict>("infershape")),
dtype_dict_(graph_->GetMutableAttrs<DtypeDict>("inferdtype")) {}
......
......@@ -35,7 +35,7 @@ using AlterFunction =
//
class ConstantFoldingPassHelper : public FusionHelperBase {
public:
ConstantFoldingPassHelper(Graph* graph)
explicit ConstantFoldingPassHelper(Graph* graph)
: FusionHelperBase(graph), graph_(graph) {
RegisterAlterFunction();
}
......
......@@ -30,7 +30,7 @@ using framework::NodeData;
class GraphAlterHelper {
public:
GraphAlterHelper(Graph* graph) : graph_(graph) {
explicit GraphAlterHelper(Graph* graph) : graph_(graph) {
if (!FLAGS_cinn_custom_call_deny_ops.empty()) {
auto splited_names =
cinn::utils::Split(FLAGS_cinn_custom_call_deny_ops, ";");
......
......@@ -38,7 +38,7 @@ using ConditionFunction =
class DceHelper : public FusionHelperBase {
public:
DceHelper(Graph* graph) : FusionHelperBase(graph), graph_(graph) {}
explicit DceHelper(Graph* graph) : FusionHelperBase(graph), graph_(graph) {}
void operator()() {
if (output_nodes_set_.empty()) {
......
......@@ -31,7 +31,8 @@ using framework::NodeAttr;
class DenseMergePassHelper : public FusionHelperBase {
public:
DenseMergePassHelper(Graph* graph) : FusionHelperBase(graph), graph_(graph) {}
explicit DenseMergePassHelper(Graph* graph)
: FusionHelperBase(graph), graph_(graph) {}
void operator()() {
auto nodes_inorder = std::get<0>(graph_->topological_order());
......
......@@ -37,7 +37,7 @@ using framework::shape_t;
class FusionHelperBase {
public:
FusionHelperBase(const framework::Graph* graph)
explicit FusionHelperBase(const framework::Graph* graph)
: shape_dict_(graph->GetAttrs<absl::flat_hash_map<std::string, shape_t>>(
"infershape")),
target_(graph->target_) {
......
......@@ -44,7 +44,7 @@ using ConditionFunction = std::function<bool(
// code generation.
class FusionMergePassHelper : public FusionHelperBase {
public:
FusionMergePassHelper(const Graph* graph) : FusionHelperBase(graph) {
explicit FusionMergePassHelper(const Graph* graph) : FusionHelperBase(graph) {
fusion_groups_ = graph->fusion_groups;
// init fusion relation.
InitFusionRelation();
......
......@@ -40,7 +40,7 @@ using ConditionFunction =
// code generation.
class OpFusionPassHelper : public FusionHelperBase {
public:
OpFusionPassHelper(const Graph* graph) : FusionHelperBase(graph) {
explicit OpFusionPassHelper(const Graph* graph) : FusionHelperBase(graph) {
// init fusion relation
InitFusionRelation();
// filter node data, create group for each node
......
......@@ -46,7 +46,7 @@ bool IsValueZero(cinn::utils::Attribute value) {
class SingleGroupOptimizePass {
public:
SingleGroupOptimizePass(Graph* graph);
explicit SingleGroupOptimizePass(Graph* graph);
std::vector<std::shared_ptr<Group>> Apply();
......
......@@ -54,7 +54,7 @@ class ScheduleParam {
int Count(const std::string &key) { return param_data.count(key); }
private:
ScheduleParam(common::Target::Arch arch);
explicit ScheduleParam(common::Target::Arch arch);
absl::flat_hash_map<std::string,
absl::flat_hash_map<std::string, std::vector<int>>>
param_data;
......
......@@ -151,7 +151,7 @@ std::set<Expr> CollectLoadTensors(Expr x,
struct Mutator : public ir::IRMutator<const Expr*> {
std::function<bool(const Expr*)> teller;
std::set<Expr> exprs;
Mutator(std::function<bool(const Expr*)>&& teller)
explicit Mutator(std::function<bool(const Expr*)>&& teller)
: teller(std::move(teller)) {}
void operator()(const Expr* expr) {
......@@ -175,7 +175,7 @@ std::set<Expr> CollectStoreTensors(Expr x,
struct Mutator : public ir::IRMutator<const Expr*> {
std::function<bool(const Expr*)> teller;
std::set<Expr> exprs;
Mutator(std::function<bool(const Expr*)>&& teller)
explicit Mutator(std::function<bool(const Expr*)>&& teller)
: teller(std::move(teller)) {}
void operator()(const Expr* expr) {
......
......@@ -27,7 +27,7 @@ class FunctionBase : public IrNode {
class FunctionRef : public IrNodeRef {
public:
FunctionRef() = default;
FunctionRef(IrNode* n) : IrNodeRef(n) {}
explicit FunctionRef(IrNode* n) : IrNodeRef(n) {}
};
} // namespace ir
......
......@@ -1216,7 +1216,7 @@ struct LoopReconstructor : public ir::IRMutator<> {
struct FixLocalBufferSize : public ir::IRMutator<> {
public:
FixLocalBufferSize(const std::string& tensor_name)
explicit FixLocalBufferSize(const std::string& tensor_name)
: tensor_name_(tensor_name) {}
void operator()(Expr* expr) { IRMutator::Visit(expr, expr); }
......@@ -1697,7 +1697,8 @@ void ScheduleImpl::ReverseComputeInline(const Expr& schedule_block) {
struct FindBlockParent : public ir::IRMutator<> {
public:
FindBlockParent(const std::string& block_name) : block_name_(block_name) {}
explicit FindBlockParent(const std::string& block_name)
: block_name_(block_name) {}
void operator()(Expr* expr) { IRMutator::Visit(expr, expr); }
......
......@@ -45,7 +45,8 @@ struct CompVar {
};
struct MappingVarToExprMutator : public ir::IRMutator<> {
MappingVarToExprMutator(const std::map<Var, Expr, CompVar>& replacing_map)
explicit MappingVarToExprMutator(
const std::map<Var, Expr, CompVar>& replacing_map)
: replacing_map_(replacing_map) {}
void operator()(Expr* expr) { IRMutator::Visit(expr, expr); }
......@@ -62,7 +63,7 @@ struct MappingVarToExprMutator : public ir::IRMutator<> {
};
struct FindLoopsVisitor {
FindLoopsVisitor(const Expr& block) : block_(block) {}
explicit FindLoopsVisitor(const Expr& block) : block_(block) {}
std::vector<Expr> operator()(const Expr* expr) {
CHECK(block_.As<ir::ScheduleBlockRealize>());
......@@ -112,7 +113,7 @@ struct FindLoopsVisitor {
Tensor GetTensor(const Expr& block);
struct FindBlocksVisitor {
FindBlocksVisitor(const std::string& block_name = "")
explicit FindBlocksVisitor(const std::string& block_name = "")
: block_name_(block_name) {}
std::vector<Expr> operator()(const Expr* expr) {
......
......@@ -66,9 +66,9 @@ class ScheduleDesc {
ScheduleDesc() = default;
ScheduleDesc(const std::vector<Step>& steps) : steps_(steps) {}
explicit ScheduleDesc(const std::vector<Step>& steps) : steps_(steps) {}
ScheduleDesc(std::vector<Step>&& steps) : steps_(steps) {}
explicit ScheduleDesc(std::vector<Step>&& steps) : steps_(steps) {}
// Append a new step
void Append(Step&& step);
......
......@@ -26,7 +26,7 @@ namespace optim {
namespace {
struct BufferUFNode : public common::UnionFindNode {
BufferUFNode(const std::string& x) : tensor_name(x) {}
explicit BufferUFNode(const std::string& x) : tensor_name(x) {}
const char* type_info() const override { return __type_info__; }
......@@ -38,7 +38,8 @@ const char* BufferUFNode::__type_info__ = "BufferUFNode";
struct IRReplaceTensorMutator : ir::IRMutator<> {
const std::map<std::string, ir::Tensor>& tensor_map;
IRReplaceTensorMutator(const std::map<std::string, ir::Tensor>& tensor_map)
explicit IRReplaceTensorMutator(
const std::map<std::string, ir::Tensor>& tensor_map)
: tensor_map(tensor_map) {}
void operator()(Expr* expr) { ir::IRMutator<>::Visit(expr, expr); }
......
......@@ -261,7 +261,7 @@ struct ReplaceFracWithDivMutator : public ir::IRMutator<> {
};
struct SimplifyBlocksMutator : public ir::IRMutator<> {
explicit SimplifyBlocksMutator() {}
SimplifyBlocksMutator() {}
void operator()(Expr* x) { ir::IRMutator<ir::Expr*>::Visit(x, x); }
......@@ -320,7 +320,7 @@ struct SimplifyBlocksMutator : public ir::IRMutator<> {
struct SimplifyForLoopsMutator : public ir::IRMutator<> {
absl::flat_hash_map<std::string, common::CasInterval> var_intervals;
explicit SimplifyForLoopsMutator() {}
SimplifyForLoopsMutator() {}
void operator()(Expr* x) { ir::IRMutator<ir::Expr*>::Visit(x, x); }
......
......@@ -116,7 +116,7 @@ void ReplaceVarWithExpr(Expr* source,
}
struct CollectTensorIndexMutator : public ir::IRMutator<> {
CollectTensorIndexMutator(const std::string& tensor_name)
explicit CollectTensorIndexMutator(const std::string& tensor_name)
: tensor_name_(tensor_name) {}
std::vector<std::vector<Expr>> operator()(Expr* expr) {
......
......@@ -46,7 +46,7 @@ struct Dim {
value_t upper_bound;
//! Construct a parameter.
Dim(std::string id) : id(std::move(id)) {}
explicit Dim(std::string id) : id(std::move(id)) {}
//! Construct a dimension with integer range.
Dim(std::string id, uint32_t lower_bound, uint32_t upper_bound)
......
......@@ -64,7 +64,7 @@ struct Shape {
template <typename T>
class Buffer {
public:
Buffer(const Shape& shape) : shape_(shape) {}
explicit Buffer(const Shape& shape) : shape_(shape) {}
//! Allocate the memory in host device.
void AllocHost() {
......
......@@ -1972,7 +1972,7 @@ class CurandGenerator {
CURAND_CALL(curandCreateGenerator(&generator_, CURAND_RNG_PSEUDO_DEFAULT));
}
CurandGenerator(curandRngType rng_type) {
explicit CurandGenerator(curandRngType rng_type) {
CURAND_CALL(curandCreateGenerator(&generator_, rng_type));
}
......
......@@ -76,9 +76,9 @@ std::string Summary::Format(const std::vector<HostEvent> &events) {
}
// Calculate Ratio
for (auto &item : items) {
item.sub_raito =
item.sub_ratio =
item.info.duration_ / category_cost[item.info.type_] * 100.0;
item.total_raito = item.info.duration_ / total_cost * 100.0;
item.total_ratio = item.info.duration_ / total_cost * 100.0;
}
std::sort(items.begin(), items.end());
......@@ -113,8 +113,8 @@ std::string Summary::AsStr(const std::vector<Item> &items, int data_width) {
std::vector<std::string> infos = {EventTypeToString(item.info.type_),
item.info.annotation_,
std::to_string(item.info.duration_),
item.sub_raito.ToStr(),
item.total_raito.ToStr()};
item.sub_ratio.ToStr(),
item.total_ratio.ToStr()};
idx = 0;
for (auto &info : infos) {
pad_size = widths[idx] > info.size() ? widths[idx] - info.size() : 1;
......
......@@ -67,20 +67,20 @@ struct HostEvent {
class Summary {
public:
struct Raito {
struct Ratio {
double value;
Raito(double val) : value(val) {}
Ratio(double val) : value(val) {} // NOLINT
std::string ToStr() const { return std::to_string(value); }
};
struct Item {
HostEvent info;
Raito sub_raito{0.0}; // percentage of EventType
Raito total_raito{0.0}; // precentage of total process
Ratio sub_ratio{0.0}; // percentage of EventType
Ratio total_ratio{0.0}; // precentage of total process
Item(const HostEvent& e) : info(e) {}
explicit Item(const HostEvent& e) : info(e) {}
bool operator<(const Item& other) const {
return total_raito.value > other.total_raito.value;
return total_ratio.value > other.total_ratio.value;
}
};
......
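`Ratio` likewise keeps its converting constructor implicit and marks it `// NOLINT`, because the summary code earlier in this diff assigns plain doubles into `Ratio` fields (e.g. `item.sub_ratio = ... * 100.0;`), which depends on the implicit `double -> Ratio` conversion. A minimal sketch of why adding `explicit` here would break that assignment:

```cpp
// Sketch mirroring the Ratio struct in this diff; assumes only what the hunk shows.
#include <string>

struct Ratio {
  double value;
  Ratio(double val) : value(val) {}  // NOLINT -- implicit by design
  std::string ToStr() const { return std::to_string(value); }
};

int main() {
  Ratio r{0.0};
  r = 42.0 / 1.5 * 100.0;  // implicit double -> Ratio; would fail to compile if explicit
  return 0;
}
```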
......@@ -66,7 +66,8 @@ class RecordEvent {
using CallBack = std::function<void()>;
public:
RecordEvent(const std::string& name, EventType type = EventType::kOrdinary);
explicit RecordEvent(const std::string& name,
EventType type = EventType::kOrdinary);
void End();
......
......@@ -49,7 +49,7 @@ class LinearRandomEngine {
static constexpr StateType modulus = 2147483647;
// Construct a linear random engine with a random state pointer
LinearRandomEngine(StateType* state) : state_(state) {}
explicit LinearRandomEngine(StateType* state) : state_(state) {}
// operator() is needed by std::xxx_distribution
result_type operator()() { return Next(); }
......
......@@ -37,7 +37,7 @@ template <class T,
class Alloc = std::allocator<T>>
class SizedMultiSet {
public:
SizedMultiSet(size_t capacity, bool pop_max_when_full = true)
explicit SizedMultiSet(size_t capacity, bool pop_max_when_full = true)
: capacity_(capacity), pop_max_when_full_(pop_max_when_full) {}
void Push(const T& data) {
......
......@@ -40,7 +40,7 @@ struct VariableInfo {
// variables and attributes
class ProgramBuilder {
public:
ProgramBuilder(const std::string& name) : builder_(name) {}
explicit ProgramBuilder(const std::string& name) : builder_(name) {}
/*
* \brief Build a frontend::Program with the input variables info and
......