diff --git a/paddle/cinn/common/cas.cc b/paddle/cinn/common/cas.cc
index ab23d3eb1e6c4b29eb7c9c2a0d9c069d62dc265c..8bd3ea0d96a18a95cee4bc014a8d55075bfcb7d1 100644
--- a/paddle/cinn/common/cas.cc
+++ b/paddle/cinn/common/cas.cc
@@ -123,10 +123,11 @@ Expr ProductGetNonConstantPart(Expr u) {
     }
     if (nonconstant_operands.empty()) {
       return make_const(u->type(), 1);
-    } else if (nonconstant_operands.size() == 1)
+    } else if (nonconstant_operands.size() == 1) {
       return nonconstant_operands.front();
-    else
+    } else {
       return Product::Make(nonconstant_operands);
+    }
   }
   return u;
 }
diff --git a/paddle/cinn/frontend/decomposer_registry.h b/paddle/cinn/frontend/decomposer_registry.h
index bbad4864f4809e4189ca971dd8033e70350617bc..258c81e33500834c76e50ee14fb829914a88f905 100644
--- a/paddle/cinn/frontend/decomposer_registry.h
+++ b/paddle/cinn/frontend/decomposer_registry.h
@@ -33,7 +33,7 @@ class DecomposerContext {
       NetBuilder* builder,
       absl::flat_hash_map<std::string, Variable>* var_map)
       : builder_(builder), var_map_(var_map) {}

-  NetBuilder* builder() const { return builder_; };
+  NetBuilder* builder() const { return builder_; }

   // Map the new var to the original var.
   void MapOutToOrigin(const Variable& new_var, const Variable& ori_var) const {
diff --git a/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc b/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc
index ee408a300eb9f7c3f85bb45319ae26471fb55077..c7173e4b9f1517a815f4f0248bc246e09e5c826d 100644
--- a/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc
+++ b/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc
@@ -56,12 +56,12 @@ EXPAND_ELTWISETYPE_STRING(kMin, " min ")
 template <EltwiseType Type>
 struct OpBuilder {};

-#define ELTWISE_SPEC(enum_t, function)                             \
-  template <>                                                      \
-  struct OpBuilder<enum_t> {                                       \
-    constexpr static Variable (NetBuilder::*func)(const Variable&, \
-                                                  const Variable&, \
-                                                  int){&function}; \
+#define ELTWISE_SPEC(enum_t, function)                                        \
+  template <>                                                                 \
+  struct OpBuilder<enum_t> {                                                  \
+    constexpr static Variable (NetBuilder::*func)(const Variable&,            \
+                                                  const Variable&,            \
+                                                  int){&function}; /*NOLINT*/ \
   }
 ELTWISE_SPEC(EltwiseType::kAdd, NetBuilder::Add);
 ELTWISE_SPEC(EltwiseType::kDiv, NetBuilder::Divide);
diff --git a/paddle/cinn/frontend/paddle/cpp/op_desc.cc b/paddle/cinn/frontend/paddle/cpp/op_desc.cc
index 35c790999f903cffeb5f9638e5cb8863e985629f..39fd1bb87d122ee40493b46b7ee3669afd7dad57 100644
--- a/paddle/cinn/frontend/paddle/cpp/op_desc.cc
+++ b/paddle/cinn/frontend/paddle/cpp/op_desc.cc
@@ -40,7 +40,7 @@ inline std::string AttrTypeToString(
     EXPAND_SWITCH_CASE(SCALAR)
     EXPAND_SWITCH_CASE(SCALARS)
 #undef EXPAND_SWITCH_CASE
-  };
+  }
   return "Invlid AttrType";
 }

diff --git a/paddle/cinn/hlir/op/elementwise.cc b/paddle/cinn/hlir/op/elementwise.cc
index 04c89093de3da2d72fe2024e2a308ea590e87667..0294fa6cf6f8a43495c448a6749d43475e02fdb1 100644
--- a/paddle/cinn/hlir/op/elementwise.cc
+++ b/paddle/cinn/hlir/op/elementwise.cc
@@ -467,11 +467,11 @@ std::shared_ptr<OpStrategy> StrategyForAssignValue(
   absl::optional<ir::Tensor> out;

 #define EXPAND_VALUE_TO_TENSOR(TYPE)                                          \
-  else if (absl::get_if<TYPE>(&value)) {                                      \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                           \
     out = pe::AssignValue(                                                    \
         std::vector<TYPE>{absl::get<TYPE>(value)}, out_type[0], tensor_name); \
   }                                                                           \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {                         \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/              \
     out = pe::AssignValue(                                                    \
         absl::get<std::vector<TYPE>>(value), out_type[0], tensor_name);       \
   }
@@ -479,7 +479,7 @@ std::shared_ptr<OpStrategy> StrategyForAssignValue(
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_VALUE_TO_TENSOR)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "Assign value not support the type " << out_type[0];
   }
 #undef EXPAND_VALUE_TO_TENSOR
@@ -510,17 +510,17 @@ std::vector<shape_t> InferShapeForAssignValue(
   shape_t shape;

 #define EXPAND_ATTR_TO_GET_SHAPE(TYPE)                               \
-  else if (absl::get_if<TYPE>(&value)) {                             \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                  \
     shape.emplace_back(1);                                           \
   }                                                                  \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {                \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/     \
     shape.emplace_back(absl::get<std::vector<TYPE>>(value).size());  \
   }

   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_SHAPE)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "assign_value not support the type!";
   }
 #undef EXPAND_ATTR_TO_GET_SHAPE
@@ -550,18 +550,18 @@ std::vector<Type> InferDtypeForAssignValue(
       << "assign_value should set attribute [values]! Please check.";
   const auto &value = attrs.at("values");

-#define EXPAND_ATTR_TO_GET_DTYPE(TYPE)                     \
-  else if (absl::get_if<TYPE>(&value)) {                   \
-    out_type = common::type_of<TYPE>();                    \
-  }                                                        \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {      \
-    out_type = common::type_of<TYPE>();                    \
+#define EXPAND_ATTR_TO_GET_DTYPE(TYPE)                               \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                  \
+    out_type = common::type_of<TYPE>();                              \
+  }                                                                  \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/     \
+    out_type = common::type_of<TYPE>();                              \
   }

   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_DTYPE)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "assign_value not support the type!";
   }
 #undef EXPAND_ATTR_TO_GET_DTYPE
diff --git a/paddle/cinn/hlir/pass/dot_merger.cc b/paddle/cinn/hlir/pass/dot_merger.cc
index d241d78815cf3b1851bba7d2d163b54f0d417843..30cc12c083676e015fc6461eee93a53ecf675ff4 100644
--- a/paddle/cinn/hlir/pass/dot_merger.cc
+++ b/paddle/cinn/hlir/pass/dot_merger.cc
@@ -126,8 +126,8 @@ class DotBuilder {
         shape_dict_{graph_->GetMutableAttrs<shape_dict_t>("infershape")} {}

   framework::Graph* graph() const { return graph_; }
-  const dtype_dict_t& dtype_dict() const { return dtype_dict_; };
-  const shape_dict_t& shape_dict() const { return shape_dict_; };
+  const dtype_dict_t& dtype_dict() const { return dtype_dict_; }
+  const shape_dict_t& shape_dict() const { return shape_dict_; }

   // Currently the constructor of `NodeData` needs to pass in `Shared<Node>`.
   NodeData* Var(common::Shared<Node>& producer) {
diff --git a/paddle/cinn/hlir/pass/fusion_merge_pass_util.h b/paddle/cinn/hlir/pass/fusion_merge_pass_util.h
index 108623eb2a4005a76355ae7a53673fb2e4416b71..73e83ee31f39ef4b5a42bfca5975da76a8bfa559 100644
--- a/paddle/cinn/hlir/pass/fusion_merge_pass_util.h
+++ b/paddle/cinn/hlir/pass/fusion_merge_pass_util.h
@@ -77,7 +77,7 @@ bool is_const_group(const FusionHelperBase* helper,
                     const std::shared_ptr<Graph::Group>& group) {
   return group->CollectNodes().size() == 1 &&
          helper->IsConstOp(group->CollectNodes()[0]);
-};
+}

 CONDITION_FUNC(elementwise_fuse_broadcast) {
   // if producer just include const op.
diff --git a/paddle/cinn/hlir/pe/reduction.cc b/paddle/cinn/hlir/pe/reduction.cc
index d4e2daa893a260caa944da852b37fb92579ef6d1..2feac6104b4341fd4a0913c4de8b4fc61f65ea14 100644
--- a/paddle/cinn/hlir/pe/reduction.cc
+++ b/paddle/cinn/hlir/pe/reduction.cc
@@ -759,8 +759,9 @@ std::vector<ir::Tensor> ReduceInternal(const ir::Tensor& A,
         A, axes, keep_dim, output_name, Reduce##name, initial, reduce_type); \
     if (rs.size() == 0) {                                                    \
       return {Reduce##name(A, axes, keep_dim, output_name)};                 \
-    } else                                                                   \
+    } else {                                                                 \
       return rs;                                                             \
+    }                                                                        \
   }                                                                          \
 }

@@ -801,7 +802,7 @@ bool WithoutLastDimInReduce(const std::vector<int>& inshape,
   } else {
     return false;
   }
-};
+}

 using BlockReduceFunc =
     std::function<std::vector<ir::Tensor>(const ir::Tensor&,
diff --git a/paddle/cinn/hlir/pe/schedule.cc b/paddle/cinn/hlir/pe/schedule.cc
index 6e7e571f53aba110a67f547c372cf2978c32e11c..39dfd2360cca19cfbb64d91f1a14a4a08b341ab1 100644
--- a/paddle/cinn/hlir/pe/schedule.cc
+++ b/paddle/cinn/hlir/pe/schedule.cc
@@ -2814,8 +2814,9 @@ void CudaSplitSchedule(common::CINNValuePack *arg_pack,
       stages[last_output]->Bind(0, "blockIdx.x");
       stages[last_output]->Bind(1, "threadIdx.x");
       compute_at_level++;
-    } else
+    } else {
       stages[last_output]->Bind(0, "threadIdx.x");
+    }
   }

   for (int i = 0; i < out_tensors.size() - 1; i++) {
diff --git a/paddle/cinn/ir/ir_schedule.cc b/paddle/cinn/ir/ir_schedule.cc
index b7e4946fa2b26ab5a7fa3632d42a526b12de6693..0214c83bb3cd72e775e5bbb424166ba95b352374 100644
--- a/paddle/cinn/ir/ir_schedule.cc
+++ b/paddle/cinn/ir/ir_schedule.cc
@@ -2142,8 +2142,9 @@ void ScheduleImpl::CopyTransformAndLoopInfo(const Expr& block,
         !vars[i]->is_reduce_axis && !vars_target[i]->is_reduce_axis) {
       new_iter_values.push_back(iter_values_target[i]);
       VLOG(3) << "new_iter_values.push_back " << iter_values_target[i];
-    } else
+    } else {
       break;
+    }
   }

   if (new_iter_values.empty())
diff --git a/paddle/cinn/runtime/cuda/cuda_util.cc b/paddle/cinn/runtime/cuda/cuda_util.cc
index a753db5856151a809a3de2074986447fdff153b3..331e67868954dc8d4ea756d2f6ee5c4dcd234549 100644
--- a/paddle/cinn/runtime/cuda/cuda_util.cc
+++ b/paddle/cinn/runtime/cuda/cuda_util.cc
@@ -665,7 +665,7 @@ std::string debug_cudnn_tensor_format(cudnnTensorFormat_t tensor_format) {
       return "NHWC";
     default:
       LOG(FATAL) << "Only support NCHW and NHWC data layout\n";
-  };
+  }
   return "";
 }

@@ -681,7 +681,7 @@ std::string debug_cudnn_tensor_dtype(cudnnDataType_t tensor_dtype) {
       return "float64";
     default:
       LOG(FATAL) << "Only support float16/bfloat16/float32/float64 now!";
-  };
+  }
   return "";
 }

@@ -697,7 +697,7 @@ std::string debug_cudnn_pool_mode(cudnnPoolingMode_t pool_mode) {
       return "avg_exclulude_padding";
     default:
       LOG(FATAL) << "Pool only support max and avg now!";
-  };
+  }
   return "";
 }

diff --git a/paddle/cinn/utils/event.h b/paddle/cinn/utils/event.h
index dad99634bf41e5fefba26b2101c42dc8cfb6cb0e..a87dcf4828e0e9f322ba91f4ab7dbed27a87f37b 100644
--- a/paddle/cinn/utils/event.h
+++ b/paddle/cinn/utils/event.h
@@ -69,7 +69,7 @@ class Summary {
  public:
   struct Raito {
     double value;
-    Raito(double val) : value(val){};
+    Raito(double val) : value(val) {}

     std::string ToStr() const { return std::to_string(value); }
   };
diff --git a/paddle/cinn/utils/string.cc b/paddle/cinn/utils/string.cc
index 8b229a263d51e479a07b9d146005851ecfa6a638..c802cedfa5c2eb732db131c3d9e412045f9e2ced 100644
--- a/paddle/cinn/utils/string.cc
+++ b/paddle/cinn/utils/string.cc
@@ -112,8 +112,9 @@ size_t Count(std::string *s, const std::string &sub) {
         !IsSuffix(s->at(pos + sub.length())))) {
       pos += sub.length();
       times++;
-    } else
+    } else {
       pos++;
+    }
   }
   return times;
 }
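Most of the `NOLINT` annotations above exist because the `EXPAND_*` X-macros expand to fragments that *begin* with `else`: the chain is seeded with an empty `if (false) {}`, every macro expansion appends an `else if` branch, and a literal `else { LOG(FATAL) ... }` closes it, so cpplint's brace checks must be suppressed on each `else`. Below is a minimal, self-contained sketch of that idiom, not CINN code: the names (`Attribute`, `Describe`, `EXPAND_TYPE_TO_NAME`) are hypothetical, and `std::variant` stands in for the `absl::variant` attribute type used in the diff.

```cpp
#include <iostream>
#include <string>
#include <variant>
#include <vector>

// Hypothetical stand-in for the op attribute variant in the diff.
using Attribute = std::variant<int, float, std::vector<int>>;

std::string Describe(const Attribute& value) {
  std::string out;
// Each expansion begins with `else`, which is exactly what cpplint flags
// and why the real diff adds /*NOLINT*/ inside the macro bodies.
#define EXPAND_TYPE_TO_NAME(TYPE, NAME)              \
  else if (std::get_if<TYPE>(&value)) { /*NOLINT*/   \
    out = NAME;                                      \
  }

  if (false) {  // NOLINT: empty seed so the macro branches chain onto it
  }
  EXPAND_TYPE_TO_NAME(int, "int")
  EXPAND_TYPE_TO_NAME(float, "float")
  EXPAND_TYPE_TO_NAME(std::vector<int>, "int list")
  else {  // NOLINT: fallback branch, mirrors the LOG(FATAL) cases above
    out = "unsupported";
  }
#undef EXPAND_TYPE_TO_NAME
  return out;
}

int main() {
  std::cout << Describe(Attribute{3.5f}) << "\n";                    // float
  std::cout << Describe(Attribute{std::vector<int>{1, 2}}) << "\n";  // int list
}
```

The `if (false) {}` seed is what lets one macro serve every supported type: each invocation adds a branch to a single else-if ladder, so exactly one branch runs and the trailing `else` catches unsupported types.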