From 3ad7842880cb5edd4c087095479bd81fe4f1da97 Mon Sep 17 00:00:00 2001
From: Wang Xin
Date: Sun, 2 Jul 2023 17:03:54 +0800
Subject: [PATCH] [CodeStyle][CINN] fix cpplint codestyle `[readability/braces]` (#55049)

Co-authored-by: SigureMo
---
 paddle/cinn/common/cas.cc                      |  5 ++--
 paddle/cinn/frontend/decomposer_registry.h     |  2 +-
 .../frontend/op_mappers/paddle/elementwise.cc  | 12 ++++----
 paddle/cinn/frontend/paddle/cpp/op_desc.cc     |  2 +-
 paddle/cinn/hlir/op/elementwise.cc             | 26 +++++++++----------
 paddle/cinn/hlir/pass/dot_merger.cc            |  4 +--
 .../cinn/hlir/pass/fusion_merge_pass_util.h    |  2 +-
 paddle/cinn/hlir/pe/reduction.cc               |  5 ++--
 paddle/cinn/hlir/pe/schedule.cc                |  3 ++-
 paddle/cinn/ir/ir_schedule.cc                  |  3 ++-
 paddle/cinn/runtime/cuda/cuda_util.cc          |  6 ++---
 paddle/cinn/utils/event.h                      |  2 +-
 paddle/cinn/utils/string.cc                    |  3 ++-
 13 files changed, 40 insertions(+), 35 deletions(-)

diff --git a/paddle/cinn/common/cas.cc b/paddle/cinn/common/cas.cc
index ab23d3eb1e6..8bd3ea0d96a 100644
--- a/paddle/cinn/common/cas.cc
+++ b/paddle/cinn/common/cas.cc
@@ -123,10 +123,11 @@ Expr ProductGetNonConstantPart(Expr u) {
     }
     if (nonconstant_operands.empty()) {
       return make_const(u->type(), 1);
-    } else if (nonconstant_operands.size() == 1)
+    } else if (nonconstant_operands.size() == 1) {
       return nonconstant_operands.front();
-    else
+    } else {
       return Product::Make(nonconstant_operands);
+    }
   }
   return u;
 }
diff --git a/paddle/cinn/frontend/decomposer_registry.h b/paddle/cinn/frontend/decomposer_registry.h
index bbad4864f48..258c81e3350 100644
--- a/paddle/cinn/frontend/decomposer_registry.h
+++ b/paddle/cinn/frontend/decomposer_registry.h
@@ -33,7 +33,7 @@ class DecomposerContext {
       NetBuilder* builder,
       absl::flat_hash_map<std::string, Variable>* var_map)
       : builder_(builder), var_map_(var_map) {}
 
-  NetBuilder* builder() const { return builder_; };
+  NetBuilder* builder() const { return builder_; }
 
   // Map the new var to the original var.
   void MapOutToOrigin(const Variable& new_var, const Variable& ori_var) const {
diff --git a/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc b/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc
index ee408a300eb..c7173e4b9f1 100644
--- a/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc
+++ b/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc
@@ -56,12 +56,12 @@ EXPAND_ELTWISETYPE_STRING(kMin, " min ")
 template <EltwiseType Type>
 struct OpBuilder {};
 
-#define ELTWISE_SPEC(enum_t, function)                              \
-  template <>                                                       \
-  struct OpBuilder<enum_t> {                                        \
-    constexpr static Variable (NetBuilder::*func)(const Variable&,  \
-                                                  const Variable&,  \
-                                                  int){&function};  \
+#define ELTWISE_SPEC(enum_t, function)                                         \
+  template <>                                                                  \
+  struct OpBuilder<enum_t> {                                                   \
+    constexpr static Variable (NetBuilder::*func)(const Variable&,             \
+                                                  const Variable&,             \
+                                                  int){&function}; /*NOLINT*/  \
   }
 ELTWISE_SPEC(EltwiseType::kAdd, NetBuilder::Add);
 ELTWISE_SPEC(EltwiseType::kDiv, NetBuilder::Divide);
diff --git a/paddle/cinn/frontend/paddle/cpp/op_desc.cc b/paddle/cinn/frontend/paddle/cpp/op_desc.cc
index 35c790999f9..39fd1bb87d1 100644
--- a/paddle/cinn/frontend/paddle/cpp/op_desc.cc
+++ b/paddle/cinn/frontend/paddle/cpp/op_desc.cc
@@ -40,7 +40,7 @@ inline std::string AttrTypeToString(
     EXPAND_SWITCH_CASE(SCALAR)
     EXPAND_SWITCH_CASE(SCALARS)
 #undef EXPAND_SWITCH_CASE
-  };
+  }
   return "Invlid AttrType";
 }
diff --git a/paddle/cinn/hlir/op/elementwise.cc b/paddle/cinn/hlir/op/elementwise.cc
index 04c89093de3..0294fa6cf6f 100644
--- a/paddle/cinn/hlir/op/elementwise.cc
+++ b/paddle/cinn/hlir/op/elementwise.cc
@@ -467,11 +467,11 @@ std::shared_ptr<OpStrategy> StrategyForAssignValue(
   absl::optional<ir::Tensor> out;
 
 #define EXPAND_VALUE_TO_TENSOR(TYPE)                                            \
-  else if (absl::get_if<TYPE>(&value)) {                                        \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                             \
     out = pe::AssignValue(                                                      \
         std::vector<TYPE>{absl::get<TYPE>(value)}, out_type[0], tensor_name);   \
   }                                                                             \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {                           \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/                \
     out = pe::AssignValue(                                                      \
         absl::get<std::vector<TYPE>>(value), out_type[0], tensor_name);         \
   }
@@ -479,7 +479,7 @@ std::shared_ptr<OpStrategy> StrategyForAssignValue(
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_VALUE_TO_TENSOR)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "Assign value not support the type " << out_type[0];
   }
 #undef EXPAND_VALUE_TO_TENSOR
@@ -510,17 +510,17 @@ std::vector<shape_t> InferShapeForAssignValue(
   shape_t shape;
 
 #define EXPAND_ATTR_TO_GET_SHAPE(TYPE)                               \
-  else if (absl::get_if<TYPE>(&value)) {                             \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                  \
     shape.emplace_back(1);                                           \
   }                                                                  \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {                \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/     \
     shape.emplace_back(absl::get<std::vector<TYPE>>(value).size());  \
   }
 
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_SHAPE)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "assign_value not support the type!";
   }
 #undef EXPAND_ATTR_TO_GET_SHAPE
@@ -550,18 +550,18 @@ std::vector<Type> InferDtypeForAssignValue(
       << "assign_value should set attribute [values]! Please check.";
   const auto &value = attrs.at("values");
 
-#define EXPAND_ATTR_TO_GET_DTYPE(TYPE)                            \
-  else if (absl::get_if<TYPE>(&value)) {                          \
-    out_type = common::type_of<TYPE>();                           \
-  }                                                               \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {             \
-    out_type = common::type_of<TYPE>();                           \
+#define EXPAND_ATTR_TO_GET_DTYPE(TYPE)                             \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                \
+    out_type = common::type_of<TYPE>();                            \
+  }                                                                \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/   \
+    out_type = common::type_of<TYPE>();                            \
   }
 
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_DTYPE)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "assign_value not support the type!";
   }
 #undef EXPAND_ATTR_TO_GET_DTYPE
diff --git a/paddle/cinn/hlir/pass/dot_merger.cc b/paddle/cinn/hlir/pass/dot_merger.cc
index d241d78815c..30cc12c0836 100644
--- a/paddle/cinn/hlir/pass/dot_merger.cc
+++ b/paddle/cinn/hlir/pass/dot_merger.cc
@@ -126,8 +126,8 @@ class DotBuilder {
         shape_dict_{graph_->GetMutableAttrs<shape_dict_t>("infershape")} {}
 
   framework::Graph* graph() const { return graph_; }
-  const dtype_dict_t& dtype_dict() const { return dtype_dict_; };
-  const shape_dict_t& shape_dict() const { return shape_dict_; };
+  const dtype_dict_t& dtype_dict() const { return dtype_dict_; }
+  const shape_dict_t& shape_dict() const { return shape_dict_; }
 
   // Currently the constructor of `NodeData` needs to pass in `Shared<Node>`.
   NodeData* Var(common::Shared<Node>& producer) {
diff --git a/paddle/cinn/hlir/pass/fusion_merge_pass_util.h b/paddle/cinn/hlir/pass/fusion_merge_pass_util.h
index 108623eb2a4..73e83ee31f3 100644
--- a/paddle/cinn/hlir/pass/fusion_merge_pass_util.h
+++ b/paddle/cinn/hlir/pass/fusion_merge_pass_util.h
@@ -77,7 +77,7 @@ bool is_const_group(const FusionHelperBase* helper,
                     const std::shared_ptr<Graph::Group>& group) {
   return group->CollectNodes().size() == 1 &&
          helper->IsConstOp(group->CollectNodes()[0]);
-};
+}
 
 CONDITION_FUNC(elementwise_fuse_broadcast) {
   // if producer just include const op.
diff --git a/paddle/cinn/hlir/pe/reduction.cc b/paddle/cinn/hlir/pe/reduction.cc
index d4e2daa893a..2feac6104b4 100644
--- a/paddle/cinn/hlir/pe/reduction.cc
+++ b/paddle/cinn/hlir/pe/reduction.cc
@@ -759,8 +759,9 @@ std::vector<ir::Tensor> ReduceInternal(const ir::Tensor& A,
         A, axes, keep_dim, output_name, Reduce##name, initial, reduce_type);  \
     if (rs.size() == 0) {                                                     \
       return {Reduce##name(A, axes, keep_dim, output_name)};                  \
-    } else                                                                    \
+    } else {                                                                  \
       return rs;                                                              \
+    }                                                                         \
   }                                                                           \
 }
@@ -801,7 +802,7 @@ bool WithoutLastDimInReduce(const std::vector<int>& inshape,
   } else {
     return false;
   }
-};
+}
 
 using BlockReduceFunc =
     std::function<std::vector<ir::Tensor>(const ir::Tensor&,
diff --git a/paddle/cinn/hlir/pe/schedule.cc b/paddle/cinn/hlir/pe/schedule.cc
index 6e7e571f53a..39dfd2360cc 100644
--- a/paddle/cinn/hlir/pe/schedule.cc
+++ b/paddle/cinn/hlir/pe/schedule.cc
@@ -2814,8 +2814,9 @@ void CudaSplitSchedule(common::CINNValuePack *arg_pack,
       stages[last_output]->Bind(0, "blockIdx.x");
       stages[last_output]->Bind(1, "threadIdx.x");
       compute_at_level++;
-    } else
+    } else {
       stages[last_output]->Bind(0, "threadIdx.x");
+    }
   }
 
   for (int i = 0; i < out_tensors.size() - 1; i++) {
diff --git a/paddle/cinn/ir/ir_schedule.cc b/paddle/cinn/ir/ir_schedule.cc
index b7e4946fa2b..0214c83bb3c 100644
--- a/paddle/cinn/ir/ir_schedule.cc
+++ b/paddle/cinn/ir/ir_schedule.cc
@@ -2142,8 +2142,9 @@ void ScheduleImpl::CopyTransformAndLoopInfo(const Expr& block,
         !vars[i]->is_reduce_axis && !vars_target[i]->is_reduce_axis) {
       new_iter_values.push_back(iter_values_target[i]);
       VLOG(3) << "new_iter_values.push_back " << iter_values_target[i];
-    } else
+    } else {
       break;
+    }
   }
 
   if (new_iter_values.empty())
diff --git a/paddle/cinn/runtime/cuda/cuda_util.cc b/paddle/cinn/runtime/cuda/cuda_util.cc
index a753db58561..331e6786895 100644
--- a/paddle/cinn/runtime/cuda/cuda_util.cc
+++ b/paddle/cinn/runtime/cuda/cuda_util.cc
@@ -665,7 +665,7 @@ std::string debug_cudnn_tensor_format(cudnnTensorFormat_t tensor_format) {
       return "NHWC";
     default:
       LOG(FATAL) << "Only support NCHW and NHWC data layout\n";
-  };
+  }
   return "";
 }
@@ -681,7 +681,7 @@ std::string debug_cudnn_tensor_dtype(cudnnDataType_t tensor_dtype) {
       return "float64";
     default:
       LOG(FATAL) << "Only support float16/bfloat16/float32/float64 now!";
-  };
+  }
   return "";
 }
@@ -697,7 +697,7 @@ std::string debug_cudnn_pool_mode(cudnnPoolingMode_t pool_mode) {
       return "avg_exclulude_padding";
     default:
       LOG(FATAL) << "Pool only support max and avg now!";
-  };
+  }
   return "";
 }
diff --git a/paddle/cinn/utils/event.h b/paddle/cinn/utils/event.h
index dad99634bf4..a87dcf4828e 100644
--- a/paddle/cinn/utils/event.h
+++ b/paddle/cinn/utils/event.h
@@ -69,7 +69,7 @@ class Summary {
  public:
   struct Raito {
     double value;
-    Raito(double val) : value(val){};
+    Raito(double val) : value(val) {}
 
     std::string ToStr() const { return std::to_string(value); }
   };
diff --git a/paddle/cinn/utils/string.cc b/paddle/cinn/utils/string.cc
index 8b229a263d5..c802cedfa5c 100644
--- a/paddle/cinn/utils/string.cc
+++ b/paddle/cinn/utils/string.cc
@@ -112,8 +112,9 @@ size_t Count(std::string *s, const std::string &sub) {
          !IsSuffix(s->at(pos + sub.length())))) {
       pos += sub.length();
       times++;
-    } else
+    } else {
       pos++;
+    }
   }
   return times;
 }
-- 
GitLab
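
For reference, a minimal standalone C++ sketch (not part of the patch; the function and names are illustrative only) of the cpplint `[readability/braces]` pattern the hunks above enforce: every `if`/`else if`/`else` branch is braced, and the stray `;` after a function body or `switch` block is dropped.

#include <string>

// Illustrative only: the braced form below is what cpplint's
// [readability/braces] check expects.
std::string SizeClass(int n) {
  if (n <= 0) {
    return "empty";
  } else if (n == 1) {  // braces kept even for a single-statement branch
    return "one";
  } else {
    return "many";
  }
}  // no trailing semicolon after the function body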