Unverified commit 3ad78428, authored by Wang Xin, committed by GitHub

[CodeStyle][CINN] fix cpplint codestyle `[readability/braces]` (#55049)

Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent cc7d1f34
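For readers unfamiliar with the rule: cpplint's `[readability/braces]` check flags single-statement `if`/`else` bodies that are not wrapped in braces, as well as stray semicolons after a closing brace (for example after a function body or a `switch` block). A minimal before/after sketch of the pattern this commit applies throughout; `Example`, `Foo`, and `Bar` are hypothetical names, not code from this repository:

// A hypothetical illustration of what cpplint [readability/braces] flags.
struct Example {
  int v_ = 0;

  // Before: `int Get() const { return v_; };` -- the stray ';' after the
  // closing '}' of the function body is flagged. After:
  int Get() const { return v_; }

  void Run(int x) {
    // Before: `} else` followed by an unbraced `Bar();` is flagged. After:
    if (x > 0) {
      Foo();
    } else {
      Bar();
    }
  }

  void Foo() {}
  void Bar() {}
};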
@@ -123,10 +123,11 @@ Expr ProductGetNonConstantPart(Expr u) {
     }
     if (nonconstant_operands.empty()) {
       return make_const(u->type(), 1);
-    } else if (nonconstant_operands.size() == 1)
+    } else if (nonconstant_operands.size() == 1) {
       return nonconstant_operands.front();
-    else
+    } else {
       return Product::Make(nonconstant_operands);
+    }
   }
   return u;
 }
...
@@ -33,7 +33,7 @@ class DecomposerContext {
       NetBuilder* builder, absl::flat_hash_map<std::string, Variable>* var_map)
       : builder_(builder), var_map_(var_map) {}
-  NetBuilder* builder() const { return builder_; };
+  NetBuilder* builder() const { return builder_; }
   // Map the new var to the original var.
   void MapOutToOrigin(const Variable& new_var, const Variable& ori_var) const {
...
@@ -56,12 +56,12 @@ EXPAND_ELTWISETYPE_STRING(kMin, " min ")
 template <EltwiseType Type>
 struct OpBuilder {};
 #define ELTWISE_SPEC(enum_t, function)                             \
   template <>                                                      \
   struct OpBuilder<enum_t> {                                       \
     constexpr static Variable (NetBuilder::*func)(const Variable&, \
                                                   const Variable&, \
-                                                  int){&function}; \
+                                                  int){&function}; /*NOLINT*/ \
   }
 ELTWISE_SPEC(EltwiseType::kAdd, NetBuilder::Add);
 ELTWISE_SPEC(EltwiseType::kDiv, NetBuilder::Divide);
...
@@ -40,7 +40,7 @@ inline std::string AttrTypeToString(
     EXPAND_SWITCH_CASE(SCALAR)
     EXPAND_SWITCH_CASE(SCALARS)
 #undef EXPAND_SWITCH_CASE
-  };
+  }
   return "Invlid AttrType";
 }
...
@@ -467,11 +467,11 @@ std::shared_ptr<OpStrategy> StrategyForAssignValue(
   absl::optional<ir::Tensor> out;
 #define EXPAND_VALUE_TO_TENSOR(TYPE)                                          \
-  else if (absl::get_if<TYPE>(&value)) {                                      \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                           \
     out = pe::AssignValue(                                                    \
         std::vector<TYPE>{absl::get<TYPE>(value)}, out_type[0], tensor_name); \
   }                                                                           \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {                         \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/              \
     out = pe::AssignValue(                                                    \
         absl::get<std::vector<TYPE>>(value), out_type[0], tensor_name);       \
   }
@@ -479,7 +479,7 @@ std::shared_ptr<OpStrategy> StrategyForAssignValue(
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_VALUE_TO_TENSOR)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "Assign value not support the type " << out_type[0];
   }
 #undef EXPAND_VALUE_TO_TENSOR
@@ -510,17 +510,17 @@ std::vector<shape_t> InferShapeForAssignValue(
   shape_t shape;
 #define EXPAND_ATTR_TO_GET_SHAPE(TYPE)                               \
-  else if (absl::get_if<TYPE>(&value)) {                             \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                  \
     shape.emplace_back(1);                                           \
   }                                                                  \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {                \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/     \
     shape.emplace_back(absl::get<std::vector<TYPE>>(value).size());  \
   }
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_SHAPE)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "assign_value not support the type!";
   }
 #undef EXPAND_ATTR_TO_GET_SHAPE
@@ -550,18 +550,18 @@ std::vector<Type> InferDtypeForAssignValue(
       << "assign_value should set attribute [values]! Please check.";
   const auto &value = attrs.at("values");
 #define EXPAND_ATTR_TO_GET_DTYPE(TYPE)                           \
-  else if (absl::get_if<TYPE>(&value)) {                         \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/              \
     out_type = common::type_of<TYPE>();                          \
   }                                                              \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {            \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/ \
     out_type = common::type_of<TYPE>();                          \
   }
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_DTYPE)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "assign_value not support the type!";
   }
 #undef EXPAND_ATTR_TO_GET_DTYPE
...
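A note on the `/*NOLINT*/` markers added in the hunks above: each `EXPAND_*` macro deliberately begins with `else if`, so that `EXPAND_ATTR_TYPE(...)` expands into one long `if (false) {} else if (...) {} ... else {}` chain at the call site. cpplint analyzes the un-expanded source, where those `else if` fragments look like brace violations, so the fix suppresses the check rather than restructuring the macros. Below is a self-contained sketch of the same dispatch pattern, using `std::variant` and hypothetical `HANDLE_TYPE`/`APPLY_TO_TYPES` names in place of the `absl` helpers:

#include <iostream>
#include <variant>
#include <vector>

using Value = std::variant<int, float, std::vector<int>>;

// Each expansion deliberately starts with `else if`, continuing a chain
// that a preceding `if` must open.
#define HANDLE_TYPE(TYPE)                          \
  else if (std::get_if<TYPE>(&value)) { /*NOLINT*/ \
    name = #TYPE;                                  \
  }
#define APPLY_TO_TYPES(macro) macro(int) macro(float)

int main() {
  Value value = 3.5f;
  const char* name = "unknown";
  if (false) {  // NOLINT: opens the macro-generated else-if chain
  }
  APPLY_TO_TYPES(HANDLE_TYPE)
  else {  // NOLINT
    name = "unsupported";
  }
  std::cout << name << "\n";  // prints "float"
}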
@@ -126,8 +126,8 @@ class DotBuilder {
         shape_dict_{graph_->GetMutableAttrs<shape_dict_t>("infershape")} {}
   framework::Graph* graph() const { return graph_; }
-  const dtype_dict_t& dtype_dict() const { return dtype_dict_; };
-  const shape_dict_t& shape_dict() const { return shape_dict_; };
+  const dtype_dict_t& dtype_dict() const { return dtype_dict_; }
+  const shape_dict_t& shape_dict() const { return shape_dict_; }
   // Currently the constructor of `NodeData` needs to pass in `Shared<Node>`.
   NodeData* Var(common::Shared<Node>& producer) {
...
@@ -77,7 +77,7 @@ bool is_const_group(const FusionHelperBase* helper,
                     const std::shared_ptr<Graph::Group>& group) {
   return group->CollectNodes().size() == 1 &&
          helper->IsConstOp(group->CollectNodes()[0]);
-};
+}
 CONDITION_FUNC(elementwise_fuse_broadcast) {
   // if producer just include const op.
...
@@ -759,8 +759,9 @@ std::vector<ir::Tensor> ReduceInternal(const ir::Tensor& A,
         A, axes, keep_dim, output_name, Reduce##name, initial, reduce_type); \
     if (rs.size() == 0) {                                                    \
       return {Reduce##name(A, axes, keep_dim, output_name)};                 \
-    } else                                                                   \
+    } else {                                                                 \
       return rs;                                                             \
+    }                                                                        \
   }                                                                          \
 }
@@ -801,7 +802,7 @@ bool WithoutLastDimInReduce(const std::vector<ir::Expr>& inshape,
   } else {
     return false;
   }
-};
+}
 using BlockReduceFunc =
     std::function<std::vector<ir::Tensor>(const ir::Tensor&,
...
@@ -2814,8 +2814,9 @@ void CudaSplitSchedule(common::CINNValuePack *arg_pack,
       stages[last_output]->Bind(0, "blockIdx.x");
       stages[last_output]->Bind(1, "threadIdx.x");
       compute_at_level++;
-    } else
+    } else {
       stages[last_output]->Bind(0, "threadIdx.x");
+    }
   }
   for (int i = 0; i < out_tensors.size() - 1; i++) {
...
@@ -2142,8 +2142,9 @@ void ScheduleImpl::CopyTransformAndLoopInfo(const Expr& block,
         !vars[i]->is_reduce_axis && !vars_target[i]->is_reduce_axis) {
       new_iter_values.push_back(iter_values_target[i]);
       VLOG(3) << "new_iter_values.push_back " << iter_values_target[i];
-    } else
+    } else {
       break;
+    }
   }
   if (new_iter_values.empty())
...
@@ -665,7 +665,7 @@ std::string debug_cudnn_tensor_format(cudnnTensorFormat_t tensor_format) {
       return "NHWC";
     default:
       LOG(FATAL) << "Only support NCHW and NHWC data layout\n";
-  };
+  }
   return "";
 }
@@ -681,7 +681,7 @@ std::string debug_cudnn_tensor_dtype(cudnnDataType_t tensor_dtype) {
       return "float64";
     default:
       LOG(FATAL) << "Only support float16/bfloat16/float32/float64 now!";
-  };
+  }
   return "";
 }
@@ -697,7 +697,7 @@ std::string debug_cudnn_pool_mode(cudnnPoolingMode_t pool_mode) {
       return "avg_exclulude_padding";
     default:
       LOG(FATAL) << "Pool only support max and avg now!";
-  };
+  }
   return "";
 }
...
@@ -69,7 +69,7 @@ class Summary {
  public:
   struct Raito {
     double value;
-    Raito(double val) : value(val){};
+    Raito(double val) : value(val) {}
     std::string ToStr() const { return std::to_string(value); }
   };
...
@@ -112,8 +112,9 @@ size_t Count(std::string *s, const std::string &sub) {
          !IsSuffix(s->at(pos + sub.length())))) {
       pos += sub.length();
       times++;
-    } else
+    } else {
       pos++;
+    }
   }
   return times;
 }
...