Unverified commit 3ad78428, authored by Wang Xin, committed by GitHub

[CodeStyle][CINN] fix cpplint codestyle `[readability/braces]` (#55049)

Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent: cc7d1f34
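cpplint's `readability/braces` check fires, among other cases, when an `if`/`else` chain braces some branches but not others (the warning reads "If an else has a brace on one side, it should have it on both") and when a closing brace is followed by a redundant semicolon ("You don't need a ; after a }"). The hunks below fix both patterns, adding `NOLINT` suppressions where macro tricks make the style impossible to satisfy. A minimal before/after sketch of the first pattern (illustrative code, not from this patch):

```cpp
// Before: mixed bracing; cpplint warns on the unbraced branches.
int Sign(int x) {
  if (x > 0) {
    return 1;
  } else if (x < 0)
    return -1;
  else
    return 0;
}

// After: every branch of the chain is braced.
int SignFixed(int x) {
  if (x > 0) {
    return 1;
  } else if (x < 0) {
    return -1;
  } else {
    return 0;
  }
}
```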
```diff
@@ -123,11 +123,12 @@ Expr ProductGetNonConstantPart(Expr u) {
     }
     if (nonconstant_operands.empty()) {
       return make_const(u->type(), 1);
-    } else if (nonconstant_operands.size() == 1)
+    } else if (nonconstant_operands.size() == 1) {
       return nonconstant_operands.front();
-    else
+    } else {
       return Product::Make(nonconstant_operands);
+    }
   }
   return u;
 }
```
```diff
@@ -33,7 +33,7 @@ class DecomposerContext {
       NetBuilder* builder, absl::flat_hash_map<std::string, Variable>* var_map)
       : builder_(builder), var_map_(var_map) {}
-  NetBuilder* builder() const { return builder_; };
+  NetBuilder* builder() const { return builder_; }
   // Map the new var to the original var.
   void MapOutToOrigin(const Variable& new_var, const Variable& ori_var) const {
```
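This and several later hunks drop a redundant semicolon after a function body, which cpplint reports as "You don't need a ; after a }". A minimal sketch of the cleaned-up form (illustrative class, not from the patch):

```cpp
class Counter {
 public:
  // A function body ends at '}'; only declarations such as the
  // class itself need a trailing ';'.
  int value() const { return value_; }

 private:
  int value_ = 0;
};
```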
```diff
@@ -61,7 +61,7 @@ struct OpBuilder {};
   struct OpBuilder<enum_t> {                                        \
     constexpr static Variable (NetBuilder::*func)(const Variable&,  \
                                                   const Variable&,  \
-                                                  int){&function};  \
+                                                  int){&function}; /*NOLINT*/ \
   }
 ELTWISE_SPEC(EltwiseType::kAdd, NetBuilder::Add);
 ELTWISE_SPEC(EltwiseType::kDiv, NetBuilder::Divide);
```
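Inside a macro definition the suppression has to use the C-style `/*NOLINT*/` form: backslash-newline splicing happens before comments are parsed, so a `// NOLINT` comment would absorb the trailing continuation backslash and the line after it, breaking the macro. cpplint matches the `NOLINT` token anywhere on the raw line, so the C-style form still suppresses the warning. A sketch of the pattern with a hypothetical macro:

```cpp
// Hypothetical macro mimicking the '};' shape cpplint misreads.
// '// NOLINT' here would swallow the '\' and the next line;
// '/*NOLINT*/' keeps the continuation intact.
#define DECLARE_HANDLER(name)                      \
  struct name {                                    \
    static constexpr int kId{__LINE__}; /*NOLINT*/ \
  }

DECLARE_HANDLER(ExampleHandler);
```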
```diff
@@ -40,7 +40,7 @@ inline std::string AttrTypeToString(
     EXPAND_SWITCH_CASE(SCALAR)
     EXPAND_SWITCH_CASE(SCALARS)
 #undef EXPAND_SWITCH_CASE
-  };
+  }
   return "Invlid AttrType";
 }
```
```diff
@@ -467,11 +467,11 @@ std::shared_ptr<OpStrategy> StrategyForAssignValue(
   absl::optional<ir::Tensor> out;
 #define EXPAND_VALUE_TO_TENSOR(TYPE)                                          \
-  else if (absl::get_if<TYPE>(&value)) {                                      \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                           \
     out = pe::AssignValue(                                                    \
         std::vector<TYPE>{absl::get<TYPE>(value)}, out_type[0], tensor_name); \
   }                                                                           \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {                         \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/              \
     out = pe::AssignValue(                                                    \
         absl::get<std::vector<TYPE>>(value), out_type[0], tensor_name);       \
   }
@@ -479,7 +479,7 @@ std::shared_ptr<OpStrategy> StrategyForAssignValue(
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_VALUE_TO_TENSOR)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "Assign value not support the type " << out_type[0];
   }
 #undef EXPAND_VALUE_TO_TENSOR
```
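The `if (false) { }` header exists so that each macro-generated branch can begin with `else if`; cpplint cannot see the expanded chain and complains that the `else` does not share a line with a preceding `}` ("An else should appear on the same line as the preceding }"), hence the `NOLINT` markers on every branch. A simplified sketch of what the expansion looks like (hypothetical `DescribeValue` helper, not CINN code):

```cpp
#include <string>

#include "absl/types/variant.h"

std::string DescribeValue(const absl::variant<int, float>& value) {
  if (false) {  // NOLINT -- anchor so generated branches can say 'else if'
  }
  else if (absl::get_if<int>(&value)) {  // NOLINT
    return "int";
  }
  else if (absl::get_if<float>(&value)) {  // NOLINT
    return "float";
  }
  else {  // NOLINT
    return "unsupported";
  }
}
```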
```diff
@@ -510,17 +510,17 @@ std::vector<shape_t> InferShapeForAssignValue(
   shape_t shape;
 #define EXPAND_ATTR_TO_GET_SHAPE(TYPE)                              \
-  else if (absl::get_if<TYPE>(&value)) {                            \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                 \
     shape.emplace_back(1);                                          \
   }                                                                 \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {               \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/    \
     shape.emplace_back(absl::get<std::vector<TYPE>>(value).size()); \
   }
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_SHAPE)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "assign_value not support the type!";
   }
 #undef EXPAND_ATTR_TO_GET_SHAPE
@@ -551,17 +551,17 @@ std::vector<Type> InferDtypeForAssignValue(
   const auto &value = attrs.at("values");
 #define EXPAND_ATTR_TO_GET_DTYPE(TYPE)                           \
-  else if (absl::get_if<TYPE>(&value)) {                         \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/              \
     out_type = common::type_of<TYPE>();                          \
   }                                                              \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {            \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/ \
     out_type = common::type_of<TYPE>();                          \
   }
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_DTYPE)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "assign_value not support the type!";
   }
 #undef EXPAND_ATTR_TO_GET_DTYPE
```
```diff
@@ -126,8 +126,8 @@ class DotBuilder {
         shape_dict_{graph_->GetMutableAttrs<shape_dict_t>("infershape")} {}
   framework::Graph* graph() const { return graph_; }
-  const dtype_dict_t& dtype_dict() const { return dtype_dict_; };
-  const shape_dict_t& shape_dict() const { return shape_dict_; };
+  const dtype_dict_t& dtype_dict() const { return dtype_dict_; }
+  const shape_dict_t& shape_dict() const { return shape_dict_; }
   // Currently the constructor of `NodeData` needs to pass in `Shared<Node>`.
   NodeData* Var(common::Shared<Node>& producer) {
```
```diff
@@ -77,7 +77,7 @@ bool is_const_group(const FusionHelperBase* helper,
                     const std::shared_ptr<Graph::Group>& group) {
   return group->CollectNodes().size() == 1 &&
          helper->IsConstOp(group->CollectNodes()[0]);
-};
+}
 CONDITION_FUNC(elementwise_fuse_broadcast) {
   // if producer just include const op.
```
```diff
@@ -759,9 +759,10 @@ std::vector<ir::Tensor> ReduceInternal(const ir::Tensor& A,
         A, axes, keep_dim, output_name, Reduce##name, initial, reduce_type); \
     if (rs.size() == 0) {                                                    \
       return {Reduce##name(A, axes, keep_dim, output_name)};                 \
-    } else                                                                   \
+    } else {                                                                 \
       return rs;                                                             \
+    }                                                                        \
   }                                                                          \
   }
 BLOCK_SHUFFLE_REDUCE(Sum,
```
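Note that when a brace is added inside a multi-line macro such as `BLOCK_SHUFFLE_REDUCE`, the new `}` line needs its own trailing backslash so the macro continuation stays intact, which is why this hunk grows by one continuation line.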
```diff
@@ -801,7 +802,7 @@ bool WithoutLastDimInReduce(const std::vector<ir::Expr>& inshape,
   } else {
     return false;
   }
-};
+}
 using BlockReduceFunc =
     std::function<std::vector<ir::Tensor>(const ir::Tensor&,
```
```diff
@@ -2814,9 +2814,10 @@ void CudaSplitSchedule(common::CINNValuePack *arg_pack,
       stages[last_output]->Bind(0, "blockIdx.x");
       stages[last_output]->Bind(1, "threadIdx.x");
       compute_at_level++;
-    } else
+    } else {
       stages[last_output]->Bind(0, "threadIdx.x");
+    }
   }
   for (int i = 0; i < out_tensors.size() - 1; i++) {
     stages[out_tensors[i]]->ComputeAt2(stages[last_output], compute_at_level);
```
```diff
@@ -2142,9 +2142,10 @@ void ScheduleImpl::CopyTransformAndLoopInfo(const Expr& block,
         !vars[i]->is_reduce_axis && !vars_target[i]->is_reduce_axis) {
       new_iter_values.push_back(iter_values_target[i]);
       VLOG(3) << "new_iter_values.push_back " << iter_values_target[i];
-    } else
+    } else {
       break;
+    }
   }
   if (new_iter_values.empty())
     LOG(FATAL) << "Cannot CopyTransformAndLoopInfo since shape[0] of source "
```
```diff
@@ -665,7 +665,7 @@ std::string debug_cudnn_tensor_format(cudnnTensorFormat_t tensor_format) {
       return "NHWC";
     default:
       LOG(FATAL) << "Only support NCHW and NHWC data layout\n";
-  };
+  }
   return "";
 }
@@ -681,7 +681,7 @@ std::string debug_cudnn_tensor_dtype(cudnnDataType_t tensor_dtype) {
       return "float64";
     default:
       LOG(FATAL) << "Only support float16/bfloat16/float32/float64 now!";
-  };
+  }
   return "";
 }
@@ -697,7 +697,7 @@ std::string debug_cudnn_pool_mode(cudnnPoolingMode_t pool_mode) {
       return "avg_exclulude_padding";
     default:
       LOG(FATAL) << "Pool only support max and avg now!";
-  };
+  }
   return "";
 }
```
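The three cudnn debug helpers all closed their `switch` with `};`; a `switch` statement is not a declaration, so the extra semicolon is exactly what `readability/braces` flags. An illustrative fixed shape (hypothetical helper, not the actual CINN code):

```cpp
// A switch is a statement: its closing brace takes no ';'.
const char* PoolModeName(int mode) {
  switch (mode) {
    case 0:
      return "max";
    default:
      return "avg";
  }  // no ';' after the switch's closing brace
}
```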
```diff
@@ -69,7 +69,7 @@ class Summary {
  public:
   struct Raito {
     double value;
-    Raito(double val) : value(val){};
+    Raito(double val) : value(val) {}
     std::string ToStr() const { return std::to_string(value); }
   };
```
```diff
@@ -112,9 +112,10 @@ size_t Count(std::string *s, const std::string &sub) {
                !IsSuffix(s->at(pos + sub.length())))) {
       pos += sub.length();
       times++;
-    } else
+    } else {
       pos++;
+    }
   }
   return times;
 }
```