PaddlePaddle / Paddle
Unverified commit 3ad78428, authored Jul 02, 2023 by Wang Xin, committed via GitHub on Jul 02, 2023.
[CodeStyle][CINN] fix cpplint codestyle `[readability/braces]` (#55049)
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent: cc7d1f34
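Background for the hunks below: cpplint's `[readability/braces]` check flags, among other things, an unbraced `else` branch whose sibling `if` branch is braced, and a redundant semicolon after a closing brace. A minimal sketch of both patterns and their fixes — illustrative only, with hypothetical function names, not code from this commit:

// Illustrative only -- not code from this commit.
// Pattern 1: cpplint warns "If an else has a brace on one side,
// it should have it on both."
int SignBefore(int x) {
  if (x >= 0) {
    return 1;
  } else  // unbraced else paired with a braced if: flagged
    return -1;
}

// Pattern 2: cpplint warns "You don't need a ; after a }".
int ZeroBefore() { return 0; };  // trailing semicolon: flagged

// Fixed versions, matching what the hunks below do throughout CINN:
int SignAfter(int x) {
  if (x >= 0) {
    return 1;
  } else {
    return -1;
  }
}

int ZeroAfter() { return 0; }

The commit applies exactly these two fixes across the CINN sources, plus NOLINT suppressions where macro tricks confuse the linter.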
Showing 13 changed files with 40 additions and 35 deletions (+40 -35).
paddle/cinn/common/cas.cc                              +3  -2
paddle/cinn/frontend/decomposer_registry.h             +1  -1
paddle/cinn/frontend/op_mappers/paddle/elementwise.cc  +6  -6
paddle/cinn/frontend/paddle/cpp/op_desc.cc             +1  -1
paddle/cinn/hlir/op/elementwise.cc                     +13 -13
paddle/cinn/hlir/pass/dot_merger.cc                    +2  -2
paddle/cinn/hlir/pass/fusion_merge_pass_util.h         +1  -1
paddle/cinn/hlir/pe/reduction.cc                       +3  -2
paddle/cinn/hlir/pe/schedule.cc                        +2  -1
paddle/cinn/ir/ir_schedule.cc                          +2  -1
paddle/cinn/runtime/cuda/cuda_util.cc                  +3  -3
paddle/cinn/utils/event.h                              +1  -1
paddle/cinn/utils/string.cc                            +2  -1
paddle/cinn/common/cas.cc

@@ -123,10 +123,11 @@ Expr ProductGetNonConstantPart(Expr u) {
     }
     if (nonconstant_operands.empty()) {
       return make_const(u->type(), 1);
-    } else if (nonconstant_operands.size() == 1)
+    } else if (nonconstant_operands.size() == 1) {
       return nonconstant_operands.front();
-    else
+    } else {
       return Product::Make(nonconstant_operands);
+    }
   }
   return u;
 }
paddle/cinn/frontend/decomposer_registry.h

@@ -33,7 +33,7 @@ class DecomposerContext {
       NetBuilder* builder,
       absl::flat_hash_map<std::string, Variable>* var_map)
       : builder_(builder), var_map_(var_map) {}
 
-  NetBuilder* builder() const { return builder_; };
+  NetBuilder* builder() const { return builder_; }
 
   // Map the new var to the original var.
   void MapOutToOrigin(const Variable& new_var, const Variable& ori_var) const {
paddle/cinn/frontend/op_mappers/paddle/elementwise.cc

@@ -56,12 +56,12 @@ EXPAND_ELTWISETYPE_STRING(kMin, " min ")
 template <EltwiseType Type>
 struct OpBuilder {};
 
-#define ELTWISE_SPEC(enum_t, function)                             \
-  template <>                                                      \
-  struct OpBuilder<enum_t> {                                       \
-    constexpr static Variable (NetBuilder::*func)(const Variable&, \
-                                                  const Variable&, \
-                                                  int){&function}; \
+#define ELTWISE_SPEC(enum_t, function)                             \
+  template <>                                                      \
+  struct OpBuilder<enum_t> {                                       \
+    constexpr static Variable (NetBuilder::*func)(const Variable&, \
+                                                  const Variable&, \
+                                                  int){&function}; /*NOLINT*/ \
   }
 ELTWISE_SPEC(EltwiseType::kAdd, NetBuilder::Add);
 ELTWISE_SPEC(EltwiseType::kDiv, NetBuilder::Divide);
paddle/cinn/frontend/paddle/cpp/op_desc.cc

@@ -40,7 +40,7 @@ inline std::string AttrTypeToString(
     EXPAND_SWITCH_CASE(SCALAR)
     EXPAND_SWITCH_CASE(SCALARS)
 #undef EXPAND_SWITCH_CASE
-  };
+  }
   return "Invlid AttrType";
 }
paddle/cinn/hlir/op/elementwise.cc

@@ -467,11 +467,11 @@ std::shared_ptr<OpStrategy> StrategyForAssignValue(
   absl::optional<ir::Tensor> out;
 #define EXPAND_VALUE_TO_TENSOR(TYPE)                                          \
-  else if (absl::get_if<TYPE>(&value)) {                                      \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                           \
     out = pe::AssignValue(                                                    \
         std::vector<TYPE>{absl::get<TYPE>(value)}, out_type[0], tensor_name); \
   }                                                                           \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {                         \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/              \
     out = pe::AssignValue(                                                    \
         absl::get<std::vector<TYPE>>(value), out_type[0], tensor_name);       \
   }

@@ -479,7 +479,7 @@ std::shared_ptr<OpStrategy> StrategyForAssignValue(
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_VALUE_TO_TENSOR)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "Assign value not support the type " << out_type[0];
   }
 #undef EXPAND_VALUE_TO_TENSOR

@@ -510,17 +510,17 @@ std::vector<shape_t> InferShapeForAssignValue(
   shape_t shape;
 #define EXPAND_ATTR_TO_GET_SHAPE(TYPE)                              \
-  else if (absl::get_if<TYPE>(&value)) {                            \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                 \
     shape.emplace_back(1);                                          \
   }                                                                 \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {               \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/    \
     shape.emplace_back(absl::get<std::vector<TYPE>>(value).size()); \
   }
 
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_SHAPE)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "assign_value not support the type!";
   }
 #undef EXPAND_ATTR_TO_GET_SHAPE

@@ -550,18 +550,18 @@ std::vector<Type> InferDtypeForAssignValue(
       << "assign_value should set attribute [values]! Please check.";
   const auto& value = attrs.at("values");
-#define EXPAND_ATTR_TO_GET_DTYPE(TYPE)                           \
-  else if (absl::get_if<TYPE>(&value)) {                         \
-    out_type = common::type_of<TYPE>();                          \
-  }                                                              \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) {            \
-    out_type = common::type_of<TYPE>();                          \
+#define EXPAND_ATTR_TO_GET_DTYPE(TYPE)                           \
+  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/              \
+    out_type = common::type_of<TYPE>();                          \
+  }                                                              \
+  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/ \
+    out_type = common::type_of<TYPE>();                          \
   }
 
   if (false) {  // NOLINT
   }
   EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_DTYPE)
-  else {
+  else {  // NOLINT
     LOG(FATAL) << "assign_value not support the type!";
   }
 #undef EXPAND_ATTR_TO_GET_DTYPE
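A note on the `/*NOLINT*/` and `// NOLINT` markers above (my reading, not stated in the commit): the `EXPAND_*` macros expand to chains that begin with `else if`, so the raw, unexpanded source looks brace-unbalanced to cpplint, which scans source text rather than preprocessor output; the markers suppress the `[readability/braces]` false positives on those lines, and the `if (false) {}` scaffold gives the first expanded `else if` something to attach to. A minimal, hypothetical sketch of the same pattern (the names `DISPATCH` and `Dispatch` are mine, not from the commit):

// Hypothetical sketch of the macro-chain pattern used above.
// DISPATCH expands to "else if (...) { ... }", so on its own the line
// looks like a brace error to cpplint; NOLINT silences that check.
#include <iostream>

#define DISPATCH(TYPE, tag)              \
  else if (tag == #TYPE[0]) { /*NOLINT*/ \
    std::cout << "matched " #TYPE "\n";  \
  }

void Dispatch(char tag) {
  if (false) {  // NOLINT: anchor so the macro's leading "else if" attaches
  }
  DISPATCH(int, tag)
  DISPATCH(long, tag)
  else {  // NOLINT
    std::cout << "unsupported tag\n";
  }
}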
paddle/cinn/hlir/pass/dot_merger.cc

@@ -126,8 +126,8 @@ class DotBuilder {
         shape_dict_{graph_->GetMutableAttrs<shape_dict_t>("infershape")} {}
 
   framework::Graph* graph() const { return graph_; }
-  const dtype_dict_t& dtype_dict() const { return dtype_dict_; };
-  const shape_dict_t& shape_dict() const { return shape_dict_; };
+  const dtype_dict_t& dtype_dict() const { return dtype_dict_; }
+  const shape_dict_t& shape_dict() const { return shape_dict_; }
 
   // Currently the constructor of `NodeData` needs to pass in `Shared<Node>`.
   NodeData* Var(common::Shared<Node>& producer) {
paddle/cinn/hlir/pass/fusion_merge_pass_util.h

@@ -77,7 +77,7 @@ bool is_const_group(const FusionHelperBase* helper,
                     const std::shared_ptr<Graph::Group>& group) {
   return group->CollectNodes().size() == 1 &&
          helper->IsConstOp(group->CollectNodes()[0]);
-};
+}
 
 CONDITION_FUNC(elementwise_fuse_broadcast) {
   // if producer just include const op.
paddle/cinn/hlir/pe/reduction.cc

@@ -759,8 +759,9 @@ std::vector<ir::Tensor> ReduceInternal(const ir::Tensor& A,
       A, axes, keep_dim, output_name, Reduce##name, initial, reduce_type); \
   if (rs.size() == 0) {                                                    \
     return {Reduce##name(A, axes, keep_dim, output_name)};                 \
-  } else                                                                   \
+  } else {                                                                 \
     return rs;                                                             \
+  }                                                                        \
   }                                                                        \
   }

@@ -801,7 +802,7 @@ bool WithoutLastDimInReduce(const std::vector<ir::Expr>& inshape,
   } else {
     return false;
   }
-};
+}
 
 using BlockReduceFunc =
     std::function<std::vector<ir::Tensor>(const ir::Tensor&,
paddle/cinn/hlir/pe/schedule.cc

@@ -2814,8 +2814,9 @@ void CudaSplitSchedule(common::CINNValuePack *arg_pack,
       stages[last_output]->Bind(0, "blockIdx.x");
       stages[last_output]->Bind(1, "threadIdx.x");
       compute_at_level++;
-    } else
+    } else {
       stages[last_output]->Bind(0, "threadIdx.x");
+    }
   }
 
   for (int i = 0; i < out_tensors.size() - 1; i++) {
paddle/cinn/ir/ir_schedule.cc

@@ -2142,8 +2142,9 @@ void ScheduleImpl::CopyTransformAndLoopInfo(const Expr& block,
         !vars[i]->is_reduce_axis && !vars_target[i]->is_reduce_axis) {
       new_iter_values.push_back(iter_values_target[i]);
       VLOG(3) << "new_iter_values.push_back " << iter_values_target[i];
-    } else
+    } else {
       break;
+    }
   }
 
   if (new_iter_values.empty())
paddle/cinn/runtime/cuda/cuda_util.cc

@@ -665,7 +665,7 @@ std::string debug_cudnn_tensor_format(cudnnTensorFormat_t tensor_format) {
       return "NHWC";
     default:
       LOG(FATAL) << "Only support NCHW and NHWC data layout\n";
-  };
+  }
   return "";
 }

@@ -681,7 +681,7 @@ std::string debug_cudnn_tensor_dtype(cudnnDataType_t tensor_dtype) {
       return "float64";
     default:
       LOG(FATAL) << "Only support float16/bfloat16/float32/float64 now!";
-  };
+  }
   return "";
 }

@@ -697,7 +697,7 @@ std::string debug_cudnn_pool_mode(cudnnPoolingMode_t pool_mode) {
       return "avg_exclulude_padding";
     default:
       LOG(FATAL) << "Pool only support max and avg now!";
-  };
+  }
   return "";
 }
paddle/cinn/utils/event.h

@@ -69,7 +69,7 @@ class Summary {
  public:
   struct Raito {
     double value;
-    Raito(double val) : value(val) {};
+    Raito(double val) : value(val) {}
     std::string ToStr() const { return std::to_string(value); }
   };
paddle/cinn/utils/string.cc

@@ -112,8 +112,9 @@ size_t Count(std::string *s, const std::string &sub) {
         !IsSuffix(s->at(pos + sub.length())))) {
       pos += sub.length();
       times++;
-    } else
+    } else {
       pos++;
+    }
   }
   return times;
 }