PaddlePaddle/Paddle, commit 259b0aad
Authored on April 11, 2023 by wz1qqx; committed via GitHub on April 11, 2023.
[XPU] fix error pattern and rename max name (#52726)
Parent: 327c0e4d

In brief: the fuse-pass pattern in conv2d_xpu_fuse_pass.cc gains stricter shape and single-consumer assertions (the "fix error pattern" part), and the conv2d_xpu operator interface is renamed from input/input_max/output/output_max to x/x_max/out/out_max across the YAML op definition, the InferMeta function, and the XPU kernel (the "rename max name" part).

Showing 5 changed files with 63 additions and 48 deletions (+63 -48):
paddle/fluid/framework/ir/xpu/conv2d_xpu_fuse_pass.cc (+36 -20)
paddle/phi/api/yaml/fused_ops.yaml (+4 -4)
paddle/phi/infermeta/fusion.cc (+7 -7)
paddle/phi/infermeta/fusion.h (+4 -4)
paddle/phi/kernels/fusion/xpu/conv2d_xpu_kernel.cc (+12 -13)
paddle/fluid/framework/ir/xpu/conv2d_xpu_fuse_pass.cc

@@ -99,13 +99,15 @@ Conv2dXPUPattern::Conv2dXPUPattern(PDPattern* pattern,
   auto conv = pattern->NewNode(conv_repr())->assert_is_op(conv_type_);
   auto input = pattern->NewNode(input_repr())
                    ->assert_is_op_input(conv_type_, "Input")
-                   ->AsInput();
+                   ->AsInput()
+                   ->assert_more([](Node* node) {
+                     return node->Var()->GetShape().size() == 4;
+                   });
   auto conv_filter = pattern->NewNode(conv_filter_repr())
                          ->assert_is_op_input(conv_type_, "Filter")
                          ->AsInput();
   auto conv_out = pattern->NewNode(conv_out_repr())
-                      ->assert_is_op_output(conv_type_, "Output")
-                      ->assert_var_not_persistable();
+                      ->assert_is_op_output(conv_type_, "Output");
   conv->LinksFrom({input, conv_filter}).LinksTo({conv_out});
   // ew_bias_add op
   PDNode* ew_bias_add = nullptr;
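The hunk above pulls the activation's rank check forward: conv2d_xpu only handles 4-D NCHW activations, so candidates of any other rank are now rejected during pattern matching rather than failing later in the pass. A standalone sketch of the guard's logic, assuming plain integer-vector shapes (the helper name is hypothetical, not part of the commit):

#include <cstdint>
#include <vector>

// Hypothetical helper, not part of the commit: mirrors the assert_more
// lambda above, which admits only rank-4 (NCHW) activation variables.
bool IsRank4Activation(const std::vector<int64_t>& shape) {
  return shape.size() == 4;  // N, C, H, W
}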
@@ -116,11 +118,17 @@ Conv2dXPUPattern::Conv2dXPUPattern(PDPattern* pattern,
     ew_bias_add_y = pattern->NewNode(ew_bias_add_y_repr())
                         ->assert_is_op_input("elementwise_add", "Y")
                         ->assert_is_persistable_var()
-                        ->assert_has_n_outputs(1);
+                        ->assert_has_n_outputs(1)
+                        ->assert_more([](Node* node) {
+                          return node->Var()->GetShape().size() == 1;
+                        });
     ew_bias_add = pattern->NewNode(ew_bias_add_repr())
                       ->assert_is_op("elementwise_add");
     ew_bias_add_out = pattern->NewNode(ew_bias_add_out_repr())
                           ->assert_is_op_output("elementwise_add", "Out");
+    if (with_bn_ || with_branch_ || !act_type_.empty()) {
+      ew_bias_add_out->assert_has_n_outputs(1);
+    }
     ew_bias_add->LinksFrom({conv_out, ew_bias_add_y})
         .LinksTo({ew_bias_add_out});
   } else {
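Here the new assert_more restricts the elementwise bias to rank-1 vectors, and the new assert_has_n_outputs(1) guard encodes a fusion-safety rule: when batch_norm, a branch add, or an activation still follows inside the pattern, the elementwise_add output exists only to feed them, and a second consumer outside the pattern would be left dangling after fusion. A hedged sketch of that rule in isolation (names are hypothetical):

// Hypothetical helper, not part of the commit: an intermediate value can be
// absorbed into the fused op only when nothing outside the pattern reads it.
bool CanAbsorbIntermediate(bool more_pattern_ops_follow, int num_consumers) {
  return !more_pattern_ops_follow || num_consumers == 1;
}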
@@ -159,6 +167,9 @@ Conv2dXPUPattern::Conv2dXPUPattern(PDPattern* pattern,
     bn = pattern->NewNode(bn_repr())->assert_is_op("batch_norm");
     bn_out = pattern->NewNode(bn_out_repr())
                  ->assert_is_op_output("batch_norm", "Y");
+    if (with_branch_ || !act_type_.empty()) {
+      bn_out->assert_has_n_outputs(1);
+    }
     bn_mean_out = pattern->NewNode(bn_mean_out_repr())
                       ->assert_is_op_output("batch_norm", "MeanOut");
     bn_saved_mean = pattern->NewNode(bn_saved_mean_repr())
@@ -179,23 +190,27 @@ Conv2dXPUPattern::Conv2dXPUPattern(PDPattern* pattern,
       bn_out->assert_is_op_input("elementwise_add", "Y")->AsIntermediate();
       ew_branch_add_in = pattern->NewNode(ew_branch_add_in_repr())
                              ->assert_is_op_input("elementwise_add", "X")
-                             ->AsInput()
-                             ->assert_more([](Node* node) {
-                               return node->Var()->GetShape().size() == 4;
-                             });
+                             ->AsInput();
     } else if (with_branch_y_) {
       bn_out->assert_is_op_input("elementwise_add", "X")->AsIntermediate();
       ew_branch_add_in = pattern->NewNode(ew_branch_add_in_repr())
                              ->assert_is_op_input("elementwise_add", "Y")
-                             ->AsInput()
-                             ->assert_more([](Node* node) {
-                               return node->Var()->GetShape().size() == 4;
-                             });
+                             ->AsInput();
     }
-    ew_branch_add = pattern->NewNode(ew_branch_add_repr())
-                        ->assert_is_op("elementwise_add");
+    ew_branch_add = pattern->NewNode(ew_branch_add_repr())
+                        ->assert_is_op("elementwise_add")
+                        ->assert_more([](Node* node) {
+                          if (node->inputs.size() != 2) {
+                            return false;
+                          }
+                          return node->inputs[0]->Var()->GetShape() ==
+                                 node->inputs[1]->Var()->GetShape();
+                        });
     ew_branch_add_out = pattern->NewNode(ew_branch_add_out_repr())
                             ->assert_is_op_output("elementwise_add", "Out");
+    if (!act_type_.empty()) {
+      ew_branch_add_out->assert_has_n_outputs(1);
+    }
     ew_branch_add->LinksFrom({bn_out, ew_branch_add_in})
         .LinksTo({ew_branch_add_out});
   } else {
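This hunk is the heart of the "fix error pattern" change: the per-input rank-4 checks are dropped in favor of a single predicate on the elementwise_add op itself, which requires exactly two inputs with identical shapes. That excludes broadcast adds, which the fused conv2d_xpu branch input cannot express. A standalone sketch of the predicate, assuming plain integer-vector shapes (the helper name is hypothetical):

#include <cstdint>
#include <vector>

// Hypothetical helper, not part of the commit: a branch (residual) add is
// fusable only when both inputs have exactly the same shape, i.e. the add
// performs no implicit broadcasting.
bool IsFusableBranchAdd(const std::vector<int64_t>& lhs,
                        const std::vector<int64_t>& rhs) {
  return lhs == rhs;
}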
@@ -401,6 +416,7 @@ int Conv2dXPUFusePass::ApplyImpl(ir::Graph* graph,
       scope->FindVar(conv_filter->Name())->GetMutable<phi::DenseTensor>();
   auto filter_dims = filter_t->dims();
   bool has_bias = with_bn || with_conv_bias;
+  bool has_branch = with_branch_x || with_branch_y;
   // Create conv_fusion_bias (conv bias) variable
   Node* fusion_bias_node = nullptr;
   if (has_bias) {
@@ -501,18 +517,17 @@ int Conv2dXPUFusePass::ApplyImpl(ir::Graph* graph,
   framework::OpDesc conv2d_xpu_op_desc(block);
   // set input&output var
   conv2d_xpu_op_desc.SetType("conv2d_xpu");
-  conv2d_xpu_op_desc.SetInput("input", {input->Name()});
+  conv2d_xpu_op_desc.SetInput("x", {input->Name()});
   conv2d_xpu_op_desc.SetInput("filter", {filter_int16->Name()});
   conv2d_xpu_op_desc.SetInput("filter_max", {filter_max->Name()});
-  conv2d_xpu_op_desc.SetOutput("output", {conv2d_xpu_out_name});
-  conv2d_xpu_op_desc.SetOutput("output_max", {conv_out_max_name});
+  conv2d_xpu_op_desc.SetOutput("out", {conv2d_xpu_out_name});
+  conv2d_xpu_op_desc.SetOutput("out_max", {conv_out_max_name});
   // set fusion_bias input node
   if (has_bias) {
     conv2d_xpu_op_desc.SetInput("bias", {fusion_bias_node->Name()});
-    conv2d_xpu_op_desc.SetAttr("has_bias", has_bias);
   }
   // set ew_branch_add input node
-  if (ew_branch_add_in != nullptr) {
+  if (ew_branch_add != nullptr) {
     conv2d_xpu_op_desc.SetInput("branch", {ew_branch_add_in->Name()});
   }
   // set attrs of conv2d_xpu
@@ -566,7 +581,8 @@ int Conv2dXPUFusePass::ApplyImpl(ir::Graph* graph,
   conv2d_xpu_op_desc.SetAttr("place_z", std::vector<int>{10});
   conv2d_xpu_op_desc.SetAttr("paddings", conv_paddings);
   conv2d_xpu_op_desc.SetAttr("block_lod", std::vector<int>{1});
-  conv2d_xpu_op_desc.SetAttr("has_branch", with_branch_x || with_branch_y);
+  conv2d_xpu_op_desc.SetAttr("has_branch", has_branch);
+  conv2d_xpu_op_desc.SetAttr("has_bias", has_bias);
   auto* conv2d_xpu = graph->CreateOpNode(&conv2d_xpu_op_desc);
   IR_NODE_LINK_TO(input, conv2d_xpu);
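Taken together with the earlier hunk, attribute handling is now uniform: has_bias and has_branch are computed once up front and always written, where previously has_bias was set only inside the bias branch and has_branch was recomputed inline. A hedged sketch of the resulting invariant (the check is hypothetical, not part of the commit):

// Hypothetical check, not part of the commit: after this hunk, both flags
// are always written and agree with whether the optional inputs were wired.
bool AttrsConsistent(bool has_bias_attr, bool bias_wired,
                     bool has_branch_attr, bool branch_wired) {
  return has_bias_attr == bias_wired && has_branch_attr == branch_wired;
}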
paddle/phi/api/yaml/fused_ops.yaml

@@ -5,14 +5,14 @@
 # otherwise the operator only could be used in static mode.
 
 - op : conv2d_xpu
-  args : (Tensor input, Tensor input_max, Tensor filter, Tensor filter_max, Tensor bias, Tensor branch, int[] paddings, int[] dilations, int[] strides, str padding_algorithm, int groups, bool has_bias, bool has_branch, int act_type, float act_param)
-  output : Tensor(output), Tensor(output_max)
+  args : (Tensor x, Tensor x_max, Tensor filter, Tensor filter_max, Tensor bias, Tensor branch, int[] paddings, int[] dilations, int[] strides, str padding_algorithm, int groups, bool has_bias, bool has_branch, int act_type, float act_param)
+  output : Tensor(out), Tensor(out_max)
   infer_meta :
     func : Conv2dXPUInferMeta
   kernel :
     func : conv2d_xpu
-    data_type : input
-  optional : bias, branch, input_max
+    data_type : x
+  optional : bias, branch, x_max
 
 - op : embedding_with_eltwise_add_xpu
   args : (Tensor[] ids, Tensor[] tables, int64_t padding_idx)
paddle/phi/infermeta/fusion.cc

@@ -35,8 +35,8 @@ inline int ConvOutSize(int input_size,
   return output_size;
 }
 
-void Conv2dXPUInferMeta(const MetaTensor& input,
-                        const MetaTensor& input_max,
+void Conv2dXPUInferMeta(const MetaTensor& x,
+                        const MetaTensor& x_max,
                         const MetaTensor& filter,
                         const MetaTensor& filter_max,
                         const MetaTensor& bias,
@@ -50,9 +50,9 @@ void Conv2dXPUInferMeta(const MetaTensor& input,
                         bool has_branch,
                         int act_type,
                         float act_param,
-                        MetaTensor* output,
-                        MetaTensor* output_max) {
-  auto in_dims = input.dims();
+                        MetaTensor* out,
+                        MetaTensor* out_max) {
+  auto in_dims = x.dims();
   auto filter_dims = filter.dims();
   // do some checks
   PADDLE_ENFORCE_EQ(
@@ -157,8 +157,8 @@ void Conv2dXPUInferMeta(const MetaTensor& input,
                           strides[i]));
   }
   // set output and output max dims
-  output->set_dims(DDim(out_shape.data(), out_shape.size()));
-  output_max->set_dims(phi::make_ddim({4}));
+  out->set_dims(DDim(out_shape.data(), out_shape.size()));
+  out_max->set_dims(phi::make_ddim({4}));
 }
 
 void EmbeddingWithEltwiseAddXPUInferMeta(
paddle/phi/infermeta/fusion.h

@@ -22,8 +22,8 @@ namespace phi {
 // Common InferMeta Functions for fusion operators.
 // NOTE: The InferMeta Functions in this file are arranged in alphabetic order.
 
-void Conv2dXPUInferMeta(const MetaTensor& input,
-                        const MetaTensor& input_max,
+void Conv2dXPUInferMeta(const MetaTensor& x,
+                        const MetaTensor& x_max,
                         const MetaTensor& filter,
                         const MetaTensor& filter_max,
                         const MetaTensor& bias,
@@ -37,8 +37,8 @@ void Conv2dXPUInferMeta(const MetaTensor& input,
                         bool has_branch,
                         int act_type,
                         float act_param,
-                        MetaTensor* output,
-                        MetaTensor* output_max);
+                        MetaTensor* out,
+                        MetaTensor* out_max);
 
 void EmbeddingWithEltwiseAddXPUInferMeta(
     const std::vector<const MetaTensor*>& ids,
paddle/phi/kernels/fusion/xpu/conv2d_xpu_kernel.cc

@@ -21,8 +21,8 @@ namespace fusion {
 
 template <typename T, typename Context>
 void Conv2dXPUKernel(const Context& ctx,
-                     const DenseTensor& input,
-                     const paddle::optional<DenseTensor>& input_max,
+                     const DenseTensor& x,
+                     const paddle::optional<DenseTensor>& x_max,
                      const DenseTensor& filter,
                      const DenseTensor& filter_max,
                      const paddle::optional<DenseTensor>& bias,
@@ -36,10 +36,10 @@ void Conv2dXPUKernel(const Context& ctx,
                      bool has_branch,
                      int act_type,
                      float act_param,
-                     DenseTensor* output,
-                     DenseTensor* output_max) {
+                     DenseTensor* out,
+                     DenseTensor* out_max) {
   using XPUType = typename XPUTypeTrait<T>::Type;
-  auto input_dims = input.dims();
+  auto input_dims = x.dims();
   auto filter_dims = filter.dims();
   // update paddings and dilations accoring to padding_algorithm
   std::vector<int> paddings_vec = paddings;
@@ -62,17 +62,16 @@ void Conv2dXPUKernel(const Context& ctx,
   int win_h = static_cast<int>(filter_dims[2]);
   int win_w = static_cast<int>(filter_dims[3]);
-  auto* input_data = reinterpret_cast<const XPUType*>(input.data<T>());
-  const float* input_max_data = input_max.get_ptr() == nullptr
-                                    ? nullptr
-                                    : input_max.get_ptr()->data<float>();
+  auto* input_data = reinterpret_cast<const XPUType*>(x.data<T>());
+  const float* input_max_data =
+      x_max.get_ptr() == nullptr ? nullptr : x_max.get_ptr()->data<float>();
   auto* branch_data =
       branch.get_ptr() == nullptr
           ? nullptr
           : reinterpret_cast<const XPUType*>(branch.get_ptr()->data<T>());
   const float* bias_data =
       bias.get_ptr() == nullptr ? nullptr : bias.get_ptr()->data<float>();
-  auto* out_data = reinterpret_cast<XPUType*>(ctx.template Alloc<T>(output));
+  auto* out_data = reinterpret_cast<XPUType*>(ctx.template Alloc<T>(out));
   xpu::Activation_t act(static_cast<xpu::Activation_t::act_enum>(act_type));
   if (act_type == xpu::Activation_t::LEAKY_RELU) {
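The renamed x_max stays a paddle::optional, and the kernel keeps the same idiom: an absent tensor degrades to a null max pointer, which the XPU conv call accepts in place of a precomputed quantization max. A minimal sketch of the idiom, with std::optional standing in for paddle::optional (the helper is hypothetical):

#include <optional>
#include <vector>

// Hypothetical helper using std::optional as a stand-in for paddle::optional:
// an absent max tensor becomes a null pointer for the XPU conv call.
const float* MaxPtrOrNull(const std::optional<std::vector<float>>& x_max) {
  return x_max.has_value() ? x_max->data() : nullptr;
}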
@@ -98,13 +97,13 @@ void Conv2dXPUKernel(const Context& ctx,
       /* int64_t groups */ groups,
       /* const float* in_maxptr */ input_max_data,
       /* const float* filter_maxptr */ filter_max.data<float>(),
-      /* float* out_maxptr */ ctx.template Alloc<float>(output_max),
+      /* float* out_maxptr */ ctx.template Alloc<float>(out_max),
       /* bool is_nchw */ true,
       /* const float* bias */ bias_data,
       /* const TY* branch */ branch_data,
       /* const baidu::xpu::api::Activation_t& act */ act,
-      /* const float* branch_maxptr */ nullptr);
-  //    /* const float* scale */ nullptr);
+      /* const float* branch_maxptr */ nullptr,
+      /* const float* scale */ nullptr);
   PADDLE_ENFORCE_XDNN_SUCCESS(r, "conv2d_xpu");
 }