PaddlePaddle / Paddle

Commit 18c0a002 (unverified)

Scale Matmul Fuse pass rewritten (#49105)

Authored on Jan 02, 2023 by Hulek; committed via GitHub on Jan 02, 2023.
Parent: aa96ddc3
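For context, the rewrite this pass performs rests on a simple identity, which the tests in this commit exercise: a scale op with zero bias followed by a matmul carrying an alpha attribute is equivalent to a single matmul whose alpha absorbs the scale factor, while a non-zero bias blocks the fusion. A minimal NumPy sketch of that identity (the shapes and constants here are illustrative, not taken from the tests):

    import numpy as np

    x = np.random.random((4, 8)).astype(np.float32)
    y = np.random.random((8, 3)).astype(np.float32)
    scale, alpha = 2.34, 3.45

    # scale (bias == 0) followed by matmul(alpha) ...
    out_unfused = np.matmul(scale * x, y) * alpha
    # ... equals a single matmul with alpha' = scale * alpha
    out_fused = np.matmul(x, y) * (scale * alpha)
    assert np.allclose(out_unfused, out_fused, atol=1e-5)

    # With a non-zero bias the rewrite would change the result,
    # which is why the bias test case below expects no fusion.
    bias = 1.0
    assert not np.allclose(
        np.matmul(scale * x + bias, y) * alpha, out_fused, atol=1e-5
    )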
Showing 3 changed files with 69 additions and 207 deletions (+69 −207)
paddle/fluid/framework/ir/CMakeLists.txt                                                  +0 −4
paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass_tester.cc                         +0 −117
python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py    +69 −86
paddle/fluid/framework/ir/CMakeLists.txt

@@ -429,10 +429,6 @@ if(WITH_MKLDNN)
     test_conv_batch_norm_mkldnn_fuse_pass
     SRCS mkldnn/mkldnn_conv_bn_fuse_pass_tester.cc
     DEPS ${TEST_CONV_BN_PASS_DEPS})
-  cc_test(
-    test_scale_matmul_fuse_pass
-    SRCS mkldnn/scale_matmul_fuse_pass_tester.cc
-    DEPS scale_matmul_fuse_pass)
   cc_test(
     test_mkldnn_placement_pass
     SRCS mkldnn/mkldnn_placement_pass_tester.cc
paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass_tester.cc
deleted file mode 100644

// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gtest/gtest.h>

#include "paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.h"

namespace paddle {
namespace framework {
namespace ir {

void SetOp(ProgramDesc* prog,
           const std::string& type,
           const std::vector<std::string>& inputs,
           const std::vector<std::string>& outputs,
           float scale = 1.0f,
           float bias = 0.0f) {
  auto* op = prog->MutableBlock(0)->AppendOp();
  op->SetType(type);
  if (type == "scale") {
    op->SetInput("X", {inputs[0]});
    op->SetAttr("scale", scale);
    op->SetAttr("bias", bias);
  } else if (type == "matmul") {
    op->SetAttr("transpose_X", false);
    op->SetAttr("transpose_Y", false);
    op->SetInput("X", {inputs[0]});
    if (inputs.size() > 1) op->SetInput("Y", {inputs[1]});
    op->SetAttr("alpha", scale);
  } else {
    FAIL() << "Unexpected operator type.";
  }
  op->SetOutput("Out", {outputs[0]});
}

// a->scale->b
// (b,c)->matmul->d
ProgramDesc BuildProgramDesc(float scale, float bias, float alpha) {
  ProgramDesc prog;
  for (auto& v : std::vector<std::string>({"a", "b", "c", "d"})) {
    prog.MutableBlock(0)->Var(v);
  }
  SetOp(&prog, "scale", {"a"}, {"b"}, scale, bias);
  SetOp(&prog, "matmul", {"b", "c"}, {"d"}, alpha);
  return prog;
}

void MainTest(const ProgramDesc& prog,
              int removed_nodes_count,
              const std::vector<std::string> scale_in_out,
              const std::vector<std::string> matmul_in_out,
              float alpha) {
  std::unique_ptr<ir::Graph> graph(new ir::Graph(prog));
  int original_nodes_num = graph->Nodes().size();
  auto pass = PassRegistry::Instance().Get("scale_matmul_fuse_pass");
  graph.reset(pass->Apply(graph.release()));
  int current_nodes_num = graph->Nodes().size();

  for (auto* node : graph->Nodes()) {
    if (node->IsOp()) {
      auto* op = node->Op();
      if (op->Type() == "scale") {
        EXPECT_EQ(op->Input("X")[0], scale_in_out[0]);
        EXPECT_EQ(op->Output("Out")[0], scale_in_out[1]);
      } else if (op->Type() == "matmul") {
        EXPECT_EQ(op->Input("X")[0], matmul_in_out[0]);
        EXPECT_EQ(op->Input("Y")[0], matmul_in_out[1]);
        EXPECT_EQ(op->Output("Out")[0], matmul_in_out[2]);
        EXPECT_EQ(op->GetAttrIfExists<float>("alpha"), alpha);
      }
    }
  }
  EXPECT_EQ(original_nodes_num - removed_nodes_count, current_nodes_num);
}

TEST(ScaleMatmulFusePass, scale_matmul_with_no_bias) {
  auto bias = 0.0f;
  auto scale = 2.34f;
  auto alpha = 3.45f;
  int removed_nodes_count = 2;
  MainTest(BuildProgramDesc(scale, bias, alpha),
           removed_nodes_count,
           {},
           {"a", "c", "d"},
           scale * alpha);
}

TEST(ScaleMatmulFusePass, scale_matmul_with_bias) {
  auto bias = 1.0f;
  auto scale = 2.34f;
  auto alpha = 3.45f;
  int removed_nodes_count = 0;
  MainTest(BuildProgramDesc(scale, bias, alpha),
           removed_nodes_count,
           {"a", "b"},
           {"b", "c", "d"},
           alpha);
}

}  // namespace ir
}  // namespace framework
}  // namespace paddle

USE_PASS(scale_matmul_fuse_pass);
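The deleted gtest above is superseded by the Python auto-scan test below; its node-count assertion is worth spelling out. In graph terms (plain Python bookkeeping, not Paddle's graph API), fusing deletes the scale op node and its intermediate output variable b, so the no-bias case expects two nodes removed while a non-zero bias leaves the graph untouched:

    # Before the pass: op nodes {scale, matmul}, var nodes {a, b, c, d}.
    before = ['scale', 'matmul', 'a', 'b', 'c', 'd']
    # After fusion: matmul reads "a" directly; scale and b are gone.
    after = ['matmul', 'a', 'c', 'd']
    assert len(before) - len(after) == 2  # removed_nodes_count in the no-bias test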
python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py

@@ -37,74 +37,55 @@ class TestScaleMatmulMkldnnFusePass(PassAutoScanTest):
         input_dim = draw(st.sampled_from([1, 32, 64]))

         def generate_input(attrs, type):
-            if attrs[1]['transpose_X'] and attrs[1]['transpose_Y']:
-                shape_x = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    attrs[2]['input_dim'],
-                    32,
-                ]
-                shape_y = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    64,
-                    attrs[2]['input_dim'],
-                ]
-            elif attrs[1]['transpose_X']:
-                shape_x = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    attrs[2]['input_dim'],
-                    32,
-                ]
-                shape_y = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    attrs[2]['input_dim'],
-                    64,
-                ]
-            elif attrs[1]['transpose_Y']:
-                shape_x = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    32,
-                    attrs[2]['input_dim'],
-                ]
-                shape_y = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    8,
-                    attrs[2]['input_dim'],
-                ]
-            else:
-                shape_x = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    32,
-                    attrs[2]['input_dim'],
-                ]
-                shape_y = [
-                    attrs[2]['batch_size'],
-                    attrs[2]['channel'],
-                    attrs[2]['input_dim'],
-                    16,
-                ]
-            if type == "x":
-                return np.random.random(shape_x).astype(np.float32)
-            else:
-                return np.random.random(shape_y).astype(np.float32)
+            is_transpose_X = attrs[1]['transpose_X']
+            is_transpose_Y = attrs[1]['transpose_Y']
+            if is_transpose_X:
+                shape_x_3 = attrs[2]['input_dim']
+                shape_x_4 = 32
+            else:
+                shape_x_3 = 32
+                shape_x_4 = attrs[2]['input_dim']
+            if is_transpose_X and is_transpose_Y:
+                shape_y_3 = 64
+                shape_y_4 = attrs[2]['input_dim']
+            elif is_transpose_X:
+                shape_y_3 = attrs[2]['input_dim']
+                shape_y_4 = 64
+            elif is_transpose_Y:
+                shape_y_3 = 8
+                shape_y_4 = attrs[2]['input_dim']
+            else:
+                shape_y_3 = attrs[2]['input_dim']
+                shape_y_4 = 16
+            shape_x = [
+                attrs[2]['batch_size'],
+                attrs[2]['channel'],
+                shape_x_3,
+                shape_x_4,
+            ]
+            shape_y = [
+                attrs[2]['batch_size'],
+                attrs[2]['channel'],
+                shape_y_3,
+                shape_y_4,
+            ]
+            shape = shape_x if type == 'x' else shape_y
+            return np.random.random(shape).astype(np.float32)

         attrs = [
             {
-                "scale": scale,
-                "bias": bias,
-                "bias_after_scale": bias_after_scale,
+                'scale': scale,
+                'bias': bias,
+                'bias_after_scale': bias_after_scale,
             },
             {
-                "transpose_X": transpose_X,
-                "transpose_Y": transpose_Y,
-                "alpha": alpha,
+                'transpose_X': transpose_X,
+                'transpose_Y': transpose_Y,
+                'alpha': alpha,
             },
             {
                 'batch_size': batch_size,

@@ -115,29 +96,29 @@ class TestScaleMatmulMkldnnFusePass(PassAutoScanTest):
         ops_config = [
             {
-                "op_type": "scale",
-                "op_inputs": {"X": ["input_data1"]},
-                "op_outputs": {"Out": ["scale_output"]},
-                "op_attrs": {
-                    "scale": attrs[0]['scale'],
-                    "bias": attrs[0]['bias'],
-                    "bias_after_scale": attrs[0]['bias_after_scale'],
+                'op_type': 'scale',
+                'op_inputs': {'X': ['input_data1']},
+                'op_outputs': {'Out': ['scale_output']},
+                'op_attrs': {
+                    'scale': attrs[0]['scale'],
+                    'bias': attrs[0]['bias'],
+                    'bias_after_scale': attrs[0]['bias_after_scale'],
                 },
             },
             {
-                "op_type": "matmul",
-                "op_inputs": {"X": ["scale_output"], "Y": ["input_data2"]},
-                "op_outputs": {"Out": ["matmul_output"]},
-                "op_attrs": {
+                'op_type': 'matmul',
+                'op_inputs': {'X': ['scale_output'], 'Y': ['input_data2']},
+                'op_outputs': {'Out': ['matmul_output']},
+                'op_attrs': {
                     'transpose_X': attrs[1]['transpose_X'],
                     'transpose_Y': attrs[1]['transpose_Y'],
                     'alpha': attrs[1]['alpha'],
-                    "fused_reshape_X": [],
-                    "fused_reshape_Y": [],
-                    "fused_transpose_X": [],
-                    "fused_transpose_Y": [],
-                    "fused_reshape_Out": [],
-                    "fused_transpose_Out": [],
+                    'fused_reshape_X': [],
+                    'fused_reshape_Y': [],
+                    'fused_transpose_X': [],
+                    'fused_transpose_Y': [],
+                    'fused_reshape_Out': [],
+                    'fused_transpose_Out': [],
                 },
             },
         ]

@@ -148,25 +129,27 @@ class TestScaleMatmulMkldnnFusePass(PassAutoScanTest):
             ops=ops,
             weights={},
             inputs={
-                "input_data1": TensorConfig(
-                    data_gen=partial(generate_input, attrs, "x")
+                'input_data1': TensorConfig(
+                    data_gen=partial(generate_input, attrs, 'x')
                 ),
-                "input_data2": TensorConfig(
-                    data_gen=partial(generate_input, attrs, "y")
+                'input_data2': TensorConfig(
+                    data_gen=partial(generate_input, attrs, 'y')
                 ),
             },
-            outputs=["matmul_output"],
+            outputs=['matmul_output'],
         )

         return program_config

     def sample_predictor_configs(self, program_config):
-        config = self.create_inference_config(use_mkldnn=True)
+        config = self.create_inference_config(
+            use_mkldnn=True, passes=['scale_matmul_fuse_pass']
+        )
         yield config, ['matmul'], (1e-5, 1e-5)

     def test(self):
-        self.run_and_statis(quant=False, passes=["scale_matmul_fuse_pass"])
+        self.run_and_statis(quant=False, passes=['scale_matmul_fuse_pass'])


-if __name__ == "__main__":
+if __name__ == '__main__':
     unittest.main()
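As a standalone check, the four shape combinations the rewritten generate_input produces line up with the matmul transpose flags: whichever axis becomes the contraction dimension after the optional transposes carries input_dim on both operands. A NumPy sketch verifying that compatibility (the batch and channel sizes here are illustrative):

    import numpy as np

    for input_dim in (1, 32, 64):
        for t_x in (False, True):
            for t_y in (False, True):
                # Mirror the shape rules from generate_input above.
                shape_x_3, shape_x_4 = (input_dim, 32) if t_x else (32, input_dim)
                if t_x and t_y:
                    shape_y_3, shape_y_4 = 64, input_dim
                elif t_x:
                    shape_y_3, shape_y_4 = input_dim, 64
                elif t_y:
                    shape_y_3, shape_y_4 = 8, input_dim
                else:
                    shape_y_3, shape_y_4 = input_dim, 16
                x = np.zeros((2, 3, shape_x_3, shape_x_4), np.float32)
                y = np.zeros((2, 3, shape_y_3, shape_y_4), np.float32)
                # Apply the transpose flags the way matmul would.
                if t_x:
                    x = x.swapaxes(-1, -2)
                if t_y:
                    y = y.swapaxes(-1, -2)
                np.matmul(x, y)  # raises ValueError if the inner dims disagree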