Unverified commit 092a2b14
Authored Feb 05, 2021 by Adam Osewski; committed via GitHub on Feb 05, 2021.
More UT for LayerNormFuse pass (#30891)
* Additionally change to not throw error from inside pass.
Parent: a80fe67f

Showing 4 changed files with 426 additions and 192 deletions (+426 −192).
paddle/fluid/framework/ir/layer_norm_fuse_pass.cc (+79 −55)
paddle/fluid/framework/ir/layer_norm_fuse_pass_tester.cc (+326 −130)
paddle/fluid/framework/ir/pass_test_util.cc (+15 −6)
paddle/fluid/framework/ir/pass_test_util.h (+6 −1)
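The second bullet of the commit message is the behavioral change: subgraph validation inside the pass no longer throws `EnforceNotMet`; the new `CHECK_TRUE`/`EXPECT_TRUE` macros log through `VLOG(4)` and bail out, leaving an unfusable graph untouched. Below is a minimal standalone sketch of that validate-and-bail pattern (toy code for illustration only; `ApplyToyPass` is made up, the real macros appear in the first hunk of the diff):

```cpp
#include <iostream>
#include <vector>

// Same shape as the CHECK_TRUE macro added in this commit: log and return
// instead of throwing, so the caller simply skips the optimization.
#define CHECK_TRUE(expr, err_msg)     \
  do {                                \
    if (!(expr)) {                    \
      std::clog << (err_msg) << "\n"; \
      return;                         \
    }                                 \
  } while (0)

// Toy stand-in for a fuse pass: validate preconditions, then "transform".
void ApplyToyPass(const std::vector<int>& dims) {
  CHECK_TRUE(dims.size() == 1, "reduction must be over a single dimension");
  CHECK_TRUE(dims.front() == -1, "reduction must be over the last dimension");
  std::cout << "fusion applied\n";  // the graph rewrite would happen here
}

int main() {
  ApplyToyPass({-1});    // valid: prints "fusion applied"
  ApplyToyPass({1, 1});  // invalid: logs the reason, graph left untouched
  return 0;
}
```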
paddle/fluid/framework/ir/layer_norm_fuse_pass.cc
```diff
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
 #include <string>
-#include <vector>
 
 #include "paddle/fluid/framework/framework.pb.h"
```
```diff
@@ -22,6 +21,7 @@
 #include "paddle/fluid/framework/var_desc.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/string/pretty_log.h"
+#include "paddle/fluid/string/printf.h"
 
 namespace paddle {
 namespace framework {
```
```diff
@@ -30,34 +30,57 @@ namespace ir {
 // cpplint complaints (wrong!) for not included <string> header in below line.
 using string::PrettyLogDetail;  // NOLINT
 
+#define CHECK_TRUE(expr, err_msg) \
+  do {                            \
+    int e_ = (expr);              \
+    if (!e_) {                    \
+      VLOG(4) << err_msg;         \
+      return;                     \
+    }                             \
+  } while (0)
+
+#define EXPECT_TRUE(expr, err_msg) \
+  do {                             \
+    int e_ = (expr);               \
+    if (!e_) {                     \
+      VLOG(4) << err_msg;          \
+      return false;                \
+    }                              \
+  } while (0)
+
 namespace {
 
-void validateReduceOpAttrs(const Node* node, const std::string& name) {
+bool validateReduceOpAttrs(const Node* node, const std::string& name) {
   const auto* op = node->Op();
   if (op->HasAttr("dim")) {
     auto dims = BOOST_GET_CONST(std::vector<int>, op->GetAttr("dim"));
-    PADDLE_ENFORCE_EQ(
-        dims.size(), 1,
-        platform::errors::PreconditionNotMet(
-            "The LayerNorm fusion ", name,
-            " reduction must happen only over single dimension."));
-    PADDLE_ENFORCE_EQ(dims.front(), -1,
-                      platform::errors::PreconditionNotMet(
-                          "The LayerNorm fusion ", name,
-                          " reduction must happen over last dimension."));
+    EXPECT_TRUE(
+        dims.size() == 1,
+        ::paddle::string::Sprintf(
+            "The LayerNorm fusion %s reduction must happen only over single "
+            "dimension.",
+            name));
+    EXPECT_TRUE(dims.front() == -1,
+                ::paddle::string::Sprintf("The LayerNorm fusion %s reduction "
+                                          "must happen over last dimension.",
+                                          name));
   }
   if (op->HasAttr("reduce_all")) {
-    PADDLE_ENFORCE(
-        !BOOST_GET_CONST(bool, op->GetAttr("reduce_all")),
-        platform::errors::PreconditionNotMet(
-            "The LayerNorm fusion ", name,
-            " reduction must have \'reduce_all\' attribute set to false."));
+    EXPECT_TRUE(
+        !BOOST_GET_CONST(bool, op->GetAttr("reduce_all")),
+        ::paddle::string::Sprintf(
+            "The LayerNorm fusion %s"
+            "reduction must have \'reduce_all\' attribute set to false.",
+            name));
   }
   if (op->HasAttr("keep_dim")) {
-    PADDLE_ENFORCE(
-        BOOST_GET_CONST(bool, op->GetAttr("keep_dim")),
-        platform::errors::PreconditionNotMet(
-            "The LayerNorm fusion ", name,
-            " reduction must have \'keep_dim\' attribute set to true."));
+    EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("keep_dim")),
+                ::paddle::string::Sprintf(
+                    "The LayerNorm fusion %s"
+                    " reduction must have \'keep_dim\' attribute set to true.",
+                    name));
   }
+  return true;
 }
 
 void setIntermediateOut(OpDesc* desc, const std::string& out_name,
```
```diff
@@ -129,48 +152,46 @@ void LayerNormFusePass::ApplyImpl(Graph* graph) const {
     auto* eps_tensor = scope->FindVar(eps->Name())->GetMutable<LoDTensor>();
 
     // ------------------ subgraph node's validation ---------------------------
-    PADDLE_ENFORCE_EQ(
-        eps_tensor->numel(), 1,
-        platform::errors::InvalidArgument(
-            "The LayerNorm divisor epsilon value must be one-element tensor, "
-            "but has %s elements.",
-            eps_tensor->numel()));
-    PADDLE_ENFORCE_EQ(
-        eps_tensor->type(), proto::VarType::FP32,
-        platform::errors::InvalidArgument(
-            "The LayerNorm divisor epsilon value must be of FP32 data type, "
-            "but is %s.",
-            eps_tensor->type()));
+    CHECK_TRUE(
+        eps_tensor->numel() == 1,
+        ::paddle::string::Sprintf(
+            "The LayerNorm divisor epsilon value must be one-element tensor, "
+            "but has %s elements.",
+            eps_tensor->numel()));
+    CHECK_TRUE(
+        eps_tensor->type() == proto::VarType::FP32,
+        ::paddle::string::Sprintf(
+            "The LayerNorm divisor epsilon value must be of FP32 data type, "
+            "but is %s.",
+            eps_tensor->type()));
 
     const auto& gamma_shape = gamma->Var()->GetShape();
     const auto& beta_shape = beta->Var()->GetShape();
     const auto& x_shape = x->Var()->GetShape();
     int64_t x_last_dim = x_shape.back();
 
-    PADDLE_ENFORCE_EQ(gamma_shape.size(), 1,
-                      platform::errors::InvalidArgument(
-                          "The LayerNorm gamma (scale) tensor shape must be "
-                          "one-dimensional, but is %s.",
-                          gamma_shape.size()));
-    PADDLE_ENFORCE_EQ(beta_shape.size(), 1,
-                      platform::errors::InvalidArgument(
-                          "The LayerNorm beta (shift) tensor shape must be "
-                          "one-dimensional, but is %s.",
-                          beta_shape.size()));
-    PADDLE_ENFORCE_EQ(beta_shape, gamma_shape,
-                      platform::errors::InvalidArgument(
-                          "The LayerNorm beta and gamma tensors shapes' must "
-                          "be equal."));
-    PADDLE_ENFORCE_EQ(gamma_shape.front(), x_last_dim,
-                      platform::errors::InvalidArgument(
-                          "The LayerNorm beta and gamma tensors shapes' must "
-                          "be equal to the last input's dimension size."));
-    validateReduceOpAttrs(x_mean, "input mean");
-    validateReduceOpAttrs(std_dev, "std_dev mean");
+    CHECK_TRUE(gamma_shape.size() == 1,
+               ::paddle::string::Sprintf(
+                   "The LayerNorm gamma (scale) tensor "
+                   "shape must be one-dimensional, but is %s.",
+                   gamma_shape.size()));
+    CHECK_TRUE(beta_shape.size() == 1,
+               ::paddle::string::Sprintf(
+                   "The LayerNorm beta (shift) tensor "
+                   "shape must be one-dimensional, but is %s.",
+                   beta_shape.size()));
+    CHECK_TRUE(beta_shape == gamma_shape,
+               ::paddle::string::Sprintf("The LayerNorm beta and gamma tensors "
+                                         "shapes' must be equal."));
+    CHECK_TRUE(gamma_shape.front() == x_last_dim,
+               ::paddle::string::Sprintf(
+                   "The LayerNorm beta and gamma tensors "
+                   "shapes' must be equal to the last input's dimension size."));
+    CHECK_TRUE(validateReduceOpAttrs(x_mean, "input mean"),
+               "Validation of input mean node failed.");
+    CHECK_TRUE(validateReduceOpAttrs(std_dev, "std_dev mean"),
+               "Validation of standard deviation node failed.");
 
     // ------------------ op creation and placement ---------------------------
```
```diff
@@ -213,6 +234,9 @@ void LayerNormFusePass::ApplyImpl(Graph* graph) const {
 }  // namespace framework
 }  // namespace paddle
 
+#undef CHECK_TRUE
+#undef EXPECT_TRUE
+
 REGISTER_PASS(layer_norm_fuse_pass,
               paddle::framework::ir::LayerNormFusePass);
 REGISTER_PASS_CAPABILITY(layer_norm_fuse_pass)
     .AddCombination(
```
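For orientation: the subgraph this pass matches, and which the tester below rebuilds op by op (`reduce_mean`, `elementwise_sub`, `elementwise_pow`, `elementwise_add` with `eps`, `sqrt`, `elementwise_div`, `elementwise_mul` with `gamma`, `elementwise_add` with `beta`), computes standard layer normalization over the last dimension:

$$y = \frac{x - \mu}{\sqrt{\operatorname{mean}\big((x - \mu)^2\big) + \varepsilon}}\,\gamma + \beta, \qquad \mu = \operatorname{mean}(x)\ \text{over the last dimension}$$

Replacing those nodes with a single `layer_norm` op therefore preserves semantics, which is why the tests count 19 removed graph nodes (ops plus intermediate variables) against 3 added ones (`layer_norm` plus its `Mean` and `Variance` outputs).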
paddle/fluid/framework/ir/layer_norm_fuse_pass_tester.cc
```diff
@@ -13,7 +13,10 @@
 // limitations under the License.
 
 #include <gtest/gtest.h>
+#include <memory>
 #include <vector>
 
+#include "paddle/fluid/framework/block_desc.h"
+#include "paddle/fluid/framework/framework.pb.h"
 #include "paddle/fluid/framework/ir/layer_norm_fuse_pass.h"
 #include "paddle/fluid/framework/ir/pass_test_util.h"
```
```diff
@@ -31,100 +34,153 @@ namespace ir {
 namespace {
 
-ProgramDesc BuildGraphProgram() {
-  auto prog = test::BuildProgramDesc(
-      {"x", "x_mean_out", "x_sub_mean_out", "x_sub_mean_sqr_out",
-       "std_dev_out", "std_dev_eps_out", "std_dev_eps_sqrt_out",
-       "division_out", "scale_out", "shift_out"},
-      {"sqr_pow", "eps", "gamma", "beta"});
-
-  const auto& block_desc = prog.Block(0);
-  auto* x_var_desc = block_desc.FindVar("x");
-  x_var_desc->SetDataType(proto::VarType::FP32);
-  x_var_desc->SetShape({3, 32, 48});
-
-  auto* eps_var_desc = block_desc.FindVar("eps");
-  eps_var_desc->SetDataType(proto::VarType::FP32);
-  eps_var_desc->SetShape({1});
-
-  auto* gamma_var_desc = block_desc.FindVar("gamma");
-  gamma_var_desc->SetDataType(proto::VarType::FP32);
-  gamma_var_desc->SetShape({48});
-
-  auto* beta_var_desc = block_desc.FindVar("beta");
-  beta_var_desc->SetDataType(proto::VarType::FP32);
-  beta_var_desc->SetShape({48});
-
-  auto* x_mean = test::CreateOp(&prog, "reduce_mean", {{"X", "x"}},
-                                {{"Out", "x_mean_out"}}, false);
-  x_mean->SetAttr("dim", std::vector<int>{-1});
-  x_mean->SetAttr("keep_dim", true);
-  x_mean->SetAttr("reduce_all", false);
-
-  test::CreateOp(&prog, "elementwise_sub", {{"X", "x"}, {"Y", "x_mean_out"}},
-                 {{"Out", "x_sub_mean_out"}}, false);
-  test::CreateOp(&prog, "elementwise_pow",
-                 {{"X", "x_sub_mean_out"}, {"Y", "sqr_pow"}},
-                 {{"Out", "x_sub_mean_sqr_out"}}, false);
-
-  auto* std_dev = test::CreateOp(&prog, "reduce_mean",
-                                 {{"X", "x_sub_mean_sqr_out"}},
-                                 {{"Out", "std_dev_out"}}, false);
-  std_dev->SetAttr("dim", std::vector<int>{-1});
-  std_dev->SetAttr("keep_dim", true);
-  std_dev->SetAttr("reduce_all", false);
-
-  test::CreateOp(&prog, "elementwise_add",
-                 {{"X", "std_dev_out"}, {"Y", "eps"}},
-                 {{"Out", "std_dev_eps_out"}}, false);
-  test::CreateOp(&prog, "sqrt", {{"X", "std_dev_eps_out"}},
-                 {{"Out", "std_dev_eps_sqrt_out"}}, false);
-  test::CreateOp(&prog, "elementwise_div",
-                 {{"X", "x_sub_mean_out"}, {"Y", "std_dev_eps_sqrt_out"}},
-                 {{"Out", "division_out"}}, false);
-  test::CreateOp(&prog, "elementwise_mul",
-                 {{"X", "division_out"}, {"Y", "gamma"}},
-                 {{"Out", "scale_out"}}, false);
-  test::CreateOp(&prog, "elementwise_add",
-                 {{"X", "scale_out"}, {"Y", "beta"}},
-                 {{"Out", "shift_out"}}, false);
-  return prog;
-}
-
-bool CheckFusedSubgraphOpsCount(const Graph& graph) {
-  return test::AssertOpsCount(graph, {{"reduce_mean", 0},
-                                      {"elementwise_sub", 0},
-                                      {"elementwise_pow", 0},
-                                      {"elementwise_add", 0},
-                                      {"sqrt", 0},
-                                      {"elementwise_div", 0},
-                                      {"elementwise_mul", 0},
-                                      {"layer_norm", 1}});
-}
+class LayerNormFuseTest {
+ public:
+  LayerNormFuseTest()
+      : m_prog{test::BuildProgramDesc(
+            {"x", "x_mean_out", "x_sub_mean_out", "x_sub_mean_sqr_out",
+             "std_dev_out", "std_dev_eps_out", "std_dev_eps_sqrt_out",
+             "division_out", "scale_out", "shift_out"},
+            {"sqr_pow", "eps", "gamma", "beta"})},
+        m_place{},
+        m_exe{m_place},
+        m_block_desc{m_prog.Block(0)} {
+    auto* x_var_desc = m_block_desc.FindVar("x");
+    x_var_desc->SetDataType(proto::VarType::FP32);
+    x_var_desc->SetShape({3, 32, 48});
+
+    auto* eps_var_desc = m_block_desc.FindVar("eps");
+    eps_var_desc->SetDataType(proto::VarType::FP32);
+    eps_var_desc->SetShape({1});
+
+    auto* gamma_var_desc = m_block_desc.FindVar("gamma");
+    gamma_var_desc->SetDataType(proto::VarType::FP32);
+    gamma_var_desc->SetShape({48});
+
+    auto* beta_var_desc = m_block_desc.FindVar("beta");
+    beta_var_desc->SetDataType(proto::VarType::FP32);
+    beta_var_desc->SetShape({48});
+
+    auto* x_mean = test::CreateOp(&m_prog, "reduce_mean", {{"X", "x"}},
+                                  {{"Out", "x_mean_out"}}, false);
+    x_mean->SetAttr("dim", std::vector<int>{-1});
+    x_mean->SetAttr("keep_dim", true);
+    x_mean->SetAttr("reduce_all", false);
+
+    test::CreateOp(&m_prog, "elementwise_sub",
+                   {{"X", "x"}, {"Y", "x_mean_out"}},
+                   {{"Out", "x_sub_mean_out"}}, false);
+    test::CreateOp(&m_prog, "elementwise_pow",
+                   {{"X", "x_sub_mean_out"}, {"Y", "sqr_pow"}},
+                   {{"Out", "x_sub_mean_sqr_out"}}, false);
+
+    auto* std_dev = test::CreateOp(&m_prog, "reduce_mean",
+                                   {{"X", "x_sub_mean_sqr_out"}},
+                                   {{"Out", "std_dev_out"}}, false);
+    std_dev->SetAttr("dim", std::vector<int>{-1});
+    std_dev->SetAttr("keep_dim", true);
+    std_dev->SetAttr("reduce_all", false);
+
+    test::CreateOp(&m_prog, "elementwise_add",
+                   {{"X", "std_dev_out"}, {"Y", "eps"}},
+                   {{"Out", "std_dev_eps_out"}}, false);
+    test::CreateOp(&m_prog, "sqrt", {{"X", "std_dev_eps_out"}},
+                   {{"Out", "std_dev_eps_sqrt_out"}}, false);
+    test::CreateOp(&m_prog, "elementwise_div",
+                   {{"X", "x_sub_mean_out"}, {"Y", "std_dev_eps_sqrt_out"}},
+                   {{"Out", "division_out"}}, false);
+    test::CreateOp(&m_prog, "elementwise_mul",
+                   {{"X", "division_out"}, {"Y", "gamma"}},
+                   {{"Out", "scale_out"}}, false);
+    test::CreateOp(&m_prog, "elementwise_add",
+                   {{"X", "scale_out"}, {"Y", "beta"}},
+                   {{"Out", "shift_out"}}, false);
+  }
+
+  template <typename Func>
+  LayerNormFuseTest(const Func& func, int removed_nodes = 0,
+                    int added_nodes = 0)
+      : LayerNormFuseTest() {
+    m_removed_nodes = removed_nodes;
+    m_added_nodes = added_nodes;
+    func(m_block_desc);
+  }
+
+  void setupGraph() {
+    auto initFun = [this](const Scope& scope,
+                          const paddle::platform::CPUPlace& place) {
+      this->initEpsTensorValue(scope, place);
+    };
+    setupGraphWithInitFunc(initFun);
+  }
+
+  template <typename Func>
+  void setupGraphWithInitFunc(const Func& func) {
+    m_graph.reset(new Graph(m_prog));
+    // Init scope, as it is used in pass
+    m_exe.CreateVariables(m_prog, 0, true, &m_scope);
+    func(m_scope, m_place);
+    m_graph->SetNotOwned(kParamScopeAttr, &m_scope);
+  }
+
+  void run(bool fusion = false) const {
+    EXPECT_TRUE(test::RunPassAndAssert(m_graph.get(), "layer_norm_fuse_pass",
+                                       "x", "shift_out", m_removed_nodes,
+                                       m_added_nodes));
+    EXPECT_TRUE(CheckSubgraphOpsCount(*m_graph, fusion));
+  }
+
+  const ProgramDesc& getProgramDesc() const { return m_prog; }
+
+  const Graph* getGraph() const { return m_graph.get(); }
+
+ private:
+  void initEpsTensorValue(const Scope& scope,
+                          const paddle::platform::CPUPlace& place) {
+    float eps_value = 1e-5;
+    test::InitLoDTensorHolder<float>(scope, place, "eps", {1}, &eps_value);
+  }
+
+  bool CheckSubgraphOpsCount(const Graph& graph, bool fusion) const {
+    if (fusion)
+      return test::AssertOpsCount(graph, {{"reduce_mean", 0},
+                                          {"elementwise_sub", 0},
+                                          {"elementwise_pow", 0},
+                                          {"elementwise_add", 0},
+                                          {"sqrt", 0},
+                                          {"elementwise_div", 0},
+                                          {"elementwise_mul", 0},
+                                          {"layer_norm", 1}});
+    else
+      return test::AssertOpsCount(graph, {{"reduce_mean", 2},
+                                          {"elementwise_sub", 1},
+                                          {"elementwise_pow", 1},
+                                          {"elementwise_add", 2},
+                                          {"sqrt", 1},
+                                          {"elementwise_div", 1},
+                                          {"elementwise_mul", 1},
+                                          {"layer_norm", 0}});
+  }
+
+  int m_removed_nodes{19};
+  int m_added_nodes{3};
+
+  ProgramDesc m_prog;
+  paddle::platform::CPUPlace m_place;
+  NaiveExecutor m_exe;
+  const BlockDesc& m_block_desc;
+  Scope m_scope;
+  std::unique_ptr<Graph> m_graph{nullptr};
+};
 
 }  // namespace
 
 // ------------------------------ Test cases -----------------------------------
```
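The new `LayerNormFuseTest` fixture replaces the free `BuildGraphProgram` helper: the default constructor builds the valid subgraph program, and the templated constructor delegates to it and then lets a test-specific callback corrupt one detail before the graph is built. A standalone sketch of that delegating-constructor-plus-callback pattern (toy types, not Paddle code):

```cpp
#include <iostream>
#include <map>
#include <string>

struct ToyFixture {
  std::map<std::string, int> shapes{{"gamma", 48}, {"beta", 48}};

  ToyFixture() = default;

  // Delegating constructor: build the valid default scenario first, then
  // let the test-specific callback corrupt one detail (e.g. a shape).
  template <typename Func>
  explicit ToyFixture(const Func& func) : ToyFixture() {
    func(&shapes);
  }

  // Stand-in for run(): "fusion" succeeds only if the scenario is valid.
  bool run() const { return shapes.at("gamma") == shapes.at("beta"); }
};

int main() {
  ToyFixture good;  // default-built scenario: fusion expected
  ToyFixture bad([](std::map<std::string, int>* s) { (*s)["beta"] = 32; });
  std::cout << good.run() << ' ' << bad.run() << '\n';  // prints "1 0"
  return 0;
}
```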
```diff
 TEST(FuseLayerNormPass, TestFuse) {
-  ProgramDesc prog = BuildGraphProgram();
-  Graph graph(prog);
-  constexpr int removed_nodes = 19;
-  // LayerNorm + outputs: {Mean, Variance}
-  constexpr int added_nodes = 3;
-
-  auto place = paddle::platform::CPUPlace();
-  NaiveExecutor exe{place};
-  Scope scope;
-  float eps_value = 1e-5f;
-  // Init scope, as it is used in pass
-  exe.CreateVariables(prog, 0, true, &scope);
-  test::InitLoDTensorHolder<float>(&scope, place, "eps", {1}, &eps_value);
-
-  graph.SetNotOwned(kParamScopeAttr, &scope);
-  EXPECT_TRUE(test::RunPassAndAssert(&graph, "layer_norm_fuse_pass", "x",
-                                     "shift_out", removed_nodes, added_nodes));
-  EXPECT_TRUE(CheckFusedSubgraphOpsCount(graph));
-  for (const auto* node : graph.Nodes()) {
+  LayerNormFuseTest lnorm_test;
+  lnorm_test.setupGraph();
+  lnorm_test.run(true);
+
+  // additional attribute checks
+  for (const auto* node : lnorm_test.getGraph()->Nodes()) {
     if (node->IsOp() && node->Op()->Type() == "layer_norm") {
       const auto* op = node->Op();
       ASSERT_TRUE(op->HasAttr("is_test"));
```
```diff
@@ -136,54 +192,194 @@ TEST(FuseLayerNormPass, TestFuse) {
 }
 
 TEST(FuseLayerNormPass, TestInvalidEpsNumel) {
-  ProgramDesc prog = BuildGraphProgram();
-  auto* eps_var_desc = prog.Block(0).FindVar("eps");
-  eps_var_desc->SetDataType(proto::VarType::FP32);
-  eps_var_desc->SetShape({2});
-
-  Graph graph(prog);
-  constexpr int removed_nodes = 19;
-  constexpr int added_nodes = 3;
-
-  auto place = paddle::platform::CPUPlace();
-  NaiveExecutor exe{place};
-  Scope scope;
-  auto eps_values = std::vector<float>{1e-5f, 1e-5f};
-  // Init scope, as it is used in pass
-  exe.CreateVariables(prog, 0, true, &scope);
-  test::InitLoDTensorHolder<float>(&scope, place, "eps", {2},
-                                   eps_values.data());
-
-  graph.SetNotOwned(kParamScopeAttr, &scope);
-  EXPECT_THROW(test::RunPassAndAssert(&graph, "layer_norm_fuse_pass", "x",
-                                      "shift_out", removed_nodes, added_nodes),
-               paddle::platform::EnforceNotMet);
+  const auto editEpsFun = [](const BlockDesc& block_desc) {
+    auto* eps_var_desc = block_desc.FindVar("eps");
+    eps_var_desc->SetDataType(proto::VarType::FP32);
+    eps_var_desc->SetShape({2});
+  };
+  const auto initEpsTensor = [](const Scope& scope,
+                                const paddle::platform::CPUPlace& place) {
+    auto eps_values = std::vector<float>{1e-5f, 1e-5f};
+    test::InitLoDTensorHolder<float>(scope, place, "eps", {2},
+                                     eps_values.data());
+  };
+
+  LayerNormFuseTest lnorm_test(editEpsFun);
+  lnorm_test.setupGraphWithInitFunc(initEpsTensor);
+  lnorm_test.run(false);
 }
 
 TEST(FuseLayerNormPass, TestInvalidEpsDataType) {
-  ProgramDesc prog = BuildGraphProgram();
-  auto* eps_var_desc = prog.Block(0).FindVar("eps");
-  eps_var_desc->SetDataType(proto::VarType::FP64);
-  eps_var_desc->SetShape({1});
-
-  Graph graph(prog);
-  constexpr int removed_nodes = 19;
-  constexpr int added_nodes = 3;
-
-  auto place = paddle::platform::CPUPlace();
-  NaiveExecutor exe{place};
-  Scope scope;
-  double eps_value = 1e-5;
-  // Init scope, as it is used in pass
-  exe.CreateVariables(prog, 0, true, &scope);
-  test::InitLoDTensorHolder<double>(&scope, place, "eps", {1}, &eps_value);
-
-  graph.SetNotOwned(kParamScopeAttr, &scope);
-  EXPECT_THROW(test::RunPassAndAssert(&graph, "layer_norm_fuse_pass", "x",
-                                      "shift_out", removed_nodes, added_nodes),
-               paddle::platform::EnforceNotMet);
+  const auto editEpsFun = [](const BlockDesc& block_desc) {
+    auto* eps_var_desc = block_desc.FindVar("eps");
+    eps_var_desc->SetDataType(proto::VarType::FP64);
+    eps_var_desc->SetShape({1});
+  };
+  const auto initEpsTensor = [](const Scope& scope,
+                                const paddle::platform::CPUPlace& place) {
+    double eps_value = 1e-5;
+    test::InitLoDTensorHolder<double>(scope, place, "eps", {1}, &eps_value);
+  };
+
+  LayerNormFuseTest lnorm_test(editEpsFun);
+  lnorm_test.setupGraphWithInitFunc(initEpsTensor);
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, TestInvalidGammaRank) {
+  const auto editGammaFun = [](const BlockDesc& block_desc) {
+    auto* gamma_var_desc = block_desc.FindVar("gamma");
+    gamma_var_desc->SetDataType(proto::VarType::FP32);
+    gamma_var_desc->SetShape({48, 32});
+  };
+
+  LayerNormFuseTest lnorm_test(editGammaFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, TestInvalidBetaRank) {
+  const auto editBetaFun = [](const BlockDesc& block_desc) {
+    auto* beta_var_desc = block_desc.FindVar("beta");
+    beta_var_desc->SetDataType(proto::VarType::FP32);
+    beta_var_desc->SetShape({48, 32});
+  };
+
+  LayerNormFuseTest lnorm_test(editBetaFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, TestUnequalGammaBetaShapes) {
+  const auto editGammaBetaFun = [](const BlockDesc& block_desc) {
+    auto* beta_var_desc = block_desc.FindVar("beta");
+    beta_var_desc->SetDataType(proto::VarType::FP32);
+    beta_var_desc->SetShape({32});
+  };
+
+  LayerNormFuseTest lnorm_test(editGammaBetaFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, TestGammaBetaUnequalInputChannelShape) {
+  const auto editGammaBetaFun = [](const BlockDesc& block_desc) {
+    auto* beta_var_desc = block_desc.FindVar("beta");
+    beta_var_desc->SetDataType(proto::VarType::FP32);
+    beta_var_desc->SetShape({32});
+
+    auto* gamma_var_desc = block_desc.FindVar("gamma");
+    gamma_var_desc->SetDataType(proto::VarType::FP32);
+    gamma_var_desc->SetShape({32});
+  };
+
+  LayerNormFuseTest lnorm_test(editGammaBetaFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, NoFusionBadInMeanDimAttrRank) {
+  const auto editFun = [](const BlockDesc& block_desc) {
+    auto* x_mean_desc =
+        test::GetOp(block_desc, "reduce_mean", "Out", "x_mean_out");
+    ASSERT_NE(x_mean_desc, nullptr);
+    x_mean_desc->SetAttr("dim", std::vector<int>{1, 1});
+  };
+
+  LayerNormFuseTest lnorm_test(editFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, NoFusionBadInMeanDimAttr) {
+  const auto editFun = [](const BlockDesc& block_desc) {
+    auto* x_mean_desc =
+        test::GetOp(block_desc, "reduce_mean", "Out", "x_mean_out");
+    ASSERT_NE(x_mean_desc, nullptr);
+    x_mean_desc->SetAttr("dim", std::vector<int>{1});
+  };
+
+  LayerNormFuseTest lnorm_test(editFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, NoFusionBadInMeanKeepDimAttr) {
+  const auto editFun = [](const BlockDesc& block_desc) {
+    auto* x_mean_desc =
+        test::GetOp(block_desc, "reduce_mean", "Out", "x_mean_out");
+    ASSERT_NE(x_mean_desc, nullptr);
+    x_mean_desc->SetAttr("keep_dim", false);
+  };
+
+  LayerNormFuseTest lnorm_test(editFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, NoFusionBadInMeanReduceAllAttr) {
+  const auto editFun = [](const BlockDesc& block_desc) {
+    auto* x_mean_desc =
+        test::GetOp(block_desc, "reduce_mean", "Out", "x_mean_out");
+    ASSERT_NE(x_mean_desc, nullptr);
+    x_mean_desc->SetAttr("reduce_all", true);
+  };
+
+  LayerNormFuseTest lnorm_test(editFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, NoFusionBadStdDevMeanDimAttrRank) {
+  const auto editFun = [](const BlockDesc& block_desc) {
+    auto* std_dev_desc =
+        test::GetOp(block_desc, "reduce_mean", "Out", "std_dev_out");
+    ASSERT_NE(std_dev_desc, nullptr);
+    std_dev_desc->SetAttr("dim", std::vector<int>{1, 1});
+  };
+
+  LayerNormFuseTest lnorm_test(editFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, NoFusionBadStdDevMeanDimAttr) {
+  const auto editFun = [](const BlockDesc& block_desc) {
+    auto* std_dev_desc =
+        test::GetOp(block_desc, "reduce_mean", "Out", "std_dev_out");
+    ASSERT_NE(std_dev_desc, nullptr);
+    std_dev_desc->SetAttr("dim", std::vector<int>{1});
+  };
+
+  LayerNormFuseTest lnorm_test(editFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, NoFusionBadStdDevMeanKeepDimAttr) {
+  const auto editFun = [](const BlockDesc& block_desc) {
+    auto* std_dev_desc =
+        test::GetOp(block_desc, "reduce_mean", "Out", "std_dev_out");
+    ASSERT_NE(std_dev_desc, nullptr);
+    std_dev_desc->SetAttr("keep_dim", false);
+  };
+
+  LayerNormFuseTest lnorm_test(editFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
+
+TEST(FuseLayerNormPass, NoFusionBadStdDevMeanReduceAllAttr) {
+  const auto editFun = [](const BlockDesc& block_desc) {
+    auto* std_dev_desc =
+        test::GetOp(block_desc, "reduce_mean", "Out", "std_dev_out");
+    ASSERT_NE(std_dev_desc, nullptr);
+    std_dev_desc->SetAttr("reduce_all", true);
+  };
+
+  LayerNormFuseTest lnorm_test(editFun);
+  lnorm_test.setupGraph();
+  lnorm_test.run(false);
+}
 
 TEST(FuseLayerNormPass, pass_op_version_check) {
```
paddle/fluid/framework/ir/pass_test_util.cc
```diff
@@ -175,10 +175,11 @@ bool RunPassAndAssert(Graph* graph, const std::string& pass_name,
 }
 
 template <typename T>
-void InitLoDTensorHolder(Scope* scope, const paddle::platform::Place& place,
+void InitLoDTensorHolder(const Scope& scope,
+                         const paddle::platform::Place& place,
                          const std::string& var_name,
                          const std::vector<int64_t>& dims, const T* data) {
-  auto var = scope->Var(var_name);
+  auto var = scope.FindLocalVar(var_name);
   auto tensor = var->GetMutable<LoDTensor>();
   auto* tensor_mem_ptr = tensor->mutable_data<T>(make_ddim(dims), place);
   if (data != nullptr) {
@@ -189,14 +190,16 @@ void InitLoDTensorHolder(Scope* scope, const paddle::platform::Place& place,
 }
 
 // Instantiate for below data types.
-template void InitLoDTensorHolder<float>(Scope*, const paddle::platform::Place&,
+template void InitLoDTensorHolder<float>(const Scope&,
+                                         const paddle::platform::Place&,
                                          const std::string&,
                                          const std::vector<int64_t>&,
                                          const float*);
-template void InitLoDTensorHolder<int>(Scope*, const paddle::platform::Place&,
+template void InitLoDTensorHolder<int>(const Scope&,
+                                       const paddle::platform::Place&,
                                        const std::string&,
                                        const std::vector<int64_t>&,
                                        const int*);
-template void InitLoDTensorHolder<double>(Scope*,
+template void InitLoDTensorHolder<double>(const Scope&,
                                           const paddle::platform::Place&,
                                           const std::string&,
                                           const std::vector<int64_t>&,
@@ -205,7 +208,13 @@ template void InitLoDTensorHolder<double>(Scope*,
 OpDesc* GetOp(const ProgramDesc& prog, const std::string& op_type,
               const std::string& output_name,
               const std::string& output_arg_name) {
-  auto all_ops = prog.Block(0).AllOps();
+  return GetOp(prog.Block(0), op_type, output_name, output_arg_name);
+}
+
+OpDesc* GetOp(const BlockDesc& block_desc, const std::string& op_type,
+              const std::string& output_name,
+              const std::string& output_arg_name) {
+  auto all_ops = block_desc.AllOps();
   for (auto* op_desc : all_ops) {
     if (op_desc->Type() == op_type && op_desc->HasOutput(output_name)) {
       const auto& arg_names = op_desc->Outputs().at(output_name);
```
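One consequence of the `Scope*` to `const Scope&` change above: the helper no longer creates the variable on demand (`scope->Var(...)` would), it only looks up one that `NaiveExecutor::CreateVariables` has already created (`scope.FindLocalVar(...)`). A toy contrast of the two lookup styles, with `std::map` standing in for `Scope`:

```cpp
#include <cassert>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> scope;  // pretend Scope
  scope["eps"] = 0;                  // pre-created, like CreateVariables does

  // Old style: Var() creates the entry if missing (operator[] here).
  scope["gamma"];  // silently creates "gamma"

  // New style: FindLocalVar() only finds what already exists (find() here),
  // so forgetting the setup step fails loudly instead of masking the bug.
  auto it = scope.find("eps");
  assert(it != scope.end());
  it->second = 42;
  return 0;
}
```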
paddle/fluid/framework/ir/pass_test_util.h
```diff
@@ -128,7 +128,8 @@ bool RunPassAndAssert(Graph* graph, const std::string& pass_name,
 /// @tparam T      Tensor data type.
 ///
 template <typename T>
-void InitLoDTensorHolder(Scope* scope, const paddle::platform::Place& place,
+void InitLoDTensorHolder(const Scope& scope,
+                         const paddle::platform::Place& place,
                          const std::string& var_name,
                          const std::vector<int64_t>& dims,
                          const T* data = nullptr);
@@ -148,6 +149,10 @@ OpDesc* GetOp(const ProgramDesc& prog, const std::string& op_type,
               const std::string& output_name,
               const std::string& output_arg_name);
 
+OpDesc* GetOp(const BlockDesc& block_desc, const std::string& op_type,
+              const std::string& output_name,
+              const std::string& output_arg_name);
+
 }  // namespace test
 }  // namespace ir
 }  // namespace framework
```