BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit e43ea422, authored on Feb 13, 2023 by wangruting
modify test without cinn
Parent: 29a13edd
Showing 3 changed files with 108 additions and 56 deletions (+108 -56)
python/paddle/fluid/tests/unittests/prim/composite_ops/test_composite_layer_norm.py (+4 -4)
python/paddle/fluid/tests/unittests/prim/composite_ops/test_composite_layer_norm_grad.py (+99 -51)
python/paddle/fluid/tests/unittests/prim/composite_ops/utils.py (+5 -1)
python/paddle/fluid/tests/unittests/prim/composite_ops/test_composite_layer_norm.py @ e43ea422

@@ -72,10 +72,10 @@ def expect_forward(x, norm_shape, w, b):
 class TestCompositelayer_norm(unittest.TestCase):
     def setUp(self):
         self.dtypes = ["float16", "float32"]
-        self.n_shape = [[4], [3], [2, 3]]
-        self.shape1s = [[3, 4], [2, 4, 3], [2, 2, 3]]
-        self.shape2s = [[4], [3], [6]]
-        self.shape3s = [[4], [3], [6]]
+        self.n_shape = [[4], [64, 128], [64]]
+        self.shape1s = [[3, 4], [64, 64, 128], [128, 64, 64]]
+        self.shape2s = [[4], [64 * 128], [64]]
+        self.shape3s = [[4], [64 * 128], [64]]

     def cal_composite(self, inputs, norm_shape, weight, bias):
         paddle.enable_static()
...
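The new cases keep layer_norm's shape contract: each n_shape[i] is a trailing slice of the input shape shape1s[i], and the flat weight/bias shapes shape2s[i]/shape3s[i] cover exactly the normalized elements. A sanity sketch (not part of the commit) over the values added above:

# Sanity sketch: verify the invariants the new test cases rely on.
import numpy as np

n_shape = [[4], [64, 128], [64]]
shape1s = [[3, 4], [64, 64, 128], [128, 64, 64]]
shape2s = [[4], [64 * 128], [64]]
shape3s = [[4], [64 * 128], [64]]

for n, s1, s2, s3 in zip(n_shape, shape1s, shape2s, shape3s):
    assert s1[len(s1) - len(n):] == n    # normalized shape is a suffix of the input shape
    assert np.prod(s2) == np.prod(n)     # weight holds one entry per normalized element
    assert np.prod(s3) == np.prod(n)     # bias likewise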
python/paddle/fluid/tests/unittests/prim/composite_ops/test_composite_layer_norm_grad.py @ e43ea422

@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from utils import TOLERANCE
+from utils import SUB_TOLERANCE

 import paddle
 import paddle.nn.functional as F
@@ -50,11 +50,11 @@ class Attr:
         return

     def get_rtol(self, flag):
-        rtol = TOLERANCE[self.dtype][flag].get("rtol")
+        rtol = SUB_TOLERANCE[self.dtype][flag].get("rtol")
         return rtol

     def get_atol(self, flag):
-        atol = TOLERANCE[self.dtype][flag].get("atol")
+        atol = SUB_TOLERANCE[self.dtype][flag].get("atol")
         return atol
...
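Switching the lookup table from TOLERANCE to SUB_TOLERANCE loosens the float32 bounds from 1e-6 to 1e-5 (values taken from the utils.py hunk at the end of this diff). A sketch of the effect, not part of the commit:

# Values copied from the TOLERANCE / SUB_TOLERANCE entries shown in the
# utils.py hunk below: float32 checks now use a looser error bound,
# consistent with an op recomposed from many primitive ops.
TOLERANCE_f32_backward = {"rtol": 1e-6, "atol": 1e-6}      # old lookup table
SUB_TOLERANCE_f32_backward = {"rtol": 1e-5, "atol": 1e-5}  # new lookup table

assert TOLERANCE_f32_backward.get("rtol") < SUB_TOLERANCE_f32_backward.get("rtol")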
@@ -65,30 +65,6 @@ def fn(x, norm_shape, w, b):
     return F.layer_norm(x, norm_shape, w, b)

-
-# def layer_norm_ (input, weight, bias, epsilon=1e-05, begin_norm_axis = 0):
-#     axis = np.arange(begin_norm_axis, len(input.shape))
-#     mean = paddle.mean(input, axis=axis, keepdim=True)
-#     t1 = input - mean
-#     t2 = paddle.pow( t1, 2.0)
-#     t3 = paddle.mean( t2, axis=axis, keepdim=True)
-#     t4 = t3 + epsilon
-#     t5 = paddle.sqrt( t4 )
-#     t7 = t1 / t5
-#     out = t7
-#     if weight is not None:
-#         weight = paddle.reshape(weight, input.shape[begin_norm_axis:])
-#         out = t7 * paddle.broadcast_to(weight, out.shape)
-#     if bias is not None:
-#         bias = paddle.reshape(bias, input.shape[begin_norm_axis:])
-#         out = out + paddle.broadcast_to(bias, out.shape)
-#     return out
-
-# def composite_forward(x, norm_shape, w, b):
-#     b_axis = len(x.shape) - len(norm_shape)
-#     return layer_norm_(x, w, b, begin_norm_axis=b_axis)
-

 def expect_backward(x, norm_shape, w, b):
     paddle.disable_static()
     x.stop_gradient = False
...
@@ -101,10 +77,10 @@ def expect_backward(x, norm_shape, w, b):
 class TestCompositelayer_norm(unittest.TestCase):
     def setUp(self):
         self.dtypes = ["float16", "float32"]
-        self.n_shape = [[3, 4], [3], [2, 3]]
-        self.shape1s = [[3, 4], [2, 4, 3], [2, 2, 3]]
-        self.shape2s = [[12], [3], [6]]
-        self.shape3s = [[12], [3], [6]]
+        self.n_shape = [[4], [64, 128], [64]]
+        self.shape1s = [[3, 4], [64, 64, 128], [128, 64, 64]]
+        self.shape2s = [[4], [64 * 128], [64]]
+        self.shape3s = [[4], [64 * 128], [64]]

     def cal_composite_backward(self, inputs, norm_shape, weight, bias):
         paddle.enable_static()
...
@@ -155,6 +131,49 @@ class TestCompositelayer_norm(unittest.TestCase):
         core._set_prim_forward_enabled(False)
         return res

+    def cal2_composite_backward(self, inputs, norm_shape, weight, bias):
+        paddle.enable_static()
+        core._set_prim_forward_enabled(True)
+        startup_program = paddle.static.Program()
+        main_program = paddle.static.Program()
+        with paddle.static.program_guard(main_program, startup_program):
+            x = paddle.static.data('x', shape=inputs.shape, dtype=str(inputs.dtype))
+            x.stop_gradient = False
+            y = fn(x, norm_shape, weight, bias)
+            blocks = main_program.blocks
+
+            fwd_ops = [op.type for op in blocks[0].ops]
+            # Ensure that layer_norm in original block
+            self.assertTrue('layer_norm' in fwd_ops)
+
+            paddle.incubate.autograd.to_prim(blocks)
+
+            fwd_ops_new = [op.type for op in blocks[0].ops]
+            # Ensure that layer_norm is splitted into small ops
+            self.assertTrue('layer_norm' not in fwd_ops_new)
+
+            z = paddle.static.gradients([y], x)
+            fwd_ops_grad = [op.type for op in blocks[0].ops]
+            # Ensure that layer_norm_grad not in grad block
+            self.assertTrue('layer_norm_grad' not in fwd_ops_grad)
+
+        exe = paddle.static.Executor()
+        exe.run(startup_program)
+        res = exe.run(
+            main_program,
+            feed={
+                'x': inputs,
+            },
+            fetch_list=[z],
+        )
+        paddle.disable_static()
+        core._set_prim_forward_enabled(False)
+        return res

     def compare_backward(self):
         x, w, b = generate_data(attrs.shape1, attrs.shape2, attrs.shape3)
         n_shape = attrs.n_shape
...
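Distilled outside the test harness, the check-then-lower pattern of the new cal2_composite_backward looks roughly like this (a sketch assuming the same prim API the diff uses; not part of the commit):

# Sketch: build a static program, confirm the composite op is present,
# lower it with to_prim, then confirm only primitive ops remain.
import paddle
import paddle.nn.functional as F
from paddle.fluid import core  # the test file imports core the same way

paddle.enable_static()
core._set_prim_forward_enabled(True)

main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.static.data('x', shape=[2, 64], dtype='float32')
    y = F.layer_norm(x, [64])                      # recorded as a single layer_norm op

    ops = [op.type for op in main.blocks[0].ops]
    assert 'layer_norm' in ops                     # composite op present before lowering
    paddle.incubate.autograd.to_prim(main.blocks)  # rewrite into primitive ops in place
    ops = [op.type for op in main.blocks[0].ops]
    assert 'layer_norm' not in ops                 # decomposed into small ops

core._set_prim_forward_enabled(False)
paddle.disable_static()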
@@ -162,25 +181,25 @@ class TestCompositelayer_norm(unittest.TestCase):
         w_p = paddle.to_tensor(w)
         b_p = paddle.to_tensor(b)

-        expect = expect_backward(x_p, n_shape, w_p, b_p).numpy()
-        actual = self.cal_composite_backward(x_p, n_shape, w_p, b_p)
+        expect = expect_backward(x_p, n_shape, w_p, b_p)[0].numpy()
+        actual = self.cal_composite_backward(x, n_shape, w, b)[0]

         assert expect.dtype == actual.dtype
         np.testing.assert_allclose(
             expect,
             actual,
-            rtol=attrs.get_rtol("forward"),
-            atol=attrs.get_atol("forward"),
+            rtol=attrs.get_rtol("backward"),
+            atol=attrs.get_atol("backward"),
         )

-        expect_2 = expect_backward(x_p, n_shape, None, None).numpy()
-        actual_2 = self.cal_composite_backward(x_p, n_shape, None, None).numpy()
+        expect_2 = expect_backward(x_p, n_shape, None, None)[0].numpy()
+        actual_2 = self.cal2_composite_backward(x, n_shape, None, None)[0]

         assert expect_2.dtype == actual_2.dtype
         np.testing.assert_allclose(
             expect_2,
             actual_2,
-            rtol=attrs.get_rtol("forward"),
-            atol=attrs.get_atol("forward"),
+            rtol=attrs.get_rtol("backward"),
+            atol=attrs.get_atol("backward"),
         )

     def test_backward(self):
...
@@ -200,10 +219,10 @@ class TestCompositelayer_normPrimBackward(unittest.TestCase):
     def setUp(self):
         core._set_prim_backward_enabled(True)
         self.dtypes = ["float16", "float32"]
-        self.n_shape = [[3, 4], [3], [2, 3]]
-        self.shape1s = [[3, 4], [2, 4, 3], [2, 2, 3]]
-        self.shape2s = [[12], [3], [6]]
-        self.shape3s = [[12], [3], [6]]
+        self.n_shape = [[4], [64, 128], [64]]
+        self.shape1s = [[3, 4], [64, 64, 128], [128, 64, 64]]
+        self.shape2s = [[4], [64 * 128], [64]]
+        self.shape3s = [[4], [64 * 128], [64]]

     def cal_composite_backward(self, inputs, norm_shape, weight, bias):
         paddle.enable_static()
...
@@ -240,6 +259,35 @@ class TestCompositelayer_normPrimBackward(unittest.TestCase):
         core._set_prim_all_enabled(False)
         return res

+    def cal2_composite_backward(self, inputs, norm_shape, weight, bias):
+        paddle.enable_static()
+        core._set_prim_all_enabled(True)
+        startup_program = paddle.static.Program()
+        main_program = paddle.static.Program()
+        with paddle.static.program_guard(main_program, startup_program):
+            x = paddle.static.data('x', shape=inputs.shape, dtype=str(inputs.dtype))
+            x.stop_gradient = False
+            y = fn(x, norm_shape, weight, bias)
+            blocks = main_program.blocks
+
+            paddle.incubate.autograd.to_prim(blocks)
+
+            z = paddle.static.gradients([y], x)
+
+        exe = paddle.static.Executor()
+        exe.run(startup_program)
+        res = exe.run(
+            main_program,
+            feed={
+                'x': inputs,
+            },
+            fetch_list=[z],
+        )
+        paddle.disable_static()
+        core._set_prim_all_enabled(False)
+        return res

     def compare_backward(self):
         x, w, b = generate_data(attrs.shape1, attrs.shape2, attrs.shape3)
         n_shape = attrs.n_shape
...
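This class flips _set_prim_all_enabled where the previous class flipped _set_prim_forward_enabled. A sketch of the three switches as this diff uses them, with roles inferred from usage rather than from documented behavior:

# Sketch: the prim switches toggled across this diff (semantics inferred).
from paddle.fluid import core

core._set_prim_forward_enabled(True)   # lower composite forward ops (TestCompositelayer_norm.cal2_composite_backward)
core._set_prim_backward_enabled(True)  # use primitive-op gradients (TestCompositelayer_normPrimBackward.setUp)
core._set_prim_all_enabled(True)       # enable both at once (cal2_composite_backward just above)

# The cal* helpers in the diff restore the default before returning:
core._set_prim_all_enabled(False)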
@@ -247,25 +295,25 @@ class TestCompositelayer_normPrimBackward(unittest.TestCase):
         w_p = paddle.to_tensor(w)
         b_p = paddle.to_tensor(b)

-        expect = expect_backward(x_p, n_shape, w_p, b_p).numpy()
-        actual = self.cal_composite_backward(x_p, n_shape, w_p, b_p)
+        expect = expect_backward(x_p, n_shape, w_p, b_p)[0].numpy()
+        actual = self.cal_composite_backward(x, n_shape, w, b)[0]

         assert expect.dtype == actual.dtype
         np.testing.assert_allclose(
             expect,
             actual,
-            rtol=attrs.get_rtol("forward"),
-            atol=attrs.get_atol("forward"),
+            rtol=attrs.get_rtol("prim_backward"),
+            atol=attrs.get_rtol("prim_backward"),
         )

-        expect_2 = expect_backward(x_p, n_shape, None, None).numpy()
-        actual_2 = self.cal_composite_backward(x_p, n_shape, None, None).numpy()
+        expect_2 = expect_backward(x_p, n_shape, None, None)[0].numpy()
+        actual_2 = self.cal2_composite_backward(x, n_shape, None, None)[0]

         assert expect_2.dtype == actual_2.dtype
         np.testing.assert_allclose(
             expect_2,
             actual_2,
-            rtol=attrs.get_rtol("forward"),
-            atol=attrs.get_atol("forward"),
+            rtol=attrs.get_rtol("prim_backward"),
+            atol=attrs.get_atol("prim_backward"),
         )

     def test_prim_backward(self):
...
python/paddle/fluid/tests/unittests/prim/composite_ops/utils.py @ e43ea422

@@ -19,7 +19,6 @@ TOLERANCE = {
         "backward": {"rtol": 1e-3, "atol": 1e-3},
-        "prim_backward": {"rtol": 1e-3, "atol": 1e-3},
     },
     "float32": {
         "forward": {"rtol": 1e-6, "atol": 1e-6},
         "backward": {"rtol": 1e-6, "atol": 1e-6},
...
@@ -34,6 +33,11 @@ TOLERANCE = {
 # this tolerance is for big composite ops like batch_norm.
 SUB_TOLERANCE = {
     "float16": {
         "forward": {"rtol": 1e-3, "atol": 1e-3},
         "backward": {"rtol": 1e-3, "atol": 1e-3},
+        "prim_backward": {"rtol": 1e-3, "atol": 1e-3},
     },
     "float32": {
         "forward": {"rtol": 1e-5, "atol": 1e-5},
         "backward": {"rtol": 1e-5, "atol": 1e-5},
...
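With the float16 prim_backward entry in place, the grad test's Attr helper resolves its tolerance flags through SUB_TOLERANCE. A usage sketch (not part of the commit), using only values visible in the hunk above:

# Sketch: how Attr.get_rtol resolves against SUB_TOLERANCE after this change.
SUB_TOLERANCE = {
    "float16": {
        "forward": {"rtol": 1e-3, "atol": 1e-3},
        "backward": {"rtol": 1e-3, "atol": 1e-3},
        "prim_backward": {"rtol": 1e-3, "atol": 1e-3},
    },
}

dtype, flag = "float16", "prim_backward"
rtol = SUB_TOLERANCE[dtype][flag].get("rtol")  # mirrors Attr.get_rtol(flag)
assert rtol == 1e-3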