BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle, in sync with the fork source)
Unverified commit 230f3dac
Authored Aug 08, 2022 by ronnywang; committed by GitHub on Aug 08, 2022
[XPU] use np.testing.assert_allclose instead of assertTrue(np.allclose(...)), test=kunlun (#44799)
Parent: ede0990f
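All eight files follow the same mechanical rewrite: each self.assertTrue(np.allclose(...)) or self.assertTrue(np.array_equal(...)) check becomes a direct call to np.testing.assert_allclose, which on failure reports the mismatching elements and the actual/desired values instead of a bare "False is not true". A minimal sketch of the pattern with hypothetical array values (not taken from the changed files); note that np.allclose defaults to rtol=1e-05, atol=1e-08, while np.testing.assert_allclose defaults to rtol=1e-07, atol=0, so tests that relied on the looser defaults may need explicit tolerances:

import unittest
import numpy as np

class ExampleConversion(unittest.TestCase):

    def test_allclose_styles(self):
        # Hypothetical data, for illustration only.
        actual = np.array([1.0, 2.0, 3.0])
        desired = np.array([1.0, 2.0, 3.0 + 1e-9])
        # Old style: on failure unittest only reports "False is not true".
        self.assertTrue(np.allclose(actual, desired, atol=1e-3))
        # New style: on failure numpy reports the max absolute/relative error
        # and which elements differ.
        np.testing.assert_allclose(actual, desired, atol=1e-3)

if __name__ == '__main__':
    unittest.main()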
Showing 8 changed files with 54 additions and 48 deletions (+54 -48)
python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py                  +2   -2
python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py            +13  -11
python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py         +1   -1
python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py                    +11  -11
python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py                 +10  -13
python/paddle/fluid/tests/unittests/xpu/test_fleet_exe_dist_model_run_xpu.py   +1   -1
python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py         +6   -2
python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py                  +10  -7
python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py

@@ -70,8 +70,8 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase):
         res = exe.run(main_program,
                       feed={'x': feed_x},
                       fetch_list=[sums.name, x.grad_name])
-        self.assertTrue(np.allclose(res[0], feed_add))
-        self.assertTrue(np.allclose(res[1], ones / 1000.0))
+        np.testing.assert_allclose(res[0], feed_add)
+        np.testing.assert_allclose(res[1], ones / 1000.0)


 class TestAssignOpError(unittest.TestCase):
python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py

@@ -15,7 +15,7 @@
 from __future__ import print_function

 import unittest
-import numpy
+import numpy as np
 import sys

 sys.path.append("..")

@@ -53,7 +53,7 @@ class XPUTestAssignValueOp(XPUOpTestWrapper):
             self.outputs = {"Out": self.value}

         def init_data(self):
-            self.value = numpy.random.random(size=(2, 5)).astype(self.dtype)
+            self.value = np.random.random(size=(2, 5)).astype(self.dtype)
             self.attrs["fp32_values"] = [float(v) for v in self.value.flat]

         def test_forward(self):

@@ -62,20 +62,20 @@ class XPUTestAssignValueOp(XPUOpTestWrapper):
     class TestAssignValueOp2(TestAssignValueOp):

         def init_data(self):
-            self.value = numpy.random.random(size=(2, 5)).astype(numpy.int32)
+            self.value = np.random.random(size=(2, 5)).astype(np.int32)
             self.attrs["int32_values"] = [int(v) for v in self.value.flat]

     class TestAssignValueOp3(TestAssignValueOp):

         def init_data(self):
-            self.value = numpy.random.random(size=(2, 5)).astype(numpy.int64)
+            self.value = np.random.random(size=(2, 5)).astype(np.int64)
             self.attrs["int64_values"] = [int(v) for v in self.value.flat]

     class TestAssignValueOp4(TestAssignValueOp):

         def init_data(self):
-            self.value = numpy.random.choice(a=[False, True],
-                                             size=(2, 5)).astype(numpy.bool)
+            self.value = np.random.choice(a=[False, True],
+                                          size=(2, 5)).astype(np.bool)
             self.attrs["bool_values"] = [int(v) for v in self.value.flat]

@@ -83,7 +83,7 @@ class TestAssignApi(unittest.TestCase):

     def setUp(self):
         self.init_dtype()
-        self.value = (-100 + 200 * numpy.random.random(size=(2, 5))).astype(
-            self.dtype)
+        self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype(
+            self.dtype)
         self.place = fluid.XPUPlace(0)

@@ -98,8 +98,10 @@ class TestAssignApi(unittest.TestCase):
         exe = fluid.Executor(self.place)
         [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
-        self.assertTrue(numpy.array_equal(fetched_x, self.value),
-                        "fetch_x=%s val=%s" % (fetched_x, self.value))
+        np.testing.assert_allclose(fetched_x,
+                                   self.value,
+                                   err_msg="fetch_x=%s val=%s" %
+                                   (fetched_x, self.value))
         self.assertEqual(fetched_x.dtype, self.value.dtype)

@@ -119,8 +121,8 @@ class TestAssignApi4(TestAssignApi):

     def setUp(self):
         self.init_dtype()
-        self.value = numpy.random.choice(a=[False, True],
-                                         size=(2, 5)).astype(numpy.bool)
+        self.value = np.random.choice(a=[False, True],
+                                      size=(2, 5)).astype(np.bool)
         self.place = fluid.XPUPlace(0)

     def init_dtype(self):
python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py

@@ -512,7 +512,7 @@ class TestBilinearInterpOpAPI(unittest.TestCase):
        expect_res = bilinear_interp_np(
            x_data, out_h=12, out_w=12, align_corners=True)
        for res in results:
-            self.assertTrue(np.allclose(res, expect_res))
+            np.testing.assert_allclose(res, expect_res)
'''

if __name__ == "__main__":
python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py

@@ -186,14 +186,14 @@ class TestClipAPI(unittest.TestCase):
            },
            fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8])

-        self.assertTrue(np.allclose(res1, data.clip(0.2, 0.8)))
-        self.assertTrue(np.allclose(res2, data.clip(0.2, 0.9)))
-        self.assertTrue(np.allclose(res3, data.clip(min=0.3)))
-        self.assertTrue(np.allclose(res4, data.clip(max=0.7)))
-        self.assertTrue(np.allclose(res5, data.clip(min=0.2)))
-        self.assertTrue(np.allclose(res6, data.clip(max=0.8)))
-        self.assertTrue(np.allclose(res7, data.clip(max=-1)))
-        self.assertTrue(np.allclose(res8, data))
+        np.testing.assert_allclose(res1, data.clip(0.2, 0.8))
+        np.testing.assert_allclose(res2, data.clip(0.2, 0.9))
+        np.testing.assert_allclose(res3, data.clip(min=0.3))
+        np.testing.assert_allclose(res4, data.clip(max=0.7))
+        np.testing.assert_allclose(res5, data.clip(min=0.2))
+        np.testing.assert_allclose(res6, data.clip(max=0.8))
+        np.testing.assert_allclose(res7, data.clip(max=-1))
+        np.testing.assert_allclose(res8, data)
         paddle.disable_static()

     def test_clip_dygraph(self):

@@ -213,9 +213,9 @@ class TestClipAPI(unittest.TestCase):
         images = paddle.to_tensor(data, dtype='float32')
         out_3 = self._executed_api(images, min=v_min, max=v_max)

-        self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8)))
-        self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9)))
-        self.assertTrue(np.allclose(out_3.numpy(), data.clip(0.2, 0.8)))
+        np.testing.assert_allclose(out_1.numpy(), data.clip(0.2, 0.8))
+        np.testing.assert_allclose(out_2.numpy(), data.clip(0.2, 0.9))
+        np.testing.assert_allclose(out_3.numpy(), data.clip(0.2, 0.8))

     def test_errors(self):
         paddle.enable_static()
python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py

@@ -164,7 +164,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper):
             m = paddle.nn.Dropout(p=0.)
             m.eval()
             result = m(input)
-            self.assertTrue(np.allclose(result.numpy(), result_np))
+            np.testing.assert_allclose(result.numpy(), result_np)

     class TestDropoutBackward(unittest.TestCase):

@@ -188,10 +188,9 @@ class XPUTestDropoutOp(XPUOpTestWrapper):
                 out, mask = core.ops.dropout(input, 'dropout_prob', 0.5)
                 out.backward()

-                self.assertTrue(
-                    np.array_equal(input.gradient(),
-                                   self.cal_grad_downscale_in_infer(
-                                       mask.numpy())))
+                np.testing.assert_allclose(
+                    input.gradient(),
+                    self.cal_grad_downscale_in_infer(mask.numpy()))

         def test_backward_upscale_train(self):
             for place in self.places:

@@ -205,10 +204,9 @@ class XPUTestDropoutOp(XPUOpTestWrapper):
                                                  "upscale_in_train")
                 out.backward()

-                self.assertTrue(
-                    np.allclose(input.gradient(),
-                                self.cal_grad_upscale_train(
-                                    mask.numpy(), prob)))
+                np.testing.assert_allclose(
+                    input.gradient(),
+                    self.cal_grad_upscale_train(mask.numpy(), prob))

         def test_backward_upscale_train_2(self):
             for place in self.places:

@@ -222,10 +220,9 @@ class XPUTestDropoutOp(XPUOpTestWrapper):
                                                  "upscale_in_train")
                 out.backward()

-                self.assertTrue(
-                    np.allclose(input.gradient(),
-                                self.cal_grad_upscale_train(
-                                    mask.numpy(), prob)))
+                np.testing.assert_allclose(
+                    input.gradient(),
+                    self.cal_grad_upscale_train(mask.numpy(), prob))

 support_types = get_xpu_op_support_types('dropout')
python/paddle/fluid/tests/unittests/xpu/test_fleet_exe_dist_model_run_xpu.py

@@ -86,7 +86,7 @@ class TestDistModelRun(unittest.TestCase):
         print("load inference model api rst:", load_inference_model_rst)

         # step 5: compare two results
-        self.assertTrue(np.allclose(dist_model_rst, load_inference_model_rst))
+        np.testing.assert_allclose(dist_model_rst, load_inference_model_rst)


if __name__ == '__main__':
python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py

@@ -75,8 +75,12 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper):
            hist2, _ = np.histogram(data, range=(-3, 5))
            hist2 = hist2.astype("float32")
            hist2 /= float(outs[0].size)
-            self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.01),
-                            "hist: " + str(hist) + " hist2: " + str(hist2))
+            np.testing.assert_allclose(hist,
+                                       hist2,
+                                       rtol=0,
+                                       atol=0.01,
+                                       err_msg="hist: " + str(hist) +
+                                       " hist2: " + str(hist2))

    class TestMeanStdAreInt(TestGaussianRandomOp):
python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py

@@ -214,9 +214,10 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper):
                 expected_result = np.matmul(data1.reshape(1, 2),
                                             data2.reshape(2, 1))
-                self.assertTrue(
-                    np.allclose(np_res, expected_result, atol=1e-3), "two value is\
-{}\n{}, check diff!".format(np_res, expected_result))
+                np.testing.assert_allclose(np_res,
+                                           expected_result,
+                                           atol=1e-3,
+                                           err_msg="two value is\
+{}\n{}, check diff!".format(np_res, expected_result))

         def test_dygraph_without_out(self):

@@ -228,8 +229,9 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper):
                 data2 = fluid.dygraph.to_variable(input_array2)
                 out = paddle.mm(data1, data2)
                 expected_result = np.matmul(input_array1, input_array2)
-                self.assertTrue(
-                    np.allclose(expected_result, out.numpy(), atol=1e-3))
+                np.testing.assert_allclose(expected_result,
+                                           out.numpy(),
+                                           atol=1e-3)

     class Test_API_Matmul(unittest.TestCase):

@@ -244,8 +246,9 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper):
                     self.in_type)
                 out = paddle.matmul(data1, data2)
                 expected_result = np.matmul(input_array1, input_array2)
-                self.assertTrue(
-                    np.allclose(expected_result, out.numpy(), atol=1e-3))
+                np.testing.assert_allclose(expected_result,
+                                           out.numpy(),
+                                           atol=1e-3)

     class API_TestMmError(unittest.TestCase):
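Where the old assertion carried explicit tolerances or a custom failure message (as in the gaussian_random and matmul hunks above), the tolerances are passed through unchanged and the message moves to the err_msg keyword of assert_allclose. A small sketch with made-up values, for illustration only:

import numpy as np

# Hypothetical histograms; not taken from the changed tests.
hist = np.array([0.10, 0.20, 0.30])
hist2 = np.array([0.10, 0.205, 0.30])

# rtol/atol carry over directly; the old positional message becomes err_msg.
np.testing.assert_allclose(hist,
                           hist2,
                           rtol=0,
                           atol=0.01,
                           err_msg="hist: " + str(hist) + " hist2: " + str(hist2))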