Commit 73be70a3 (unverified) in BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Authored Aug 08, 2022 by ronnywang; committed by GitHub on Aug 08, 2022
[NPU] use np.testing.assert_allclose instead of assertTrue(np.allclose(...)) (#44798)
Parent: 99fb293c
Showing 58 changed files with 296 additions and 265 deletions (+296, -265)
python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py  +4 -4
python/paddle/fluid/tests/unittests/npu/test_adamw_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py  +14 -12
python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py  +6 -3
python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py  +18 -14
python/paddle/fluid/tests/unittests/npu/test_beam_search_decode_op_npu.py  +2 -3
python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py  +11 -11
python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py  +2 -4
python/paddle/fluid/tests/unittests/npu/test_cos_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_cumsum_op_npu.py  +8 -8
python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_elementwise_max_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_elementwise_min_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_elementwise_pow_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_elementwise_sub_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py  +1 -1
python/paddle/fluid/tests/unittests/npu/test_gather_nd_op_npu.py  +1 -1
python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py  +4 -4
python/paddle/fluid/tests/unittests/npu/test_gaussian_random_op_npu.py  +6 -2
python/paddle/fluid/tests/unittests/npu/test_gelu_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py  +4 -4
python/paddle/fluid/tests/unittests/npu/test_hard_swish_op_npu.py  +14 -10
python/paddle/fluid/tests/unittests/npu/test_index_select_op_npu.py  +4 -4
python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py  +1 -1
python/paddle/fluid/tests/unittests/npu/test_layer_norm_op_npu.py  +5 -4
python/paddle/fluid/tests/unittests/npu/test_leaky_relu_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_log_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py  +4 -4
python/paddle/fluid/tests/unittests/npu/test_memcpy_op_npu.py  +4 -4
python/paddle/fluid/tests/unittests/npu/test_merged_momentum_op_npu.py  +3 -3
python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py  +12 -12
python/paddle/fluid/tests/unittests/npu/test_mul_op_npu.py  +10 -10
python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py  +24 -12
python/paddle/fluid/tests/unittests/npu/test_nearest_interp_op_npu.py  +3 -3
python/paddle/fluid/tests/unittests/npu/test_nearest_interp_v2_op_npu.py  +1 -1
python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py  +14 -14
python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_reduce_sum_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_relu6_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_relu_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_rmsprop_op_npu.py  +4 -4
python/paddle/fluid/tests/unittests/npu/test_sgd_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_softmax_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_softmax_with_cross_entropy_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_split_op_npu.py  +10 -10
python/paddle/fluid/tests/unittests/npu/test_sqrt_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_square_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_squeeze_op_npu.py  +6 -6
python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py  +7 -9
python/paddle/fluid/tests/unittests/npu/test_tanh_op_npu.py  +2 -2
python/paddle/fluid/tests/unittests/npu/test_top_k_v2_op_npu.py  +26 -26
python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py  +4 -4
python/paddle/fluid/tests/unittests/npu/test_truncated_gaussian_random_op_npu.py  +1 -1
python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py  +10 -4
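The change is mechanical and repeated across the 58 files listed above: every boolean check of the form self.assertTrue(np.allclose(actual, expected, ...)) becomes a direct call to np.testing.assert_allclose(actual, expected, ...), which on failure reports the mismatched elements and the measured differences instead of a bare "False is not true"; custom failure messages move to the err_msg keyword, and exact integer comparisons move to np.testing.assert_array_equal. A minimal self-contained sketch of the pattern (the test class and arrays below are illustrative, not taken from any file in this commit):

import unittest

import numpy as np


class ExampleTest(unittest.TestCase):

    def test_close(self):
        # Hypothetical stand-ins for an NPU result and a CPU reference.
        npu_out = np.array([1.0, 2.0, 3.000001], dtype=np.float32)
        cpu_out = np.array([1.0, 2.0, 3.0], dtype=np.float32)

        # Old style: on failure unittest only reports "False is not true".
        self.assertTrue(np.allclose(npu_out, cpu_out, rtol=1e-3))

        # New style: on failure numpy raises AssertionError listing the
        # offending elements and the max absolute/relative differences.
        np.testing.assert_allclose(npu_out, cpu_out, rtol=1e-3)


if __name__ == '__main__':
    unittest.main()
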
python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py
@@ -304,8 +304,8 @@ class TestNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         npu_pred, npu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred, rtol=1e-3))
-        self.assertTrue(np.allclose(npu_loss, cpu_loss, rtol=1e-3))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-3)
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-3)
 
 
 class TestNetWithEpsilonTensor(unittest.TestCase):
@@ -447,9 +447,9 @@ class TestNetWithEpsilonTensor(unittest.TestCase):
             preds.append(pred)
             losses.append(loss)
         for pred in preds:
-            self.assertTrue(np.allclose(pred, preds[0]))
+            np.testing.assert_allclose(pred, preds[0])
         for loss in losses:
-            self.assertTrue(np.allclose(loss, losses[0]))
+            np.testing.assert_allclose(loss, losses[0])
 
     def test_adam_api(self):
         # NOTE(zhiqiu): cpu and gpu has different seed, so should compare separatly.

python/paddle/fluid/tests/unittests/npu/test_adamw_op_npu.py
@@ -249,8 +249,8 @@ class TestNet(unittest.TestCase):
     def test_npu(self):
         npu_pred, npu_loss = self._test(True)
         cpu_pred, cpu_loss = self._test(False)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred, rtol=1e-3))
-        self.assertTrue(np.allclose(npu_loss, cpu_loss, rtol=1e-3))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=5e-3)
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=5e-3)
 
 
 if __name__ == '__main__':

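The test_adamw_op_npu.py hunk above also loosens the comparison from rtol=1e-3 to rtol=5e-3 while switching APIs. Both the old np.allclose check and the new assert_allclose use the same element-wise criterion, |actual - desired| <= atol + rtol * |desired|, so the small sketch below (made-up values, not taken from this test) shows what the looser tolerance admits:

import numpy as np

desired = np.array([1.0, 100.0])
actual = desired * (1.0 + 4e-3)  # 0.4% relative error on every element

# Fails at the old tolerance of rtol=1e-3 ...
try:
    np.testing.assert_allclose(actual, desired, rtol=1e-3)
except AssertionError:
    pass

# ... and passes at the loosened rtol=5e-3 used after this change.
np.testing.assert_allclose(actual, desired, rtol=5e-3)
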
python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py
@@ -95,7 +95,7 @@ class TestCheckFiniteAndUnscale(unittest.TestCase):
         out, found_inf = self.run_prog(a, b, scale)
         print(out, found_inf)
 
-        self.assertTrue(np.allclose(out, (a / b) / scale[0]))
+        np.testing.assert_allclose(out, (a / b) / scale[0])
         self.assertFalse(found_inf[0])
 
@@ -159,7 +159,7 @@ class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase):
         out, found_inf = self.run_prog(a, b, scale)
         print(out, found_inf)
 
-        self.assertTrue(np.allclose(out, (a + b) / scale[0]))
+        np.testing.assert_allclose(out, (a + b) / scale[0])
         self.assertFalse(found_inf[0])

python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py
@@ -15,7 +15,7 @@
 from __future__ import print_function
 
 import unittest
-import numpy
+import numpy as np
 import sys
 
 sys.path.append("..")
@@ -27,7 +27,7 @@ import paddle.fluid.framework as framework
 import paddle.fluid.layers as layers
 
 paddle.enable_static()
-numpy.random.seed(2021)
+np.random.seed(2021)
 
 
 class TestAssignValueNPUOp(op_test.OpTest):
@@ -50,7 +50,7 @@ class TestAssignValueNPUOp(op_test.OpTest):
         self.__class__.use_npu = True
 
     def init_data(self):
-        self.value = numpy.random.random(size=(2, 5)).astype(numpy.float32)
+        self.value = np.random.random(size=(2, 5)).astype(np.float32)
         self.attrs["fp32_values"] = [float(v) for v in self.value.flat]
 
     def test_forward(self):
@@ -60,22 +60,22 @@ class TestAssignValueNPUOp(op_test.OpTest):
 class TestAssignValueNPUOp2(TestAssignValueNPUOp):
 
     def init_data(self):
-        self.value = numpy.random.random(size=(2, 5)).astype(numpy.int32)
+        self.value = np.random.random(size=(2, 5)).astype(np.int32)
         self.attrs["int32_values"] = [int(v) for v in self.value.flat]
 
 
 class TestAssignValueNPUOp3(TestAssignValueNPUOp):
 
     def init_data(self):
-        self.value = numpy.random.random(size=(2, 5)).astype(numpy.int64)
+        self.value = np.random.random(size=(2, 5)).astype(np.int64)
         self.attrs["int64_values"] = [int(v) for v in self.value.flat]
 
 
 class TestAssignValueNPUOp4(TestAssignValueNPUOp):
 
     def init_data(self):
-        self.value = numpy.random.choice(a=[False, True],
-                                         size=(2, 5)).astype(numpy.bool)
+        self.value = np.random.choice(a=[False, True],
+                                      size=(2, 5)).astype(np.bool)
         self.attrs["bool_values"] = [int(v) for v in self.value.flat]
@@ -83,7 +83,7 @@ class TestAssignApi(unittest.TestCase):
 
     def setUp(self):
         self.init_dtype()
-        self.value = (-100 + 200 * numpy.random.random(size=(2, 5))).astype(
+        self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype(
             self.dtype)
         self.place = fluid.NPUPlace(
             0) if fluid.core.is_compiled_with_npu() else fluid.CPUPlace()
@@ -99,8 +99,10 @@ class TestAssignApi(unittest.TestCase):
         exe = fluid.Executor(self.place)
         [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
-        self.assertTrue(numpy.array_equal(fetched_x, self.value),
-                        "fetch_x=%s val=%s" % (fetched_x, self.value))
+        np.testing.assert_allclose(fetched_x,
+                                   self.value,
+                                   err_msg="fetch_x=%s val=%s" %
+                                   (fetched_x, self.value))
         self.assertEqual(fetched_x.dtype, self.value.dtype)
@@ -120,8 +122,8 @@ class TestAssignApi4(TestAssignApi):
 
     def setUp(self):
         self.init_dtype()
-        self.value = numpy.random.choice(a=[False, True],
-                                         size=(2, 5)).astype(numpy.bool)
+        self.value = np.random.choice(a=[False, True],
+                                      size=(2, 5)).astype(np.bool)
         self.place = fluid.NPUPlace(
             0) if fluid.core.is_compiled_with_npu() else fluid.CPUPlace()

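In test_assign_value_op_npu.py above, assertions that carried a custom failure message as the second argument to assertTrue now pass it through assert_allclose's err_msg keyword, so the message is appended to numpy's own mismatch report. A standalone sketch of that pattern (the arrays are placeholders, not values from the test):

import numpy as np

fetched_x = np.zeros((2, 5), dtype=np.float32)  # stand-in for an executor output
value = np.zeros((2, 5), dtype=np.float32)      # stand-in for the expected tensor

# err_msg is printed after the built-in mismatch summary when the check fails.
np.testing.assert_allclose(fetched_x,
                           value,
                           err_msg="fetch_x=%s val=%s" % (fetched_x, value))
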
python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py
@@ -40,7 +40,10 @@ class TestBatchNormOpInference(unittest.TestCase):
         self.data_formats = ["NCHW", "NHWC"]
 
     def __assert_close(self, tensor, np_array, msg, atol=1e-4):
-        self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
+        np.testing.assert_allclose(np.array(tensor),
+                                   np_array,
+                                   atol=atol,
+                                   err_msg=msg)
 
     def check_with_place(self, place, data_layout, dtype, shape):
         epsilon = epsilon = 0.00001
@@ -475,7 +478,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase):
             x = np.random.randn(*shape).astype("float32")
             y1 = compute(x, False, False)
             y2 = compute(x, True, True)
-            self.assertTrue(np.allclose(y1, y2))
+            np.testing.assert_allclose(y1, y2, rtol=1e-5)
 
     def test_static(self):
         places = [fluid.NPUPlace(0)]
@@ -498,7 +501,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase):
             x = np.random.randn(*shape).astype("float32")
             y1 = compute(x, False, False)
             y2 = compute(x, True, True)
-            self.assertTrue(np.allclose(y1, y2, atol=1e-5))
+            np.testing.assert_allclose(y1, y2, atol=1e-5)
 
 
 if __name__ == "__main__":

python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py
@@ -60,7 +60,7 @@ def test_static_layer(place,
                                   "weight": weight_np
                               },
                               fetch_list=[res])
-    return static_result
+    return static_result[0]
 
 
 def test_static_functional(place,
@@ -100,7 +100,7 @@ def test_static_functional(place,
                                   "weight": weight_np
                               },
                               fetch_list=[res])
-    return static_result
+    return static_result[0]
 
 
 def test_dygraph_layer(place,
@@ -178,16 +178,18 @@ class TestBCELoss(unittest.TestCase):
                 dy_result = test_dygraph_layer(place, input_np, label_np,
                                                reduction)
                 expected = calc_bceloss(input_np, label_np, reduction)
-                self.assertTrue(np.allclose(static_result, expected))
-                self.assertTrue(np.allclose(static_result, dy_result))
-                self.assertTrue(np.allclose(dy_result, expected))
+                np.testing.assert_allclose(static_result, expected, rtol=1e-6)
+                np.testing.assert_allclose(static_result, dy_result)
+                np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
                 static_functional = test_static_functional(
                     place, input_np, label_np, reduction)
                 dy_functional = test_dygraph_functional(place, input_np,
                                                         label_np, reduction)
-                self.assertTrue(np.allclose(static_functional, expected))
-                self.assertTrue(np.allclose(static_functional, dy_functional))
-                self.assertTrue(np.allclose(dy_functional, expected))
+                np.testing.assert_allclose(static_functional,
+                                           expected,
+                                           rtol=1e-6)
+                np.testing.assert_allclose(static_functional, dy_functional)
+                np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
 
     def test_BCELoss_weight(self):
         input_np = np.random.uniform(0.1, 0.8,
@@ -212,9 +214,9 @@ class TestBCELoss(unittest.TestCase):
                                        label_np,
                                        reduction,
                                        weight_np=weight_np)
-        self.assertTrue(np.allclose(static_result, expected))
-        self.assertTrue(np.allclose(static_result, dy_result))
-        self.assertTrue(np.allclose(dy_result, expected))
+        np.testing.assert_allclose(static_result, expected, rtol=1e-6)
+        np.testing.assert_allclose(static_result, dy_result, rtol=1e-6)
+        np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
         static_functional = test_static_functional(place,
                                                    input_np,
                                                    label_np,
@@ -225,9 +227,11 @@ class TestBCELoss(unittest.TestCase):
                                                label_np,
                                                reduction,
                                                weight_np=weight_np)
-        self.assertTrue(np.allclose(static_functional, expected))
-        self.assertTrue(np.allclose(static_functional, dy_functional))
-        self.assertTrue(np.allclose(dy_functional, expected))
+        np.testing.assert_allclose(static_functional, expected, rtol=1e-6)
+        np.testing.assert_allclose(static_functional,
+                                   dy_functional,
+                                   rtol=1e-6)
+        np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
 
     def test_BCELoss_error(self):
         paddle.disable_static(paddle.NPUPlace(0))

python/paddle/fluid/tests/unittests/npu/test_beam_search_decode_op_npu.py
@@ -92,9 +92,8 @@ class TestBeamSearchDecodeNPUOp(unittest.TestCase):
         expected_data = np.array(
             [0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64")
-        self.assertTrue(np.array_equal(np.array(sentence_ids), expected_data))
-        self.assertTrue(np.array_equal(np.array(sentence_scores),
-                                       expected_data))
+        np.testing.assert_array_equal(np.array(sentence_ids), expected_data)
+        np.testing.assert_array_equal(np.array(sentence_scores), expected_data)
 
 
 if __name__ == '__main__':

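In test_beam_search_decode_op_npu.py above, the outputs are integer id sequences, so the exact check self.assertTrue(np.array_equal(...)) becomes np.testing.assert_array_equal(...) rather than a tolerance-based assert_allclose. A standalone sketch of that variant (values are illustrative):

import numpy as np

sentence_ids = np.array([0, 2, 3, 1], dtype="int64")  # stand-in for decoded ids
expected = np.array([0, 2, 3, 1], dtype="int64")

# Element-wise exact comparison; a failure lists the differing positions.
np.testing.assert_array_equal(sentence_ids, expected)
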
python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py
@@ -173,14 +173,14 @@ class TestClipAPI(unittest.TestCase):
             },
             fetch_list=[
                 out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8
             ])
 
-        self.assertTrue(np.allclose(res1, data.clip(0.2, 0.8)))
-        self.assertTrue(np.allclose(res2, data.clip(0.2, 0.9)))
-        self.assertTrue(np.allclose(res3, data.clip(min=0.3)))
-        self.assertTrue(np.allclose(res4, data.clip(max=0.7)))
-        self.assertTrue(np.allclose(res5, data.clip(min=0.2)))
-        self.assertTrue(np.allclose(res6, data.clip(max=0.8)))
-        self.assertTrue(np.allclose(res7, data.clip(max=-1)))
-        self.assertTrue(np.allclose(res8, data))
+        np.testing.assert_allclose(res1, data.clip(0.2, 0.8))
+        np.testing.assert_allclose(res2, data.clip(0.2, 0.9))
+        np.testing.assert_allclose(res3, data.clip(min=0.3))
+        np.testing.assert_allclose(res4, data.clip(max=0.7))
+        np.testing.assert_allclose(res5, data.clip(min=0.2))
+        np.testing.assert_allclose(res6, data.clip(max=0.8))
+        np.testing.assert_allclose(res7, data.clip(max=-1))
+        np.testing.assert_allclose(res8, data)
         paddle.disable_static()
 
     def test_clip_dygraph(self):
@@ -200,9 +200,9 @@ class TestClipAPI(unittest.TestCase):
         images = paddle.to_tensor(data, dtype='float32')
         out_3 = self._executed_api(images, min=v_min, max=v_max)
 
-        self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8)))
-        self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9)))
-        self.assertTrue(np.allclose(out_3.numpy(), data.clip(0.2, 0.8)))
+        np.testing.assert_allclose(out_1.numpy(), data.clip(0.2, 0.8))
+        np.testing.assert_allclose(out_2.numpy(), data.clip(0.2, 0.9))
+        np.testing.assert_allclose(out_3.numpy(), data.clip(0.2, 0.8))
 
     def test_errors(self):
         paddle.enable_static()

python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py
@@ -216,5 +216,5 @@ class TestDistBase(unittest.TestCase):
         if col_type == "identity":
             need_result1 = input1
             need_result2 = input2
-            self.assertTrue(np.allclose(tr0_out, need_result1, rtol=0, atol=0))
-            self.assertTrue(np.allclose(tr1_out, need_result2, rtol=0, atol=0))
+            np.testing.assert_allclose(tr0_out, need_result1, rtol=0, atol=0)
+            np.testing.assert_allclose(tr1_out, need_result2, rtol=0, atol=0)

python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py
@@ -218,10 +218,8 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
         self.assertTrue(self.out_var.shape[self.axis] == -1)
         exe = fluid.Executor(self.place)
         res = exe.run(self.program, fetch_list=self.out_var)
-        self.assertTrue(
-            np.array_equal(
-                res[0],
-                np.concatenate([self.x] * self.iter_num, axis=self.axis)))
+        np.testing.assert_allclose(
+            res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis))
 
 
 if __name__ == '__main__':

python/paddle/fluid/tests/unittests/npu/test_cos_op_npu.py
@@ -142,8 +142,8 @@ class TestCosNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         npu_pred, npu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)
 
 
 if __name__ == '__main__':

python/paddle/fluid/tests/unittests/npu/test_cumsum_op_npu.py
@@ -33,15 +33,15 @@ class TestCumsumOp(unittest.TestCase):
             y = paddle.cumsum(data)
             z = np.cumsum(data_np)
-            self.assertTrue(np.array_equal(z, y.numpy()))
+            np.testing.assert_array_equal(z, y.numpy())
 
             y = paddle.cumsum(data, axis=0)
             z = np.cumsum(data_np, axis=0)
-            self.assertTrue(np.array_equal(z, y.numpy()))
+            np.testing.assert_array_equal(z, y.numpy())
 
             y = paddle.cumsum(data, axis=-1)
             z = np.cumsum(data_np, axis=-1)
-            self.assertTrue(np.array_equal(z, y.numpy()))
+            np.testing.assert_array_equal(z, y.numpy())
 
             y = paddle.cumsum(data, dtype='float32')
             self.assertTrue(y.dtype == core.VarDesc.VarType.FP32)
@@ -51,7 +51,7 @@ class TestCumsumOp(unittest.TestCase):
             y = paddle.cumsum(data, axis=-2)
             z = np.cumsum(data_np, axis=-2)
-            self.assertTrue(np.array_equal(z, y.numpy()))
+            np.testing.assert_array_equal(z, y.numpy())
 
     def run_static(self, use_npu=False):
         with fluid.program_guard(fluid.Program()):
@@ -74,15 +74,15 @@ class TestCumsumOp(unittest.TestCase):
             ])
 
             z = np.cumsum(data_np)
-            self.assertTrue(np.allclose(z, out[0]))
+            np.testing.assert_allclose(z, out[0])
             z = np.cumsum(data_np, axis=0)
-            self.assertTrue(np.allclose(z, out[1]))
+            np.testing.assert_allclose(z, out[1])
             z = np.cumsum(data_np, axis=-1)
-            self.assertTrue(np.allclose(z, out[2]))
+            np.testing.assert_allclose(z, out[2])
             self.assertTrue(out[3].dtype == np.float32)
             self.assertTrue(out[4].dtype == np.int32)
             z = np.cumsum(data_np, axis=-2)
-            self.assertTrue(np.allclose(z, out[5]))
+            np.testing.assert_allclose(z, out[5])
 
     def test_npu(self):
         # Now, npu tests need setting paddle.enable_static()

python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py
@@ -269,11 +269,11 @@ class TestDropoutAPI(unittest.TestCase):
             fetches = exe.run(fluid.default_main_program(),
                               feed={"input": in_np},
                               fetch_list=[res])
-            self.assertTrue(np.allclose(fetches[0], res_np))
+            np.testing.assert_allclose(fetches[0], res_np)
             fetches2 = exe.run(fluid.default_main_program(),
                                feed={"input": in_np},
                                fetch_list=[res6])
-            self.assertTrue(np.allclose(fetches2[0], res_np2))
+            np.testing.assert_allclose(fetches2[0], res_np2)
 
     def test_static(self):
         for place in self.places:

python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py
@@ -178,8 +178,8 @@ class TestElementwiseDivNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         npu_pred, npu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)
 
 
 class TestFloatStatus(unittest.TestCase):

python/paddle/fluid/tests/unittests/npu/test_elementwise_max_op_npu.py
@@ -326,8 +326,8 @@ class TestElementwiseMaxNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         npu_pred, npu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_pred, cpu_pred)
+        np.testing.assert_allclose(npu_loss, cpu_loss)
 
 
 if __name__ == '__main__':

python/paddle/fluid/tests/unittests/npu/test_elementwise_min_op_npu.py
@@ -222,8 +222,8 @@ class TestElementwiseMinOpNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         npu_pred, npu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)
 
 
 if __name__ == '__main__':

python/paddle/fluid/tests/unittests/npu/test_elementwise_pow_op_npu.py
@@ -328,8 +328,8 @@ class TestElementwisePowNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         npu_pred, npu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)
 
 
 if __name__ == '__main__':

python/paddle/fluid/tests/unittests/npu/test_elementwise_sub_op_npu.py
@@ -231,8 +231,8 @@ class TestSubtractNet(unittest.TestCase):
         npu_pred, npu_loss = self._test(True)
         cpu_pred, cpu_loos = self._test(False)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
-        self.assertTrue(np.allclose(npu_loss, cpu_loos))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(npu_loss, cpu_loos, rtol=1e-6)
 
 
 if __name__ == '__main__':

python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py
@@ -132,7 +132,7 @@ class TestExpandNet(unittest.TestCase):
         cpu_loss = self._test(False)
         npu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)
 
 
 # ------------------------------------------------

python/paddle/fluid/tests/unittests/npu/test_gather_nd_op_npu.py
@@ -282,7 +282,7 @@ class TestGatherNdAPI(unittest.TestCase):
             output = paddle.fluid.layers.gather(input, index)
             output_np = output.numpy()
             expected_output = np.array([3, 4])
-            self.assertTrue(np.allclose(output_np, expected_output))
+            np.testing.assert_allclose(output_np[0], expected_output)
         paddle.enable_static()

python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py
@@ -102,7 +102,7 @@ class API_TestGather(unittest.TestCase):
                              },
                              fetch_list=[out])
             expected_output = np.array([[3, 4], [5, 6]])
-            self.assertTrue(np.allclose(result, expected_output))
+            np.testing.assert_allclose(result, expected_output, rtol=1e-5)
 
     def test_out2(self):
         with paddle.static.program_guard(paddle.static.Program(),
@@ -120,7 +120,7 @@ class API_TestGather(unittest.TestCase):
                              },
                              fetch_list=[out])
             expected_output = gather_numpy(x_np, index_np, axis=0)
-            self.assertTrue(np.allclose(result, expected_output))
+            np.testing.assert_allclose(result, expected_output, rtol=1e-5)
 
 
 class TestGatherGrad(unittest.TestCase):
@@ -174,8 +174,8 @@ class TestGatherGrad(unittest.TestCase):
         npu_pred, npu_loss = self._test(True)
         cpu_pred, cpu_loss = self._test(False)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-5)
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-5)
 
 
 if __name__ == "__main__":

python/paddle/fluid/tests/unittests/npu/test_gaussian_random_op_npu.py
@@ -70,8 +70,12 @@ class TestNPUGaussianRandomOp(OpTest):
         hist2, _ = np.histogram(data, range=(-3, 5))
         hist2 = hist2.astype("float32")
         hist2 /= float(outs[0].size)
-        self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.01),
-                        "hist: " + str(hist) + " hist2: " + str(hist2))
+        np.testing.assert_allclose(hist,
+                                   hist2,
+                                   rtol=0,
+                                   atol=0.01,
+                                   err_msg="hist: " + str(hist) + " hist2: " +
+                                   str(hist2))
 
 
 if __name__ == "__main__":

python/paddle/fluid/tests/unittests/npu/test_gelu_op_npu.py
@@ -150,8 +150,8 @@ class TestGeluNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         npu_pred, npu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred, atol=1e-3))
-        self.assertTrue(np.allclose(npu_loss, cpu_loss, atol=1e-3))
+        np.testing.assert_allclose(npu_pred, cpu_pred, atol=1e-3)
+        np.testing.assert_allclose(npu_loss, cpu_loss, atol=1e-3)
 
 
 if __name__ == '__main__':

python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py
@@ -113,7 +113,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_hardsigmoid(self.x_np)
         for r in res:
-            self.assertTrue(np.allclose(out_ref, r))
+            np.testing.assert_allclose(out_ref, r, rtol=1e-6)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -123,7 +123,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = ref_hardsigmoid(self.x_np)
         for r in [out1, out2]:
-            self.assertTrue(np.allclose(out_ref, r.numpy()))
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-6)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -133,12 +133,12 @@ class TestHardsigmoidAPI(unittest.TestCase):
             exe = fluid.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
-        self.assertTrue(np.allclose(out_ref, res[0]))
+        np.testing.assert_allclose(out_ref, res[0])
 
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out = paddle.fluid.layers.hard_sigmoid(x)
-        self.assertTrue(np.allclose(out_ref, out.numpy()))
+        np.testing.assert_allclose(out_ref, out.numpy())
         paddle.enable_static()
 
     def test_errors(self):

python/paddle/fluid/tests/unittests/npu/test_hard_swish_op_npu.py
@@ -115,16 +115,20 @@ class TestHardSwishNPUWithCPU(unittest.TestCase):
         y = F.hardswish(data)
         y.sum().backward()
 
-        self.assertTrue(
-            np.allclose(self.out_y.numpy(), y.numpy()),
-            "Output of NPU HardSwish forward has diff at " + str(self.place) +
-            "\nExpect " + str(self.out_y) + "\n" + "But Got" + str(y) +
-            " in class " + self.__class__.__name__ + ".")
-
-        self.assertTrue(
-            np.allclose(self.out_g.numpy(), data.grad.numpy()),
-            "Output of NPU HardSwish backward has diff at " + str(self.place) +
-            "\nExpect " + str(self.out_g) + "\n" + "But Got" + str(data.grad) +
-            " in class " + self.__class__.__name__ + ".")
+        np.testing.assert_allclose(
+            self.out_y.numpy(),
+            y.numpy(),
+            err_msg="Output of NPU HardSwish forward has diff at " +
+            str(self.place) + "\nExpect " + str(self.out_y) + "\n" + "But Got" +
+            str(y) + " in class " + self.__class__.__name__ + ".",
+            rtol=1e-5)
+
+        np.testing.assert_allclose(
+            self.out_g.numpy(),
+            data.grad.numpy(),
+            err_msg="Output of NPU HardSwish backward has diff at " +
+            str(self.place) + "\nExpect " + str(self.out_g) + "\n" + "But Got" +
+            str(data.grad) + " in class " + self.__class__.__name__ + ".",
+            rtol=1e-5)
 
 
 if __name__ == '__main__':

python/paddle/fluid/tests/unittests/npu/test_index_select_op_npu.py
@@ -133,7 +133,7 @@ class TestNPUIndexSelectAPI(unittest.TestCase):
                           return_numpy=False)
             expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0],
                                    [9.0, 10.0, 10.0]]).astype('float32')
-            self.assertTrue(np.allclose(expect_out, np.array(res)))
+            np.testing.assert_allclose(expect_out, np.array(res))
 
         # case 2:
         with program_guard(Program(), Program()):
@@ -149,7 +149,7 @@ class TestNPUIndexSelectAPI(unittest.TestCase):
                           return_numpy=False)
             expect_out = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0],
                                    [5.0, 6.0, 7.0, 8.0]]).astype('float32')
-            self.assertTrue(np.allclose(expect_out, np.array(res)))
+            np.testing.assert_allclose(expect_out, np.array(res))
 
     def test_dygraph_index_select_api(self):
         paddle.set_device("npu:0")
@@ -163,7 +163,7 @@ class TestNPUIndexSelectAPI(unittest.TestCase):
         np_z = z.numpy()
         expect_out = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0],
                                [5.0, 6.0, 7.0, 8.0]]).astype('float32')
-        self.assertTrue(np.allclose(expect_out, np_z))
+        np.testing.assert_allclose(expect_out, np_z)
 
         # case 2:
         x = paddle.to_tensor(self.data_x)
@@ -172,7 +172,7 @@ class TestNPUIndexSelectAPI(unittest.TestCase):
         np_z = z.numpy()
         expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0],
                                [9.0, 10.0, 10.0]]).astype('float32')
-        self.assertTrue(np.allclose(expect_out, np_z))
+        np.testing.assert_allclose(expect_out, np_z)
 
 
 if __name__ == '__main__':

python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py
@@ -117,7 +117,7 @@ class TestKLDivLossDygraph(unittest.TestCase):
             kldiv_criterion = paddle.nn.KLDivLoss(reduction)
             pred_loss = kldiv_criterion(paddle.to_tensor(x),
                                         paddle.to_tensor(target))
-            self.assertTrue(np.allclose(pred_loss.numpy(), gt_loss))
+            np.testing.assert_allclose(pred_loss.numpy(), gt_loss, rtol=1e-6)
 
     def test_kl_loss_batchmean(self):
         self.run_kl_loss('batchmean')

python/paddle/fluid/tests/unittests/npu/test_layer_norm_op_npu.py
@@ -53,10 +53,11 @@ class TestLayerNormOp(unittest.TestCase):
         self.atol = 1e-4
 
     def __assert_close(self, tensor, np_array, msg, atol=1e-4):
-        self.assertTrue(
-            np.allclose(np.array(tensor).astype(np_array.dtype),
-                        np_array,
-                        atol=atol), msg)
+        np.testing.assert_allclose(np.array(tensor).astype(
+            np_array.dtype).reshape(np_array.shape),
+                                   np_array,
+                                   atol=atol,
+                                   err_msg=msg)
 
     def check_forward_backward(self,
                                shape,

python/paddle/fluid/tests/unittests/npu/test_leaky_relu_op_npu.py
@@ -145,8 +145,8 @@ class TestLeakyReluNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         npu_pred, npu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)
 
 
 if __name__ == '__main__':

python/paddle/fluid/tests/unittests/npu/test_log_op_npu.py
@@ -142,8 +142,8 @@ class TestLogNet(unittest.TestCase):
         cpu_pred, cpu_loss = self._test(False)
         npu_pred, npu_loss = self._test(True)
 
-        self.assertTrue(np.allclose(npu_pred, cpu_pred, atol=1e-4))
-        self.assertTrue(np.allclose(npu_loss, cpu_loss, atol=1e-4))
+        np.testing.assert_allclose(npu_pred, cpu_pred, atol=1e-4)
+        np.testing.assert_allclose(npu_loss, cpu_loss, atol=1e-4)
 
 
 if __name__ == '__main__':

...
python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py
浏览文件 @
73be70a3
...
@@ -128,13 +128,13 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):
...
@@ -128,13 +128,13 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):
y
=
logsoftmax
(
x
)
y
=
logsoftmax
(
x
)
exe
=
paddle
.
static
.
Executor
(
self
.
place
)
exe
=
paddle
.
static
.
Executor
(
self
.
place
)
out
=
exe
.
run
(
feed
=
{
'x'
:
self
.
x
},
fetch_list
=
[
y
])
out
=
exe
.
run
(
feed
=
{
'x'
:
self
.
x
},
fetch_list
=
[
y
])
self
.
assertTrue
(
np
.
allclose
(
out
[
0
],
ref_out
)
)
np
.
testing
.
assert_allclose
(
out
[
0
],
ref_out
,
rtol
=
1e-6
)
# test dygrapg api
# test dygrapg api
paddle
.
disable_static
(
self
.
place
)
paddle
.
disable_static
(
self
.
place
)
x
=
paddle
.
to_tensor
(
self
.
x
)
x
=
paddle
.
to_tensor
(
self
.
x
)
y
=
logsoftmax
(
x
)
y
=
logsoftmax
(
x
)
self
.
assertTrue
(
np
.
allclose
(
y
.
numpy
(),
ref_out
)
)
np
.
testing
.
assert_allclose
(
y
.
numpy
(),
ref_out
,
rtol
=
1e-6
)
paddle
.
enable_static
()
paddle
.
enable_static
()
def
test_check_api
(
self
):
def
test_check_api
(
self
):
...
@@ -161,12 +161,12 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
...
@@ -161,12 +161,12 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
y
=
F
.
log_softmax
(
x
,
axis
,
dtype
)
y
=
F
.
log_softmax
(
x
,
axis
,
dtype
)
exe
=
paddle
.
static
.
Executor
(
self
.
place
)
exe
=
paddle
.
static
.
Executor
(
self
.
place
)
out
=
exe
.
run
(
feed
=
{
'x'
:
self
.
x
},
fetch_list
=
[
y
])
out
=
exe
.
run
(
feed
=
{
'x'
:
self
.
x
},
fetch_list
=
[
y
])
self
.
assertTrue
(
np
.
allclose
(
out
[
0
],
ref_out
)
)
np
.
testing
.
assert_allclose
(
out
[
0
],
ref_out
,
rtol
=
1e-6
)
paddle
.
disable_static
(
self
.
place
)
paddle
.
disable_static
(
self
.
place
)
x
=
paddle
.
to_tensor
(
self
.
x
)
x
=
paddle
.
to_tensor
(
self
.
x
)
y
=
F
.
log_softmax
(
x
,
axis
,
dtype
)
y
=
F
.
log_softmax
(
x
,
axis
,
dtype
)
self
.
assertTrue
(
np
.
allclose
(
y
.
numpy
(),
ref_out
),
True
)
np
.
testing
.
assert_allclose
(
y
.
numpy
(),
ref_out
,
rtol
=
1e-6
)
paddle
.
enable_static
()
paddle
.
enable_static
()
def
test_check_api
(
self
):
def
test_check_api
(
self
):
...
...
python/paddle/fluid/tests/unittests/npu/test_memcpy_op_npu.py
@@ -76,8 +76,8 @@ class TestMemcpy_FillConstant(unittest.TestCase):
         npu_, cpu_ = exe.run(main_program,
                              feed={},
                              fetch_list=[npu_var.name, cpu_var.name])
-        self.assertTrue(np.allclose(npu_, cpu_))
-        self.assertTrue(np.allclose(cpu_, np.ones((10, 10))))
+        np.testing.assert_allclose(npu_, cpu_)
+        np.testing.assert_allclose(cpu_, np.ones((10, 10)))
 
     def test_cpu_cpoy_npu(self):
         main_program, npu_var, cpu_var = self.get_prog()
@@ -90,8 +90,8 @@ class TestMemcpy_FillConstant(unittest.TestCase):
         npu_, cpu_ = exe.run(main_program,
                              feed={},
                              fetch_list=[npu_var.name, cpu_var.name])
-        self.assertTrue(np.allclose(npu_, cpu_))
-        self.assertTrue(np.allclose(npu_, np.zeros((10, 10))))
+        np.testing.assert_allclose(npu_, cpu_)
+        np.testing.assert_allclose(npu_, np.zeros((10, 10)))
 
 
 if __name__ == '__main__':

python/paddle/fluid/tests/unittests/npu/test_merged_momentum_op_npu.py
@@ -316,7 +316,7 @@ class TestMergedMomentum(unittest.TestCase):
         outs2 = run_op(False)
         self.assertEqual(len(outs1), len(outs2))
         for i, (out1, out2) in enumerate(zip(outs1, outs2)):
-            self.assertTrue(np.allclose(out1, out2, atol=1e-7))
+            np.testing.assert_allclose(out1, out2, atol=1e-7)
 
     def test_main(self):
         self.check_with_place(self.place, multi_precision=False)
@@ -370,13 +370,13 @@ class TestMergedMomentum2(unittest.TestCase):
         outs2 = run_op(use_nesterov=True, use_merged=False)
         self.assertEqual(len(outs1), len(outs2))
         for i, (out1, out2) in enumerate(zip(outs1, outs2)):
-            self.assertTrue(np.allclose(out1, out2, atol=1e-7))
+            np.testing.assert_allclose(out1, out2, atol=1e-7)
 
         outs3 = run_op(use_nesterov=False, use_merged=True)
         outs4 = run_op(use_nesterov=False, use_merged=False)
         self.assertEqual(len(outs3), len(outs4))
         for j, (out3, out4) in enumerate(zip(outs3, outs4)):
-            self.assertTrue(np.allclose(out3, out4, atol=1e-7))
+            np.testing.assert_allclose(out3, out4, atol=1e-7)
 
     def test_main(self):
         self.check_with_place(self.place, multi_precision=False)

python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py
@@ -123,8 +123,8 @@ class TestMeshgridOp3(unittest.TestCase):
                               },
                               fetch_list=[grid_x, grid_y])
 
-        self.assertTrue(np.allclose(res_1, out_1))
-        self.assertTrue(np.allclose(res_2, out_2))
+        np.testing.assert_allclose(res_1, out_1)
+        np.testing.assert_allclose(res_2, out_2)
 
 
 class TestMeshgridOp4(unittest.TestCase):
@@ -154,8 +154,8 @@ class TestMeshgridOp4(unittest.TestCase):
                               },
                               fetch_list=[grid_x, grid_y])
 
-        self.assertTrue(np.allclose(res_1, out_1))
-        self.assertTrue(np.allclose(res_2, out_2))
+        np.testing.assert_allclose(res_1, out_1)
+        np.testing.assert_allclose(res_2, out_2)
 
 
 class TestMeshgridOp5(unittest.TestCase):
@@ -185,8 +185,8 @@ class TestMeshgridOp5(unittest.TestCase):
                               },
                               fetch_list=[grid_x, grid_y])
 
-        self.assertTrue(np.allclose(res_1, out_1))
-        self.assertTrue(np.allclose(res_2, out_2))
+        np.testing.assert_allclose(res_1, out_1)
+        np.testing.assert_allclose(res_2, out_2)
 
 
 class TestMeshgridOp6(unittest.TestCase):
@@ -209,8 +209,8 @@ class TestMeshgridOp6(unittest.TestCase):
         tensor_4 = paddle.to_tensor(input_4)
         res_3, res_4 = paddle.tensor.meshgrid(tensor_3, tensor_4)
 
-        self.assertTrue(np.allclose(res_3.numpy(), out_3))
-        self.assertTrue(np.allclose(res_4.numpy(), out_4))
+        np.testing.assert_allclose(res_3.numpy(), out_3)
+        np.testing.assert_allclose(res_4.numpy(), out_4)
 
         paddle.enable_static()
@@ -234,8 +234,8 @@ class TestMeshgridOp7(unittest.TestCase):
         tensor_4 = paddle.to_tensor(input_4)
         res_3, res_4 = paddle.meshgrid([tensor_3, tensor_4])
 
-        self.assertTrue(np.allclose(res_3.numpy(), out_3))
-        self.assertTrue(np.allclose(res_4.numpy(), out_4))
+        np.testing.assert_allclose(res_3.numpy(), out_3)
+        np.testing.assert_allclose(res_4.numpy(), out_4)
 
         paddle.enable_static()
@@ -259,8 +259,8 @@ class TestMeshgridOp8(unittest.TestCase):
         tensor_4 = paddle.to_tensor(input_4)
         res_3, res_4 = paddle.tensor.meshgrid((tensor_3, tensor_4))
 
-        self.assertTrue(np.allclose(res_3.numpy(), out_3))
-        self.assertTrue(np.allclose(res_4.numpy(), out_4))
+        np.testing.assert_allclose(res_3.numpy(), out_3)
+        np.testing.assert_allclose(res_4.numpy(), out_4)
 
         paddle.enable_static()

python/paddle/fluid/tests/unittests/npu/test_mul_op_npu.py
View file @ 73be70a3
...
@@ -285,8 +285,8 @@ class TestMulNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)

class TestMulNet3_2(unittest.TestCase):
...
@@ -358,9 +358,9 @@ class TestMulNet3_2(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred,
-                                    atol=1e-5))  # atol needed on cann 20.3
+        np.testing.assert_allclose(npu_pred, cpu_pred,
+                                   atol=1e-5)  # atol needed on cann 20.3
-        self.assertTrue(np.allclose(npu_loss, cpu_loss, atol=1e-5))
+        np.testing.assert_allclose(npu_loss, cpu_loss, atol=1e-5)

class TestMulNet3_2_xc2(unittest.TestCase):
...
@@ -433,8 +433,8 @@ class TestMulNet3_2_xc2(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)

class TestMulNet4_2(unittest.TestCase):
...
@@ -509,9 +509,9 @@ class TestMulNet4_2(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred,
-                                    atol=1e-5))  # atol needed on cann 20.3
+        np.testing.assert_allclose(npu_pred, cpu_pred,
+                                   atol=1e-5)  # atol needed on cann 20.3
-        self.assertTrue(np.allclose(npu_loss, cpu_loss, atol=1e-5))
+        np.testing.assert_allclose(npu_loss, cpu_loss, atol=1e-5)

if __name__ == '__main__':
...
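Several of the hunks above add an explicit rtol=1e-6 (or keep the existing atol=1e-5) while switching APIs. A plausible reason, noted here as an observation rather than anything stated in the commit, is that the default tolerances differ: np.allclose defaults to rtol=1e-05, atol=1e-08, while np.testing.assert_allclose defaults to the stricter rtol=1e-07, atol=0. A small sketch of that difference:

# Illustration only: why a relaxed rtol is often passed when migrating.
import numpy as np

a = np.float32(1.0)
b = np.float32(1.0 + 5e-7)                   # relative error of roughly 5e-7

print(np.allclose(a, b))                     # True under the default rtol=1e-05

try:
    np.testing.assert_allclose(a, b)         # default rtol=1e-07: raises
except AssertionError:
    print("too strict at the default tolerance")

np.testing.assert_allclose(a, b, rtol=1e-6)  # passes once rtol is relaxed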
python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py
View file @ 73be70a3
...
@@ -77,9 +77,12 @@ class TestMultinomialOp(OpTest):
        # normalize the input to get the probability
        prob = self.input_np / self.input_np.sum(axis=-1, keepdims=True)
        sample_prob = self.sample_output(np.array(outs[0]))
-        self.assertTrue(
-            np.allclose(sample_prob, prob, rtol=0, atol=0.01),
-            "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))
+        np.testing.assert_allclose(
+            sample_prob,
+            prob,
+            rtol=0,
+            atol=0.01,
+            err_msg="sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))

class TestMultinomialOp2(TestMultinomialOp):
...
@@ -122,9 +125,12 @@ class TestMultinomialApi(unittest.TestCase):
        sample_prob = sample_output_one_dimension(out.numpy(), 4)
        prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True)
-        self.assertTrue(
-            np.allclose(sample_prob, prob, rtol=0, atol=0.01),
-            "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))
+        np.testing.assert_allclose(
+            sample_prob,
+            prob,
+            rtol=0,
+            atol=0.01,
+            err_msg="sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))

        paddle.enable_static()

    def test_dygraph2(self):
...
@@ -137,9 +143,12 @@ class TestMultinomialApi(unittest.TestCase):
        sample_prob = sample_output_two_dimension(out.numpy(), [3, 4])
        prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True)
-        self.assertTrue(
-            np.allclose(sample_prob, prob, rtol=0, atol=0.01),
-            "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))
+        np.testing.assert_allclose(
+            sample_prob,
+            prob,
+            rtol=0,
+            atol=0.01,
+            err_msg="sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))

        paddle.enable_static()

    def test_dygraph3(self):
...
@@ -182,9 +191,12 @@ class TestMultinomialApi(unittest.TestCase):
        sample_prob = sample_output_one_dimension(out, 4)
        prob = x_np / x_np.sum(axis=-1, keepdims=True)
-        self.assertTrue(
-            np.allclose(sample_prob, prob, rtol=0, atol=0.01),
-            "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))
+        np.testing.assert_allclose(
+            sample_prob,
+            prob,
+            rtol=0,
+            atol=0.01,
+            err_msg="sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))

class TestMultinomialAlias(unittest.TestCase):
...
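The multinomial hunks also move the diagnostic string out of assertTrue's second (msg) argument and into assert_allclose's err_msg keyword, so the custom message is appended to NumPy's own mismatch report. A minimal sketch of that keyword, using hypothetical stand-in arrays rather than the test's real sample_prob / prob:

# Hypothetical stand-ins for sample_prob / prob, just to show err_msg in use.
import numpy as np

sample_prob = np.array([0.248, 0.253, 0.499])
prob = np.array([0.25, 0.25, 0.50])

np.testing.assert_allclose(
    sample_prob,
    prob,
    rtol=0,
    atol=0.01,                      # passes: every deviation is below 0.01
    err_msg="sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))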
python/paddle/fluid/tests/unittests/npu/test_nearest_interp_op_npu.py
View file @ 73be70a3
...
@@ -466,10 +466,10 @@ class TestNearestAPI(unittest.TestCase):
                                      out_h=12,
                                      out_w=12,
                                      align_corners=False)
-        self.assertTrue(
-            np.allclose(results[0], np.transpose(expect_res, (0, 2, 3, 1))))
+        np.testing.assert_allclose(results[0],
+                                   np.transpose(expect_res, (0, 2, 3, 1)))
        for i in range(len(results) - 1):
-            self.assertTrue(np.allclose(results[i + 1], expect_res))
+            np.testing.assert_allclose(results[i + 1], expect_res)

class TestNearestInterpException(unittest.TestCase):
...
python/paddle/fluid/tests/unittests/npu/test_nearest_interp_v2_op_npu.py
View file @ 73be70a3
...
@@ -398,7 +398,7 @@ class TestNearestInterpOpAPI_dy(unittest.TestCase):
                                  scale_factor=scale,
                                  mode="nearest",
                                  align_corners=False)
-        self.assertTrue(np.allclose(out.numpy(), expect_res))
+        np.testing.assert_allclose(out.numpy(), expect_res)

if __name__ == "__main__":
...
python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py
View file @ 73be70a3
...
@@ -202,8 +202,8 @@ class TestPadAPI(unittest.TestCase):
                                          mode,
                                          value,
                                          data_format="NDHWC")
-        self.assertTrue(np.allclose(fetches[0], np_out1))
+        np.testing.assert_allclose(fetches[0], np_out1)
-        self.assertTrue(np.allclose(fetches[1], np_out2))
+        np.testing.assert_allclose(fetches[1], np_out2)

    def test_dygraph_1(self):
        paddle.disable_static()
...
@@ -238,8 +238,8 @@ class TestPadAPI(unittest.TestCase):
                                      value=value,
                                      data_format="NDHWC")
-        self.assertTrue(np.allclose(y1.numpy(), np_out1))
+        np.testing.assert_allclose(y1.numpy(), np_out1)
-        self.assertTrue(np.allclose(y2.numpy(), np_out2))
+        np.testing.assert_allclose(y2.numpy(), np_out2)

    def test_dygraph_2(self):
        paddle.disable_static()
...
@@ -274,8 +274,8 @@ class TestPadAPI(unittest.TestCase):
                                      value=value,
                                      data_format="NHWC")
-        self.assertTrue(np.allclose(y1.numpy(), np_out1))
+        np.testing.assert_allclose(y1.numpy(), np_out1)
-        self.assertTrue(np.allclose(y2.numpy(), np_out2))
+        np.testing.assert_allclose(y2.numpy(), np_out2)

    def test_dygraph_3(self):
        paddle.disable_static()
...
@@ -310,8 +310,8 @@ class TestPadAPI(unittest.TestCase):
                                      value=value,
                                      data_format="NLC")
-        self.assertTrue(np.allclose(y1.numpy(), np_out1))
+        np.testing.assert_allclose(y1.numpy(), np_out1)
-        self.assertTrue(np.allclose(y2.numpy(), np_out2))
+        np.testing.assert_allclose(y2.numpy(), np_out2)

class TestPad1dAPI(unittest.TestCase):
...
@@ -360,14 +360,14 @@ class TestPad1dAPI(unittest.TestCase):
                                      "constant",
                                      value=value,
                                      data_format="NCL")
-        self.assertTrue(np.allclose(output.numpy(), np_out))
+        np.testing.assert_allclose(output.numpy(), np_out)

        output = pad_constant_int(data)
        np_out = self._get_numpy_out(input_data, [pad_int] * 2,
                                     "constant",
                                     value=value,
                                     data_format="NCL")
-        self.assertTrue(np.allclose(output.numpy(), np_out))
+        np.testing.assert_allclose(output.numpy(), np_out)

class TestPad2dAPI(unittest.TestCase):
...
@@ -418,14 +418,14 @@ class TestPad2dAPI(unittest.TestCase):
                                      "constant",
                                      value=value,
                                      data_format="NCHW")
-        self.assertTrue(np.allclose(output.numpy(), np_out))
+        np.testing.assert_allclose(output.numpy(), np_out)

        output = pad_constant_int(data)
        np_out = self._get_numpy_out(input_data, [pad_int] * 4,
                                     "constant",
                                     value=value,
                                     data_format="NCHW")
-        self.assertTrue(np.allclose(output.numpy(), np_out))
+        np.testing.assert_allclose(output.numpy(), np_out)

class TestPad3dAPI(unittest.TestCase):
...
@@ -478,14 +478,14 @@ class TestPad3dAPI(unittest.TestCase):
                                      "constant",
                                      value=value,
                                      data_format="NCDHW")
-        self.assertTrue(np.allclose(output.numpy(), np_out))
+        np.testing.assert_allclose(output.numpy(), np_out)

        output = pad_constant_int(data)
        np_out = self._get_numpy_out(input_data, [pad_int] * 6,
                                     "constant",
                                     value=value,
                                     data_format="NCDHW")
-        self.assertTrue(np.allclose(output.numpy(), np_out))
+        np.testing.assert_allclose(output.numpy(), np_out)

class TestPad3dOpNpuError(unittest.TestCase):
...
python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py
View file @ 73be70a3
...
@@ -142,8 +142,8 @@ class TestPowNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)

if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/npu/test_reduce_sum_op_npu.py
View file @ 73be70a3
...
@@ -150,8 +150,8 @@ class TestReduceSumNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss)

class TestReduceSumNet2(TestReduceSumNet):
...
python/paddle/fluid/tests/unittests/npu/test_relu6_op_npu.py
View file @ 73be70a3
...
@@ -163,8 +163,8 @@ class TestRelu6Net(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)

if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/npu/test_relu_op_npu.py
View file @ 73be70a3
...
@@ -157,8 +157,8 @@ class TestReluNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)

if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/npu/test_rmsprop_op_npu.py
View file @ 73be70a3
...
@@ -88,8 +88,8 @@ class TestNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred, rtol=1e-3))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-3)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss, rtol=1e-3))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-3)

class TestCenteredNet(unittest.TestCase):
...
@@ -151,8 +151,8 @@ class TestCenteredNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred, rtol=1e-3))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-3)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss, rtol=1e-3))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-3)

if __name__ == "__main__":
...
python/paddle/fluid/tests/unittests/npu/test_sgd_op_npu.py
View file @ 73be70a3
...
@@ -112,8 +112,8 @@ class TestNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)

if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py
View file @ 73be70a3
...
@@ -316,8 +316,8 @@ class TestSliceNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)

class TestSliceOpDecsDim(OpTest):
...
python/paddle/fluid/tests/unittests/npu/test_softmax_op_npu.py
View file @ 73be70a3
...
@@ -118,8 +118,8 @@ class TestSoftmaxNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred, rtol=1e-2))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-2)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss, rtol=1e-2))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-2)

if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/npu/test_softmax_with_cross_entropy_op_npu.py
View file @ 73be70a3
...
@@ -155,8 +155,8 @@ class TestPowNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-5)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-5)

if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/npu/test_split_op_npu.py
View file @ 73be70a3
...
@@ -113,8 +113,8 @@ class API_TestSplit(unittest.TestCase):
            input1 = np.random.random([1, 10]).astype('float32')
            r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1])
            ex_x0, ex_x1 = np.split(input1, (3, ), axis=1)
-            self.assertTrue(np.allclose(ex_x0, r0))
+            np.testing.assert_allclose(ex_x0, r0)
-            self.assertTrue(np.allclose(ex_x1, r1))
+            np.testing.assert_allclose(ex_x1, r1)

class API_TestSplit2(unittest.TestCase):
...
@@ -128,8 +128,8 @@ class API_TestSplit2(unittest.TestCase):
            input1 = np.random.random([1, 10]).astype('float32')
            r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1])
            ex_x0, ex_x1 = np.split(input1, 2, axis=1)
-            self.assertTrue(np.allclose(ex_x0, r0))
+            np.testing.assert_allclose(ex_x0, r0)
-            self.assertTrue(np.allclose(ex_x1, r1))
+            np.testing.assert_allclose(ex_x1, r1)

class API_TestDygraphSplit(unittest.TestCase):
...
@@ -144,9 +144,9 @@ class API_TestDygraphSplit(unittest.TestCase):
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-            self.assertTrue(np.allclose(ex_x0, x0_out))
+            np.testing.assert_allclose(ex_x0, x0_out)
-            self.assertTrue(np.allclose(ex_x1, x1_out))
+            np.testing.assert_allclose(ex_x1, x1_out)
-            self.assertTrue(np.allclose(ex_x2, x2_out))
+            np.testing.assert_allclose(ex_x2, x2_out)

    def test_out2(self):
        with fluid.dygraph.guard(paddle.NPUPlace(0)):
...
@@ -158,9 +158,9 @@ class API_TestDygraphSplit(unittest.TestCase):
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, (1, 3), axis=1)
-            self.assertTrue(np.allclose(ex_x0, x0_out))
+            np.testing.assert_allclose(ex_x0, x0_out)
-            self.assertTrue(np.allclose(ex_x1, x1_out))
+            np.testing.assert_allclose(ex_x1, x1_out)
-            self.assertTrue(np.allclose(ex_x2, x2_out))
+            np.testing.assert_allclose(ex_x2, x2_out)

if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/npu/test_sqrt_op_npu.py
View file @ 73be70a3
...
@@ -145,8 +145,8 @@ class TestSqrtNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)

if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/npu/test_square_op_npu.py
View file @ 73be70a3
...
@@ -142,8 +142,8 @@ class TestSquareNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)

if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/npu/test_squeeze_op_npu.py
View file @ 73be70a3
...
@@ -143,7 +143,7 @@ class API_TestSqueeze(unittest.TestCase):
            result, = exe.run(feed={"data1": input1},
                              fetch_list=[result_squeeze])
            expected_result = np.squeeze(input1, axis=1)
-            self.assertTrue(np.allclose(expected_result, result))
+            np.testing.assert_allclose(expected_result, result)

class API_TestStaticSqueeze_(API_TestSqueeze):
...
@@ -168,7 +168,7 @@ class API_TestDygraphSqueeze(unittest.TestCase):
        output = self.squeeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
-        self.assertTrue(np.allclose(expected_out, out_np))
+        np.testing.assert_allclose(expected_out, out_np)

    def test_out_int8(self):
        paddle.disable_static()
...
@@ -178,7 +178,7 @@ class API_TestDygraphSqueeze(unittest.TestCase):
        output = self.squeeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
-        self.assertTrue(np.allclose(expected_out, out_np))
+        np.testing.assert_allclose(expected_out, out_np)

    def test_out_uint8(self):
        paddle.disable_static()
...
@@ -188,7 +188,7 @@ class API_TestDygraphSqueeze(unittest.TestCase):
        output = self.squeeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
-        self.assertTrue(np.allclose(expected_out, out_np))
+        np.testing.assert_allclose(expected_out, out_np)

    def test_axis_not_list(self):
        paddle.disable_static()
...
@@ -198,7 +198,7 @@ class API_TestDygraphSqueeze(unittest.TestCase):
        output = self.squeeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
-        self.assertTrue(np.allclose(expected_out, out_np))
+        np.testing.assert_allclose(expected_out, out_np)

    def test_dimension_not_1(self):
        paddle.disable_static()
...
@@ -208,7 +208,7 @@ class API_TestDygraphSqueeze(unittest.TestCase):
        output = self.squeeze(input, axis=(1, 0))
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
-        self.assertTrue(np.allclose(expected_out, out_np))
+        np.testing.assert_allclose(expected_out, out_np)

class API_TestDygraphSqueezeInplace(API_TestDygraphSqueeze):
...
python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py
View file @ 73be70a3
...
@@ -157,9 +157,8 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase):
        self.assertTrue(self.out_var.shape[self.axis] == -1)
        exe = fluid.Executor(self.place)
        res = exe.run(self.program, fetch_list=self.out_var)
-        self.assertTrue(
-            np.array_equal(res[0],
-                           np.stack([self.x] * self.iter_num, axis=self.axis)))
+        np.testing.assert_allclose(
+            res[0], np.stack([self.x] * self.iter_num, axis=self.axis))

class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase):
...
@@ -192,9 +191,8 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase):
        self.assertTrue(self.out_var.shape[self.axis] == -1)
        exe = fluid.Executor(self.place)
        res = exe.run(self.program, fetch_list=self.out_var)
-        self.assertTrue(
-            np.array_equal(res[0],
-                           np.stack([self.x] * self.iter_num, axis=self.axis)))
+        np.testing.assert_allclose(
+            res[0], np.stack([self.x] * self.iter_num, axis=self.axis))

class API_test(unittest.TestCase):
...
@@ -217,7 +215,7 @@ class API_test(unittest.TestCase):
                          },
                          fetch_list=[result_stack])
        expected_result = np.stack([input1, input2, input3], axis=0)
-        self.assertTrue(np.allclose(expected_result, result))
+        np.testing.assert_allclose(expected_result, result)

    def test_single_tensor_error(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
...
@@ -238,14 +236,14 @@ class API_DygraphTest(unittest.TestCase):
            result = paddle.stack([x1, x2, x3])
            result_np = result.numpy()
        expected_result = np.stack([data1, data2, data3])
-        self.assertTrue(np.allclose(expected_result, result_np))
+        np.testing.assert_allclose(expected_result, result_np)

        with fluid.dygraph.guard(place=paddle.NPUPlace(0)):
            y1 = fluid.dygraph.to_variable(data1)
            result = paddle.stack([y1], axis=0)
            result_np_2 = result.numpy()
        expected_result_2 = np.stack([data1], axis=0)
-        self.assertTrue(np.allclose(expected_result_2, result_np_2))
+        np.testing.assert_allclose(expected_result_2, result_np_2)

    def test_single_tensor_error(self):
        with fluid.dygraph.guard(place=paddle.NPUPlace(0)):
...
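One detail in test_stack_op_npu.py is slightly more than a rename: the old check used np.array_equal, i.e. exact element-wise equality, whereas np.testing.assert_allclose compares within a tolerance. If exact equality were the intent, NumPy also offers np.testing.assert_array_equal; the sketch below (with made-up arrays) shows the behavioural difference.

# Made-up arrays; only the comparison semantics matter here.
import numpy as np

x = np.zeros((2, 3), dtype=np.float32)
y = x + 1e-9                                   # tiny numerical perturbation

np.testing.assert_allclose(x, y, atol=1e-8)    # passes: within tolerance

try:
    np.testing.assert_array_equal(x, y)        # exact comparison: raises
except AssertionError:
    print("assert_array_equal demands exact element-wise equality")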
python/paddle/fluid/tests/unittests/npu/test_tanh_op_npu.py
View file @ 73be70a3
...
@@ -145,8 +145,8 @@ class TestTanhNet(unittest.TestCase):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)
-        self.assertTrue(np.allclose(npu_pred, cpu_pred))
+        np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-6)
-        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+        np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-6)

if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/npu/test_top_k_v2_op_npu.py
View file @ 73be70a3
...
@@ -241,47 +241,47 @@ class TestTopKAPI(unittest.TestCase):
        # test case for basic test case 1
        paddle_result = paddle.topk(input_tensor, k=2)
        numpy_result = numpy_topk(self.input_data, k=2)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
        # test case for basic test case 2 with axis
        paddle_result = paddle.topk(input_tensor, k=2, axis=1)
        numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
        # test case for basic test case 3 with tensor K
        k_tensor = paddle.to_tensor(np.array([2]))
        paddle_result = paddle.topk(input_tensor, k=k_tensor, axis=1)
        numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
        # test case for basic test case 4 with tensor largest
        k_tensor = paddle.to_tensor(np.array([2]))
        paddle_result = paddle.topk(input_tensor, k=2, axis=1, largest=False)
        numpy_result = numpy_topk(self.input_data, k=2, axis=1, largest=False)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
        # test case for basic test case 5 with axis -1
        k_tensor = paddle.to_tensor(np.array([2]))
        paddle_result = paddle.topk(input_tensor, k=2, axis=-1, largest=False)
        numpy_result = numpy_topk(self.input_data, k=2, axis=-1, largest=False)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
        # test case for basic test case 6 for the partial sort
        paddle_result = paddle.topk(large_input_tensor, k=1, axis=-1)
        numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1)
-        self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
        # test case for basic test case 7 for the unsorted
        paddle_result = paddle.topk(input_tensor, k=2, axis=1, sorted=False)
        sort_paddle = numpy_topk(np.array(paddle_result[0].numpy()),
                                 axis=1,
                                 k=2)
        numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-        self.assertTrue(np.allclose(sort_paddle[0], numpy_result[0]))
+        np.testing.assert_allclose(sort_paddle[0], numpy_result[0])

    def run_static(self, place):
        paddle.enable_static()
...
@@ -319,37 +319,37 @@ class TestTopKAPI(unittest.TestCase):
            result7[0], result7[1]
        ])
        numpy_result = numpy_topk(self.input_data, k=2)
-        self.assertTrue(np.allclose(paddle_result[0], numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[0], numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[1], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[1], numpy_result[1])
        numpy_result = numpy_topk(self.input_data, k=2, axis=-1)
-        self.assertTrue(np.allclose(paddle_result[2], numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[2], numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[3], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[3], numpy_result[1])
        numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-        self.assertTrue(np.allclose(paddle_result[4], numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[4], numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[5], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[5], numpy_result[1])
        numpy_result = numpy_topk(self.input_data,
                                  k=2,
                                  axis=1,
                                  largest=False)
-        self.assertTrue(np.allclose(paddle_result[6], numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[6], numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[7], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[7], numpy_result[1])
        numpy_result = numpy_topk(self.input_data,
                                  k=2,
                                  axis=-1,
                                  largest=False)
-        self.assertTrue(np.allclose(paddle_result[8], numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[8], numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[9], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[9], numpy_result[1])
        numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1)
-        self.assertTrue(np.allclose(paddle_result[10], numpy_result[0]))
+        np.testing.assert_allclose(paddle_result[10], numpy_result[0])
-        self.assertTrue(np.allclose(paddle_result[11], numpy_result[1]))
+        np.testing.assert_allclose(paddle_result[11], numpy_result[1])
        sort_paddle = numpy_topk(paddle_result[12], axis=1, k=2)
        numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-        self.assertTrue(np.allclose(sort_paddle[0], numpy_result[0]))
+        np.testing.assert_allclose(sort_paddle[0], numpy_result[0])

    def test_cases(self):
        places = [core.NPUPlace(0)]
...
python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py
View file @ 73be70a3
...
@@ -157,8 +157,8 @@ class TestTrilTriuOpAPI(unittest.TestCase):
                feed={"x": data},
                fetch_list=[tril_out, triu_out],
            )
-            self.assertTrue(np.allclose(tril_out, np.tril(data)))
+            np.testing.assert_allclose(tril_out, np.tril(data))
-            self.assertTrue(np.allclose(triu_out, np.triu(data)))
+            np.testing.assert_allclose(triu_out, np.triu(data))

    def test_api_with_dygraph(self):
        paddle.disable_static(fluid.NPUPlace(0))
...
@@ -170,8 +170,8 @@ class TestTrilTriuOpAPI(unittest.TestCase):
            x = fluid.dygraph.to_variable(data)
            tril_out, triu_out = tensor.tril(x).numpy(), tensor.triu(
                x).numpy()
-            self.assertTrue(np.allclose(tril_out, np.tril(data)))
+            np.testing.assert_allclose(tril_out, np.tril(data))
-            self.assertTrue(np.allclose(triu_out, np.triu(data)))
+            np.testing.assert_allclose(triu_out, np.triu(data))

    def test_fluid_api(self):
        paddle.enable_static()
...
python/paddle/fluid/tests/unittests/npu/test_truncated_gaussian_random_op_npu.py
View file @ 73be70a3
...
@@ -66,7 +66,7 @@ class TestTruncatedNormal(unittest.TestCase):
        cpu_w = self._test(False)
        npu_w = self._test(True)
-        self.assertTrue(np.allclose(npu_w, cpu_w))
+        np.testing.assert_allclose(npu_w, cpu_w)

if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py
View file @ 73be70a3
...
@@ -71,8 +71,11 @@ class TestNPUUniformRandomOp(OpTest):
    def verify_output(self, outs):
        hist, prob = self.output_hist(np.array(outs[0]))
-        self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01),
-                        "hist: " + str(hist))
+        np.testing.assert_allclose(hist,
+                                   prob,
+                                   rtol=0,
+                                   atol=0.01,
+                                   err_msg="hist: " + str(hist))

class TestNPUUniformRandomOpSelectedRows(unittest.TestCase):
...
@@ -100,8 +103,11 @@ class TestNPUUniformRandomOpSelectedRows(unittest.TestCase):
        op.run(scope, place)
        self.assertEqual(out.get_tensor().shape(), [1000, 784])
        hist, prob = output_hist(np.array(out.get_tensor()))
-        self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01),
-                        "hist: " + str(hist))
+        np.testing.assert_allclose(hist,
+                                   prob,
+                                   rtol=0,
+                                   atol=0.01,
+                                   err_msg="hist: " + str(hist))

if __name__ == "__main__":
...