PaddlePaddle / Paddle — commit f8a8dd5e
Authored Mar 23, 2023 by wanghuancoder; committed via GitHub on Mar 23, 2023.
delete old dygraph xpu op test (#51955)
* delete old dygraph xpu op test
Parent: cc9bbd5b
Showing 36 changed files with 119 additions and 78 deletions (+119 / -78).
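Most of the 36 files change in the same two mechanical ways: the test modules import their helpers from eager_op_test instead of the old op_test module, and the removed check_eager=... keyword is replaced by check_dygraph=... (or simply dropped). A rough sketch of what a migrated XPU op test looks like after this commit; the op name, shapes and class name below are illustrative only, not taken from the diff:

import sys

sys.path.append("..")

import numpy as np

# was: from op_test import OpTest, skip_check_grad_ci
from eager_op_test import OpTest, skip_check_grad_ci  # noqa: F401
from op_test_xpu import XPUOpTest


class TestScaleOpXPU(XPUOpTest):
    def setUp(self):
        self.op_type = "scale"
        x = np.random.random((4, 16)).astype('float32')
        self.inputs = {'X': x}
        self.attrs = {'scale': 2.0}
        self.outputs = {'Out': 2.0 * x}

    def test_check_output(self):
        # was: self.check_output(check_eager=True)
        self.check_output(check_dygraph=True)

    def test_check_grad(self):
        # was: self.check_grad(['X'], 'Out', check_eager=True)
        self.check_grad(['X'], 'Out', check_dygraph=True)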
Changed files:

  python/paddle/fluid/tests/unittests/eager_op_test.py                               +79 / -23
  python/paddle/fluid/tests/unittests/op_test_xpu.py                                 +1 / -7
  python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py                  +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_atan_op_xpu.py                        +1 / -4
  python/paddle/fluid/tests/unittests/xpu/test_bitwise_op_xpu.py                     +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py                        +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_concat_op_xpu.py                      +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_diag_v2_op_xpu.py                     +2 / -2
  python/paddle/fluid/tests/unittests/xpu/test_diagonal_op_xpu.py                    +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py             +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py          +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py             +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_elementwise_floordiv_op_xpu.py        +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py             +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py             +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py             +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py             +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py             +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_elementwise_sub_op_xpu.py             +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_fill_constant_op_xpu.py               +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_fill_diagonal_tensor_op_xpu.py        +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_fused_resnet_basic_block_op_xpu.py    +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_gather_nd_op_xpu.py                   +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_group_norm_op_xpu.py                  +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_huber_loss_op_xpu.py                  +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py                  +2 / -2
  python/paddle/fluid/tests/unittests/xpu/test_log_loss_op_xpu.py                    +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_log_softmax_op_xpu.py                 +2 / -2
  python/paddle/fluid/tests/unittests/xpu/test_logical_op_xpu.py                     +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_pad3d_op_xpu.py                       +2 / -2
  python/paddle/fluid/tests/unittests/xpu/test_pixel_shuffle_op_xpu.py               +1 / -4
  python/paddle/fluid/tests/unittests/xpu/test_prelu_op_xpu.py                       +1 / -4
  python/paddle/fluid/tests/unittests/xpu/test_refactor_op_xpu.py                    +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_stack_op_xpu.py                       +1 / -1
  python/paddle/fluid/tests/unittests/xpu/test_temporal_shift_op_xpu.py              +2 / -2
  python/paddle/fluid/tests/unittests/xpu/test_warpctc_op_xpu.py                     +1 / -1
python/paddle/fluid/tests/unittests/eager_op_test.py @ f8a8dd5e

@@ -930,7 +930,14 @@ class OpTest(unittest.TestCase):
                     args, len(inputs_sig)
                 )
             )
             ret_tuple = python_api(*args)
-            return construct_output_dict_by_kernel_sig(ret_tuple, outputs_sig)
+            result = construct_output_dict_by_kernel_sig(ret_tuple, outputs_sig)
+            if hasattr(self, "python_out_sig_sub_name"):
+                for key in self.python_out_sig_sub_name.keys():
+                    for i in range(len(self.python_out_sig_sub_name[key])):
+                        result[key][0][i].name = self.python_out_sig_sub_name[key][i]
+            return result

         with fluid.dygraph.base.guard(place=place):
             block = fluid.default_main_program().global_block()
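The new branch above lets a test attach per-tensor names to the elements of a tuple or list output returned by the Python API. A plain-Python mock of what the added loop does; DummyTensor and the sample data are made up for illustration, only the loop body mirrors the new code:

class DummyTensor:
    def __init__(self, name):
        self.name = name


# result[key] is assumed to hold a list whose first element is a list of
# tensor-like objects, matching the indexing in the code above.
result = {"Out": [[DummyTensor("tmp_0"), DummyTensor("tmp_1")]]}
python_out_sig_sub_name = {"Out": ["sub_out_0", "sub_out_1"]}

for key in python_out_sig_sub_name.keys():
    for i in range(len(python_out_sig_sub_name[key])):
        result[key][0][i].name = python_out_sig_sub_name[key][i]

print([t.name for t in result["Out"][0]])  # ['sub_out_0', 'sub_out_1']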
@@ -965,7 +972,11 @@ class OpTest(unittest.TestCase):
             dygraph_tensor_outputs,
             attrs_outputs,
         )
-        if not kernel_sig:
+        if not kernel_sig or (
+            len(kernel_sig[0]) == 0
+            and len(kernel_sig[1]) == 0
+            and len(kernel_sig[2]) == 0
+        ):
             return None
         if not hasattr(self, "python_api"):
             print(kernel_sig)
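kernel_sig appears to be a three-part signature (input names, attribute names, output names); the strengthened guard also returns None when all three parts are empty, not only when the signature is missing entirely. A standalone restatement of the condition, under that assumption:

def skip_python_api_check(kernel_sig):
    # Treat a completely empty signature the same as a missing one.
    return not kernel_sig or (
        len(kernel_sig[0]) == 0
        and len(kernel_sig[1]) == 0
        and len(kernel_sig[2]) == 0
    )


print(skip_python_api_check(None))                   # True
print(skip_python_api_check(([], [], [])))           # True (newly covered case)
print(skip_python_api_check((['X'], [], ['Out'])))   # False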
@@ -1514,13 +1525,23 @@ class OpTest(unittest.TestCase):
         core._set_prim_all_enabled(False)
         core.set_prim_eager_enabled(False)
+        if hasattr(self, "use_custom_device") and self.use_custom_device():
+            check_dygraph = False

         def find_imperative_actual(target_name, dygraph_outs, place):
             for name in dygraph_outs:
                 if name == target_name:
                     return dygraph_outs[name][0]
                 var_list = dygraph_outs[name]
                 for i, var in enumerate(var_list):
-                    if var.name == target_name:
+                    if isinstance(var, list):
+                        for tensor in var:
+                            if tensor.name == target_name:
+                                return tensor
+                    elif (
+                        isinstance(var, paddle.Tensor)
+                        and var.name == target_name
+                    ):
                         return dygraph_outs[name][i]
             self.assertTrue(
                 False,
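find_imperative_actual now tolerates output entries that are lists of tensors as well as single paddle.Tensor objects. A plain-Python mock of the lookup; FakeTensor and the sample outputs are invented, and getattr stands in for the isinstance(var, paddle.Tensor) check used in the real code:

class FakeTensor:
    def __init__(self, name):
        self.name = name


def find_actual(target_name, outs):
    for name in outs:
        if name == target_name:
            return outs[name][0]
        for i, var in enumerate(outs[name]):
            if isinstance(var, list):                        # list-valued entry
                for tensor in var:
                    if tensor.name == target_name:
                        return tensor
            elif getattr(var, "name", None) == target_name:  # tensor-like entry
                return outs[name][i]
    raise AssertionError(target_name + " not found")


outs = {"Out": [[FakeTensor("out_0"), FakeTensor("out_1")]]}
print(find_actual("out_1", outs).name)  # out_1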
@@ -1653,6 +1674,8 @@ class OpTest(unittest.TestCase):
             actual_np, expect_np = self.convert_uint16_to_float_ifneed(
                 actual_np, expect_np
             )
+            # modify there for fp32 check
             # NOTE(zhiqiu): np.allclose([], [1.]) returns True
             # see details: https://stackoverflow.com/questions/38331703/why-does-numpys-broadcasting-sometimes-allow-comparing-arrays-of-different-leng
             if expect_np.size == 0:
@@ -1768,19 +1791,18 @@ class OpTest(unittest.TestCase):
                     place, no_check_set=no_check_set
                 )
                 self.outputs = dygraph_outs
                 if self.op_test.is_fp16_compared_with_fp32():
                     self.op_test.enable_cal_ref_output()
                     self.is_python_api_test = True
-                    ref_dygraph_outs = self.op_test._calc_python_api_output(
+                    self.ref_outputs = self.op_test._calc_python_api_output(
                         place
                     )
-                    if ref_dygraph_outs is None:
+                    if self.ref_outputs is None:
                         self.is_python_api_test = False
-                        ref_dygraph_outs = self.op_test._calc_dygraph_output(
+                        # missing KernelSignature, fall back to eager middle output.
+                        self.ref_outputs = self.op_test._calc_dygraph_output(
                             place, no_check_set=no_check_set
                         )
-                    self.ref_outputs = ref_dygraph_outs
                     self.op_test.disable_cal_ref_output()

             def _compare_numpy(self, name, actual_np, expect_np):
@@ -1911,7 +1933,7 @@ class OpTest(unittest.TestCase):
             else:
                 atol = 2 if atol < 2 else atol
         else:
-            atol = 1e-1 if atol < 1e-1 else atol
+            atol = 1e-2 if atol < 1e-2 else atol

         if self.is_float16_op():
             atol = 1e-3 if atol < 1e-3 else atol
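These atol lines follow the clamp-from-below idiom; the changed line lowers the floor in that branch from 1e-1 to 1e-2, so the output comparison becomes stricter. The idiom is equivalent to max():

atol = 1e-5
# "atol = 1e-2 if atol < 1e-2 else atol" is the same as:
atol = max(atol, 1e-2)
print(atol)  # 0.01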
@@ -2050,6 +2072,9 @@ class OpTest(unittest.TestCase):
         if self.is_xpu_op():
             self.__class__.use_xpu = True

+        if hasattr(self, "use_custom_device") and self.use_custom_device():
+            check_dygraph = False
+
         places = self._get_places()
         for place in places:
             res = self.check_output_with_place(
@@ -2072,6 +2097,7 @@ class OpTest(unittest.TestCase):
                     self.check_compile_vs_runtime(fetch_list, outs)

     def check_output_customized(self, checker, custom_place=None):
+        self.__class__.op_type = self.op_type
         places = self._get_places()
         if custom_place:
             places.append(custom_place)
@@ -2160,6 +2186,9 @@ class OpTest(unittest.TestCase):
             else:
                 abs_a = 1 if abs_a < 1e-3 else abs_a

-            diff_mat = np.abs(a - b) / abs_a
+            if self.dtype == np.bool_:
+                diff_mat = np.abs(a ^ b) / abs_a
+            else:
+                diff_mat = np.abs(a - b) / abs_a
             max_diff = np.max(diff_mat)
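The new bool branch uses XOR because subtracting numpy boolean arrays raises a TypeError, while a ^ b is True exactly where the two arrays disagree:

import numpy as np

a = np.array([True, False, True])
b = np.array([True, True, False])
print(a ^ b)                # [False  True  True]
print(np.abs(a ^ b) / 1.0)  # [0. 1. 1.] -> usable as a difference matrix
# a - b  ->  TypeError: numpy boolean subtract, the `-` operator, is not supported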
@@ -2205,6 +2234,9 @@ class OpTest(unittest.TestCase):
         only_check_prim=False,
         atol=1e-5,
     ):
+        if hasattr(self, "use_custom_device") and self.use_custom_device():
+            check_dygraph = False
+
         self._check_grad_helper()
         places = self._get_places()
         for place in places:
@@ -2241,6 +2273,9 @@ class OpTest(unittest.TestCase):
         numeric_place=None,
         atol=1e-5,
     ):
+        if hasattr(self, "use_custom_device") and self.use_custom_device():
+            check_dygraph = False
+
         core._set_prim_all_enabled(False)
         core.set_prim_eager_enabled(False)
         if check_prim:
@@ -2337,6 +2372,17 @@ class OpTest(unittest.TestCase):
         if numeric_place is None:
             numeric_place = place

-        numeric_grads = user_defined_grads or [
-            get_numeric_gradient(
-                numeric_place,
+        if user_defined_grads is None and self.is_fp16_compared_with_fp32():
+            self.enable_cal_ref_output()
+            numeric_grads = self._get_gradient(
+                inputs_to_check,
+                place,
+                output_names,
+                no_grad_set,
+                user_defined_grad_outputs,
+            )
+            self.disable_cal_ref_output()
+        else:
+            numeric_grads = user_defined_grads or [
+                get_numeric_gradient(
+                    numeric_place,
@@ -2350,6 +2396,7 @@ class OpTest(unittest.TestCase):
                     )
                     for input_to_check in inputs_to_check
                 ]
         analytic_grads = self._get_gradient(
             inputs_to_check,
             place,
@@ -2429,8 +2476,14 @@ class OpTest(unittest.TestCase):
             else:
                 for output_vars_index in output_vars:
                     for output_vars_selected in output_vars[output_vars_index]:
-                        if output_vars_selected.name == name:
-                            return output_vars_selected
+                        if isinstance(output_vars_selected, list):
+                            for tensor in output_vars_selected:
+                                if tensor.name == name:
+                                    return [tensor]
+                        elif isinstance(output_vars_selected, paddle.Tensor):
+                            if output_vars_selected.name == name:
+                                return [output_vars_selected]
             raise AssertionError(name, " not in outputs:", output_vars.keys())

     def _get_dygraph_grad(
         self,
@@ -2441,6 +2494,9 @@ class OpTest(unittest.TestCase):
         no_grad_set=None,
         check_dygraph=True,
     ):
+        if hasattr(self, "use_custom_device") and self.use_custom_device():
+            check_dygraph = False
+
         with fluid.dygraph.base.guard(place=place):
             block = fluid.default_main_program().global_block()
python/paddle/fluid/tests/unittests/op_test_xpu.py @ f8a8dd5e

@@ -13,7 +13,7 @@
 # limitations under the License.

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from testsuite import append_loss_ops, create_op, set_input
 from white_list import no_grad_set_white_list, op_threshold_white_list
 from xpu.get_test_cover_info import (

@@ -71,7 +71,6 @@ class XPUOpTest(OpTest):
         equal_nan=False,
         check_dygraph=True,
         inplace_atol=None,
-        check_eager=False,
     ):
         place = paddle.XPUPlace(0)
         self.check_output_with_place(

@@ -81,7 +80,6 @@ class XPUOpTest(OpTest):
             equal_nan,
             check_dygraph,
             inplace_atol,
-            check_eager,
         )

     def check_output_with_place(

@@ -92,7 +90,6 @@ class XPUOpTest(OpTest):
         equal_nan=False,
         check_dygraph=True,
         inplace_atol=None,
-        check_eager=False,
     ):
         self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
         if self.dtype == np.float64:

@@ -120,7 +117,6 @@ class XPUOpTest(OpTest):
         user_defined_grad_outputs=None,
         check_dygraph=True,
         numeric_place=None,
-        check_eager=False,
     ):
         place = paddle.XPUPlace(0)
         self.check_grad_with_place(

@@ -135,7 +131,6 @@ class XPUOpTest(OpTest):
             user_defined_grad_outputs,
             check_dygraph,
             numeric_place,
-            check_eager,
         )

     def check_grad_with_place(

@@ -151,7 +146,6 @@ class XPUOpTest(OpTest):
         user_defined_grad_outputs=None,
         check_dygraph=True,
         numeric_place=None,
-        check_eager=False,
     ):
         if hasattr(self, 'op_type_need_check_grad'):
             xpu_version = core.get_xpu_device_version(0)
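With check_eager gone from the XPUOpTest wrappers above, any call site that still passes the keyword fails at call time, which is presumably why the remaining files in this commit sweep it out as well. A generic (non-Paddle) illustration of the failure mode:

class Checker:
    def check_output(self, atol=1e-5, check_dygraph=True):
        print("ok", atol, check_dygraph)


c = Checker()
c.check_output(check_dygraph=True)       # ok
try:
    c.check_output(check_eager=True)     # old keyword, no longer accepted
except TypeError as e:
    print("TypeError:", e)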
python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py @ f8a8dd5e

@@ -19,7 +19,7 @@ import numpy as np
 sys.path.append("..")

-from op_test import OpTest
+from eager_op_test import OpTest
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_atan_op_xpu.py @ f8a8dd5e

@@ -40,7 +40,6 @@ class XPUTestAtanOp(XPUOpTestWrapper):
         def setUp(self):
             self.set_xpu()
             self.op_type = "atan"
-            self.eager_mode = True
             # override
             self.init_input_shape()

@@ -62,9 +61,7 @@ class XPUTestAtanOp(XPUOpTestWrapper):
             self.check_output_with_place(self.place)

         def test_check_grad(self):
-            self.check_grad_with_place(
-                self.place, ['X'], 'Out', check_eager=self.eager_mode
-            )
+            self.check_grad_with_place(self.place, ['X'], 'Out')

     class Test1x1(TestAtanOp):
         def init_input_shape(self):
python/paddle/fluid/tests/unittests/xpu/test_bitwise_op_xpu.py @ f8a8dd5e

@@ -19,7 +19,7 @@ import numpy as np
 sys.path.append("..")

-from op_test import OpTest
+from eager_op_test import OpTest
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py @ f8a8dd5e

@@ -97,7 +97,7 @@ class XPUTestClipOp(XPUOpTestWrapper):
             if core.is_compiled_with_xpu():
                 paddle.enable_static()
                 self.check_grad_with_place(
-                    self.place, ['X'], 'Out', check_eager=True
+                    self.place, ['X'], 'Out', check_dygraph=True
                 )
                 paddle.disable_static()
python/paddle/fluid/tests/unittests/xpu/test_concat_op_xpu.py @ f8a8dd5e

@@ -18,7 +18,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import skip_check_grad_ci
+from eager_op_test import skip_check_grad_ci
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_diag_v2_op_xpu.py @ f8a8dd5e

@@ -58,11 +58,11 @@ class XPUTestDiagV2Op(XPUOpTestWrapper):
         def test_check_output(self):
             paddle.enable_static()
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)

         def test_check_grad(self):
             paddle.enable_static()
-            self.check_grad(['X'], 'Out', check_eager=False)
+            self.check_grad(['X'], 'Out', check_dygraph=False)

         def init_config(self):
             pass
python/paddle/fluid/tests/unittests/xpu/test_diagonal_op_xpu.py @ f8a8dd5e

@@ -20,7 +20,7 @@ import numpy as np
 import paddle

 sys.path.append("..")
-from op_test import skip_check_grad_ci
+from eager_op_test import skip_check_grad_ci
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py @ f8a8dd5e

@@ -19,7 +19,7 @@ import numpy as np
 sys.path.append("..")
 import unittest

-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py @ f8a8dd5e

@@ -19,7 +19,7 @@ import numpy as np
 sys.path.append("..")
 import unittest

-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 from op_test_xpu import XPUOpTest

 import paddle
python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py @ f8a8dd5e

@@ -17,7 +17,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import skip_check_grad_ci
+from eager_op_test import skip_check_grad_ci
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_elementwise_floordiv_op_xpu.py @ f8a8dd5e

@@ -17,7 +17,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py @ f8a8dd5e

@@ -17,7 +17,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import skip_check_grad_ci
+from eager_op_test import skip_check_grad_ci
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py @ f8a8dd5e

@@ -17,7 +17,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import skip_check_grad_ci
+from eager_op_test import skip_check_grad_ci
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py @ f8a8dd5e

@@ -17,7 +17,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py @ f8a8dd5e

@@ -17,7 +17,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py @ f8a8dd5e

@@ -17,7 +17,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_elementwise_sub_op_xpu.py @ f8a8dd5e

@@ -19,7 +19,7 @@ import numpy as np
 sys.path.append("..")
 import unittest

-from op_test import skip_check_grad_ci
+from eager_op_test import skip_check_grad_ci
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_fill_constant_op_xpu.py @ f8a8dd5e

@@ -18,7 +18,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import convert_float_to_uint16
+from eager_op_test import convert_float_to_uint16
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_fill_diagonal_tensor_op_xpu.py @ f8a8dd5e

@@ -20,7 +20,7 @@ import numpy as np
 import paddle

 sys.path.append("..")
-from op_test import skip_check_grad_ci
+from eager_op_test import skip_check_grad_ci
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_fused_resnet_basic_block_op_xpu.py @ f8a8dd5e

@@ -18,7 +18,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
     create_test_class,
python/paddle/fluid/tests/unittests/xpu/test_gather_nd_op_xpu.py @ f8a8dd5e

@@ -52,7 +52,7 @@ class XPUTestGatherNd(XPUOpTestWrapper):
             self.check_output_with_place(self.place)

         def test_check_grad(self):
-            self.check_grad(['X'], 'Out', check_eager=False)
+            self.check_grad(['X'], 'Out', check_dygraph=False)

         def init_data(self):
             self.xnp = np.random.random((5, 20)).astype(self.in_type)
python/paddle/fluid/tests/unittests/xpu/test_group_norm_op_xpu.py @ f8a8dd5e

@@ -19,7 +19,7 @@ import numpy as np
 sys.path.append("..")

-from op_test import OpTest
+from eager_op_test import OpTest
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_huber_loss_op_xpu.py @ f8a8dd5e

@@ -19,7 +19,7 @@ import numpy as np
 sys.path.append("..")

-from op_test import OpTest
+from eager_op_test import OpTest
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py @ f8a8dd5e

@@ -72,7 +72,7 @@ class XPUTestKLDivLossOp(XPUOpTestWrapper):
             self.outputs = {'Loss': loss.astype('float32')}

         def test_check_output(self):
-            self.check_output(check_eager=True)
+            self.check_output(check_dygraph=True)

         def test_check_grad(self):
             self.check_grad_with_place(

@@ -80,7 +80,7 @@ class XPUTestKLDivLossOp(XPUOpTestWrapper):
                 ['X'],
                 'Loss',
                 no_grad_set=set(["Target"]),
-                check_eager=True,
+                check_dygraph=True,
             )

         def initTestCase(self):
python/paddle/fluid/tests/unittests/xpu/test_log_loss_op_xpu.py @ f8a8dd5e

@@ -18,7 +18,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
python/paddle/fluid/tests/unittests/xpu/test_log_softmax_op_xpu.py @ f8a8dd5e

@@ -91,14 +91,14 @@ class XPUTestLogSoftmaxOp(XPUOpTestWrapper):
             pass

         def test_check_output(self):
-            self.check_output(check_eager=True)
+            self.check_output(check_dygraph=True)

         def test_check_grad(self):
             self.check_grad(
                 ['X'],
                 ['Out'],
                 user_defined_grads=[self.x_grad],
-                check_eager=True,
+                check_dygraph=True,
             )
python/paddle/fluid/tests/unittests/xpu/test_logical_op_xpu.py @ f8a8dd5e

@@ -19,7 +19,7 @@ import numpy as np
 sys.path.append("..")

-from op_test import OpTest
+from eager_op_test import OpTest
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_pad3d_op_xpu.py @ f8a8dd5e

@@ -94,10 +94,10 @@ class XPUTestPad3dOp(XPUOpTestWrapper):
             self.outputs = {'Out': out}

         def test_check_output(self):
-            self.check_output(check_eager=True)
+            self.check_output(check_dygraph=True)

         def test_check_grad_normal(self):
-            self.check_grad(['X'], 'Out', check_eager=True)
+            self.check_grad(['X'], 'Out', check_dygraph=True)

         def initTestCase(self):
             self.shape = (2, 3, 4, 5, 6)
python/paddle/fluid/tests/unittests/xpu/test_pixel_shuffle_op_xpu.py @ f8a8dd5e

@@ -78,7 +78,6 @@ class XPUTestPixelShuffleOp(XPUOpTestWrapper):
             self.set_xpu()
             self.op_type = "pixel_shuffle"
             self.init_dtype()
-            self.eager_mode = True

             # override
             self.init_input_shape()

@@ -109,9 +108,7 @@ class XPUTestPixelShuffleOp(XPUOpTestWrapper):
             self.check_output_with_place(self.place)

         def test_check_grad(self):
-            self.check_grad_with_place(
-                self.place, ['X'], 'Out', check_eager=self.eager_mode
-            )
+            self.check_grad_with_place(self.place, ['X'], 'Out')

     class TestNHWC(TestPixelShuffleOp):
         def init_input_shape(self):
python/paddle/fluid/tests/unittests/xpu/test_prelu_op_xpu.py @ f8a8dd5e

@@ -43,7 +43,6 @@ class XPUTestPReluOp(XPUOpTestWrapper):
             self.set_xpu()
             self.op_type = "prelu"
             self.init_dtype()
-            self.eager_mode = True

             # override
             self.init_input_shape()

@@ -70,8 +69,6 @@ class XPUTestPReluOp(XPUOpTestWrapper):
                 )
             else:
                 self.alpha = np.random.uniform(-1, -0.5, [1] + self.x_shape[1:])
-                # eager check don't support mode = 'all'
-                self.eager_mode = False
             self.alpha = self.alpha.astype(self.dtype)

             self.inputs = {'X': self.x, 'Alpha': self.alpha}

@@ -115,7 +112,7 @@ class XPUTestPReluOp(XPUOpTestWrapper):
         def test_check_grad(self):
             self.check_grad_with_place(
-                self.place, ['X', 'Alpha'], 'Out', check_eager=self.eager_mode
+                self.place, ['X', 'Alpha'], 'Out', check_dygraph=False
             )

     class TestModeChannelNHWC(TestPReluOp):
python/paddle/fluid/tests/unittests/xpu/test_refactor_op_xpu.py @ f8a8dd5e

@@ -19,7 +19,7 @@ import numpy as np
 sys.path.append("..")

-from op_test import OpTest
+from eager_op_test import OpTest
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_stack_op_xpu.py @ f8a8dd5e

@@ -18,7 +18,7 @@ sys.path.append("..")
 import unittest

 import numpy as np
-from op_test import skip_check_grad_ci
+from eager_op_test import skip_check_grad_ci
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
python/paddle/fluid/tests/unittests/xpu/test_temporal_shift_op_xpu.py @ f8a8dd5e

@@ -83,10 +83,10 @@ class XPUTestTemporalShiftOp(XPUOpTestWrapper):
             self.python_out_sig = ["Out"]

         def test_check_output(self):
-            self.check_output(check_eager=True)
+            self.check_output(check_dygraph=True)

         def test_check_grad(self):
-            self.check_grad(['X'], 'Out', check_eager=True)
+            self.check_grad(['X'], 'Out', check_dygraph=True)

         def initTestCase(self):
             self.x_shape = (6, 4, 4, 4)
python/paddle/fluid/tests/unittests/xpu/test_warpctc_op_xpu.py @ f8a8dd5e

@@ -311,7 +311,7 @@ class XPUTestWarpCTCOp(XPUOpTestWrapper):
         }

         def test_check_output(self):
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)

         def test_check_grad(self):
             self.outputs['WarpCTCGrad'] = self.gradient