Commit ac495981 · PaddlePaddle/Paddle

add reduce_mean and gelu test (#51447)

Authored by Charles-hit on Mar 10, 2023; committed via GitHub on Mar 10, 2023.
Parent commit: 54331f1a

Showing 9 changed files with 97 additions and 67 deletions (+97 −67).
Changed files (9):

python/paddle/fluid/tests/unittests/CMakeLists.txt                +2   −1
python/paddle/fluid/tests/unittests/test_activation_op.py         +28  −10
python/paddle/fluid/tests/unittests/test_elementwise_add_op.py    +6   −6
python/paddle/fluid/tests/unittests/test_elementwise_div_op.py    +8   −8
python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py    +8   −8
python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py    +13  −13
python/paddle/fluid/tests/unittests/test_fill_any_like_op.py      +11  −11
python/paddle/fluid/tests/unittests/test_full_like_op.py          +4   −4
python/paddle/fluid/tests/unittests/test_mean_op.py               +17  −6
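Two changes recur throughout the diffs below: the per-test hook if_skip_cinn is renamed to if_enable_cinn, and composite (prim) checking is switched on by passing check_prim=True to check_output / check_grad. A minimal sketch of the hook pattern, assuming an OpTest-style base class that reads self.enable_cinn to decide whether the CINN variant of a test runs (class names here are illustrative, not from the diff):

# Sketch only: illustrates the if_enable_cinn override pattern used in the
# tests below; the real OpTest base class lives in the Paddle test framework.
class BaseOpCase:
    def setUp(self):
        self.enable_cinn = True   # default: also exercise the CINN path
        self.if_enable_cinn()     # subclasses override this hook to opt out

    def if_enable_cinn(self):
        pass                      # keep the default


class ZeroDimCase(BaseOpCase):
    def if_enable_cinn(self):
        self.enable_cinn = False  # 0-D inputs are excluded from CINN runs


case = ZeroDimCase()
case.setUp()
print(case.enable_cinn)  # False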
python/paddle/fluid/tests/unittests/CMakeLists.txt

@@ -1220,7 +1220,8 @@ set(TEST_CINN_OPS
     test_gather_nd_op
     test_elementwise_pow_op
     test_transpose_op
-    test_reshape_op)
+    test_reshape_op
+    test_mean_op)
 
 foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
   if(WITH_CINN)
python/paddle/fluid/tests/unittests/test_activation_op.py

@@ -109,7 +109,7 @@ class TestExpFp32_Prim(OpTest):
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
         self.outputs = {'Out': out}
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
     def test_check_output(self):
         self.check_output()
@@ -123,7 +123,7 @@ class TestExpFp32_Prim(OpTest):
     def init_shape(self):
         self.shape = [12, 17]
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = True
@@ -136,7 +136,7 @@ class TestExpPrim_ZeroDim(TestExpFp32_Prim):
     def init_shape(self):
         self.shape = []
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -319,7 +319,7 @@ class TestSilu(TestActivation):
         self.python_api = paddle.nn.functional.silu
         self.init_dtype()
         self.init_shape()
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
         np.random.seed(1024)
         x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
@@ -331,7 +331,7 @@ class TestSilu(TestActivation):
     def init_dtype(self):
         self.dtype = np.float32
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
 
     def test_check_grad(self):
@@ -342,7 +342,7 @@ class TestSilu_ZeroDim(TestSilu):
     def init_shape(self):
         self.shape = []
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -1935,47 +1935,65 @@ def gelu(x, approximate):
 class TestGeluApproximate(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.prim_op_type = "comp"
+        self.python_api = paddle.nn.functional.gelu
         self.init_dtype()
         self.init_shape()
         approximate = True
         np.random.seed(1024)
         x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
         out = gelu(x, approximate)
+        self.enable_cinn = False
 
         self.inputs = {'X': x}
         self.outputs = {'Out': out}
         self.attrs = {"approximate": approximate}
 
+    def test_check_output(self):
+        self.check_output(check_prim=True)
+
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestGelu(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.prim_op_type = "comp"
+        self.python_api = paddle.nn.functional.gelu
         self.init_dtype()
         self.init_shape()
         approximate = False
         np.random.seed(2048)
         x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
         out = gelu(x, approximate)
+        self.if_enable_cinn()
 
         self.inputs = {'X': x}
         self.outputs = {'Out': out}
         self.attrs = {"approximate": approximate}
 
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
+    def test_check_output(self):
+        self.check_output(check_prim=True)
+
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestGelu_ZeroDim(TestGelu):
     def init_shape(self):
         self.shape = []
 
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
 
 class TestGELUAPI(unittest.TestCase):
     # test paddle.nn.GELU, paddle.nn.functional.gelu
@@ -3760,7 +3778,7 @@ def create_test_act_fp16_class(
         def init_dtype(self):
             self.dtype = np.float16
 
-        def if_skip_cinn(self):
+        def if_enable_cinn(self):
             self.enable_cinn = enable_cinn
 
         def test_check_output(self):
@@ -3814,7 +3832,7 @@ create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
 create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
 create_test_act_fp16_class(TestRound, grad_check=False)
 create_test_act_fp16_class(TestRelu, check_prim=True)
-create_test_act_fp16_class(TestGelu)
+create_test_act_fp16_class(TestGelu, check_prim=True)
 create_test_act_fp16_class(TestBRelu)
 create_test_act_fp16_class(TestRelu6)
 create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
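The @@ hunk headers above point at a module-level gelu(x, approximate) helper that builds the expected outputs for these tests. Its body is not part of this diff; a NumPy sketch consistent with the standard GELU definitions (exact and tanh-approximate) looks like this:

import numpy as np
from scipy.special import erf

def gelu(x, approximate):
    # Reference GELU used to compute the expected 'Out' tensor (sketch).
    if approximate:
        # tanh approximation: 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3)))
        return 0.5 * x * (
            1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3)))
        )
    # exact form: x * Phi(x), with Phi the standard normal CDF
    return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))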
python/paddle/fluid/tests/unittests/test_elementwise_add_op.py

@@ -38,7 +38,7 @@ class TestElementwiseAddOp(OpTest):
         self.init_kernel_type()
         self.init_axis()
         self.if_check_prim()
-        self.if_skip_cinn()
+        self.if_enable_cinn()
         self.inputs = {
             'X': OpTest.np_dtype_to_fluid_dtype(self.x),
@@ -105,7 +105,7 @@ class TestElementwiseAddOp(OpTest):
     def if_check_prim(self):
         self.check_prim = self.axis == -1
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -115,7 +115,7 @@ class TestElementwiseAddOp_ZeroDim1(TestElementwiseAddOp):
         self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
         self.out = np.add(self.x, self.y)
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -182,7 +182,7 @@ class TestBF16ElementwiseAddOp(OpTest):
         }
         self.attrs = {'axis': self.axis, 'use_mkldnn': False}
         self.outputs = {'Out': convert_float_to_uint16(self.out)}
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
@@ -204,7 +204,7 @@ class TestBF16ElementwiseAddOp(OpTest):
             place, ['X'], 'Out', no_grad_set=set('Y'), check_prim=True
         )
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -481,7 +481,7 @@ class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
         self.y = np.random.rand(100, 1).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 100, 1)
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
python/paddle/fluid/tests/unittests/test_elementwise_div_op.py

@@ -38,7 +38,7 @@ class ElementwiseDivOp(OpTest):
         self.init_dtype()
         self.init_shape()
         self.if_check_prim()
-        self.if_skip_cinn()
+        self.if_enable_cinn()
         x = self.gen_data(self.x_shape).astype(self.val_dtype)
         y = self.gen_data(self.y_shape).astype(self.val_dtype)
@@ -64,7 +64,7 @@ class ElementwiseDivOp(OpTest):
         self.grad_x = grad_x
         self.grad_y = grad_y
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
 
     def init_args(self):
@@ -136,7 +136,7 @@ class TestElementwiseDivPrimOpFp32(ElementwiseDivOp):
         self.dtype = np.float32
         self.val_dtype = np.float32
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -145,7 +145,7 @@ class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
         self.x_shape = []
         self.y_shape = []
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -163,7 +163,7 @@ class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
     def compute_gradient_y(self, grad_out, out, y):
         return np.sum(-1 * grad_out * out / y.reshape([1, 1]))
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -181,7 +181,7 @@ class TestElementwiseDivOp_ZeroDim3(ElementwiseDivOp):
     def compute_gradient_y(self, grad_out, out, y):
         return -1 * grad_out * out / y
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -375,7 +375,7 @@ class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
     def compute_gradient_x(self, grad_out, y):
         return np.sum(grad_out / y, axis=(0, 1))
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -399,7 +399,7 @@ class TestElementwiseDivOpFp16(ElementwiseDivOp):
         self.dtype = np.float16
         self.val_dtype = np.float16
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
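The compute_gradient_y helpers touched above encode the analytic gradient of division: for out = x / y, the derivative with respect to y is -x / y² = -out / y, so grad_y = -grad_out * out / y, reduced over any broadcast axes. A small NumPy check of that identity (shapes chosen only for illustration):

import numpy as np

x = np.random.uniform(0.1, 1, (2, 3))
y = np.random.uniform(0.1, 1, (3,))           # broadcast over the first axis
out = x / y
grad_out = np.ones_like(out)

# gradient w.r.t. y: -grad_out * out / y, summed over the broadcast axis
grad_y = np.sum(-grad_out * out / y, axis=0)

# finite-difference check of the first element
eps = 1e-6
y_pert = y.copy()
y_pert[0] += eps
numeric = (np.sum(x / y_pert) - np.sum(x / y)) / eps
print(np.isclose(grad_y[0], numeric, rtol=1e-3))  # True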
python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py

@@ -35,7 +35,7 @@ class ElementwiseMulOp(OpTest):
         self.init_input_output()
         self.init_kernel_type()
         self.init_axis()
-        self.if_skip_cinn()
+        self.if_enable_cinn()
         self.inputs = {
             'X': OpTest.np_dtype_to_fluid_dtype(self.x),
@@ -88,7 +88,7 @@ class ElementwiseMulOp(OpTest):
     def init_axis(self):
         pass
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -98,7 +98,7 @@ class TestElementwiseMulOp_ZeroDim1(ElementwiseMulOp):
         self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
         self.out = np.multiply(self.x, self.y)
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -108,7 +108,7 @@ class TestElementwiseMulOp_ZeroDim2(ElementwiseMulOp):
         self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
         self.out = np.multiply(self.x, self.y)
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -118,7 +118,7 @@ class TestElementwiseMulOp_ZeroDim3(ElementwiseMulOp):
         self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
         self.out = np.multiply(self.x, self.y)
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -145,7 +145,7 @@ class TestBF16ElementwiseMulOp(OpTest):
         }
         self.outputs = {'Out': convert_float_to_uint16(self.out)}
         self.attrs = {'axis': self.axis, 'use_mkldnn': False}
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
     def test_check_output(self):
         self.check_output()
@@ -159,7 +159,7 @@ class TestBF16ElementwiseMulOp(OpTest):
     def test_check_grad_ingore_y(self):
         self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_prim=True)
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -349,7 +349,7 @@ class TestElementwiseMulOpFp16(ElementwiseMulOp):
     def init_dtype(self):
         self.dtype = np.float16
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py

@@ -35,7 +35,7 @@ class TestElementwiseOp(OpTest):
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
         self.if_check_prim()
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
     def test_check_output(self):
         self.check_output()
@@ -64,7 +64,7 @@ class TestElementwiseOp(OpTest):
     def if_check_prim(self):
         self.check_prim = True
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -79,12 +79,12 @@ class TestElementwiseSubOp_ZeroDim1(TestElementwiseOp):
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
         self.if_check_prim()
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
     def if_check_prim(self):
         self.check_prim = True
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -99,12 +99,12 @@ class TestElementwiseSubOp_ZeroDim2(TestElementwiseOp):
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
         self.if_check_prim()
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
     def if_check_prim(self):
         self.check_prim = True
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -119,12 +119,12 @@ class TestElementwiseSubOp_ZeroDim3(TestElementwiseOp):
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
         self.if_check_prim()
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
     def if_check_prim(self):
         self.check_prim = True
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -144,7 +144,7 @@ class TestBF16ElementwiseOp(OpTest):
         }
         self.outputs = {'Out': convert_float_to_uint16(out)}
         self.if_check_prim()
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
     def test_check_output(self):
         self.check_output()
@@ -165,7 +165,7 @@ class TestBF16ElementwiseOp(OpTest):
     def if_check_prim(self):
         self.check_prim = True
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -371,7 +371,7 @@ class TestComplexElementwiseSubOp(OpTest):
         self.attrs = {'axis': -1, 'use_mkldnn': False}
         self.outputs = {'Out': self.out}
         self.if_check_prim()
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
     def init_base_dtype(self):
         self.dtype = np.float64
@@ -424,7 +424,7 @@ class TestComplexElementwiseSubOp(OpTest):
             check_prim=self.check_prim,
         )
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
 
     def if_check_prim(self):
@@ -446,7 +446,7 @@ class TestRealComplexElementwiseSubOp(TestComplexElementwiseSubOp):
         self.grad_x = np.real(self.grad_out)
         self.grad_y = -self.grad_out
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
 
     def if_check_prim(self):
python/paddle/fluid/tests/unittests/test_fill_any_like_op.py

@@ -41,7 +41,7 @@ class TestFillAnyLikeOp(OpTest):
         self.inputs = {'X': np.random.random((219, 232)).astype(self.dtype)}
         self.attrs = {'value': self.value}
         self.outputs = {'Out': self.value * np.ones_like(self.inputs["X"])}
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
     def init(self):
         pass
@@ -49,7 +49,7 @@ class TestFillAnyLikeOp(OpTest):
     def test_check_output(self):
         self.check_output(check_prim=True)
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -58,7 +58,7 @@ class TestFillAnyLikeOpFloat32(TestFillAnyLikeOp):
         self.dtype = np.float32
         self.value = 0.0
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -79,13 +79,13 @@ class TestFillAnyLikeOpBfloat16(OpTest):
                 self.value * np.ones_like(self.inputs["X"])
             )
         }
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
         self.check_output_with_place(place, check_prim=True)
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False
@@ -93,7 +93,7 @@ class TestFillAnyLikeOpValue1(TestFillAnyLikeOp):
     def init(self):
         self.value = 1.0
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -101,7 +101,7 @@ class TestFillAnyLikeOpValue2(TestFillAnyLikeOp):
     def init(self):
         self.value = 1e-10
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -109,7 +109,7 @@ class TestFillAnyLikeOpValue3(TestFillAnyLikeOp):
     def init(self):
         self.value = 1e-100
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -131,9 +131,9 @@ class TestFillAnyLikeOpType(TestFillAnyLikeOp):
             * np.ones_like(self.inputs["X"]).astype(np.float32)
         }
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -141,7 +141,7 @@ class TestFillAnyLikeOpFloat16(TestFillAnyLikeOp):
     def init(self):
         self.dtype = np.float16
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
python/paddle/fluid/tests/unittests/test_full_like_op.py

@@ -112,7 +112,7 @@ class TestFullLikeOp1(OpTest):
         self.prim_op_type = "comp"
         self.python_api = fill_any_like_wrapper
         self.init_data()
-        self.if_skip_cinn()
+        self.if_enable_cinn()
 
         x = np.zeros(self.shape)
         out = np.full_like(x, self.fill_value, self.dtype)
@@ -132,7 +132,7 @@ class TestFullLikeOp1(OpTest):
     def test_check_output(self):
         self.check_output(check_eager=True, check_prim=True)
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -142,7 +142,7 @@ class TestFullLikeOp2(TestFullLikeOp1):
         self.shape = [1024, 1024]
         self.dtype = np.float64
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
@@ -152,7 +152,7 @@ class TestFullLikeOp3(TestFullLikeOp1):
         self.shape = [5000, 5000]
         self.dtype = np.int64
 
-    def if_skip_cinn(self):
+    def if_enable_cinn(self):
         pass
python/paddle/fluid/tests/unittests/test_mean_op.py

@@ -150,11 +150,13 @@ class TestReduceMeanOp(OpTest):
     def setUp(self):
         self.op_type = 'reduce_mean'
         self.python_api = reduce_mean_wrapper
+        self.prim_op_type = "comp"
         self.dtype = 'float64'
         self.shape = [2, 3, 4, 5]
         self.axis = [0]
         self.keepdim = False
         self.set_attrs()
+        self.if_enable_cinn()
 
         np.random.seed(10)
         x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
@@ -173,20 +175,23 @@ class TestReduceMeanOp(OpTest):
     def set_attrs(self):
         pass
 
+    def if_enable_cinn(self):
+        pass
+
     def test_check_output(self):
         if self.dtype != 'float16':
-            self.check_output(check_eager=True)
+            self.check_output(check_eager=True, check_prim=True)
         else:
             place = paddle.CUDAPlace(0)
-            self.check_output_with_place(place=place)
+            self.check_output_with_place(place=place, check_prim=True)
 
     def test_check_grad(self):
         if self.dtype != 'float16':
-            self.check_grad(['X'], ['Out'], check_eager=True)
+            self.check_grad(['X'], ['Out'], check_eager=True, check_prim=True)
         else:
             place = paddle.CUDAPlace(0)
             self.check_grad_with_place(
-                place, ['X'], ['Out'], numeric_grad_delta=0.5
+                place, ['X'], ['Out'], numeric_grad_delta=0.5, check_prim=True
             )
@@ -199,11 +204,13 @@ class TestReduceMeanBF16Op(OpTest):
     def setUp(self):
         self.op_type = 'reduce_mean'
         self.python_api = reduce_mean_wrapper
+        self.prim_op_type = "comp"
         self.dtype = np.uint16
         self.shape = [2, 3, 4, 5]
         self.axis = [0]
         self.keepdim = False
         self.set_attrs()
+        self.enable_cinn = False
 
         np.random.seed(10)
         x_np = np.random.uniform(-1, 1, self.shape).astype(np.float32)
@@ -224,12 +231,12 @@ class TestReduceMeanBF16Op(OpTest):
     def test_check_output(self):
         place = paddle.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_prim=True)
 
     def test_check_grad(self):
         place = paddle.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X'], ['Out'], numeric_grad_delta=0.05
+            place, ['X'], ['Out'], numeric_grad_delta=0.05, check_prim=True
         )
@@ -237,6 +244,7 @@ class TestReduceMeanOpDefaultAttrs(TestReduceMeanOp):
     def setUp(self):
         self.op_type = 'reduce_mean'
         self.python_api = reduce_mean_wrapper
+        self.prim_op_type = "comp"
         self.dtype = 'float64'
         self.shape = [2, 3, 4, 5]
@@ -282,6 +290,9 @@ class TestReduceMeanOpShape6DFP16(TestReduceMeanOp):
         self.shape = [2, 3, 4, 5, 6, 7]
         self.dtype = 'float16'
 
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
 
 class TestReduceMeanOpAxisAll(TestReduceMeanOp):
     def set_attrs(self):
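The reduce_mean cases check the op against a NumPy reference built in setUp; a minimal sketch of that expected-value computation, assuming axis/keepdim semantics that mirror paddle.mean (the helper name below is illustrative, not the reduce_mean_wrapper defined in the file):

import numpy as np

def expected_reduce_mean(x, axis=None, keepdim=False):
    # NumPy reference for the 'Out' tensor the op test compares against.
    if isinstance(axis, list):
        axis = tuple(axis)
    return np.mean(x, axis=axis, keepdims=keepdim)

np.random.seed(10)
x_np = np.random.uniform(-1, 1, [2, 3, 4, 5]).astype('float64')
out_np = expected_reduce_mean(x_np, axis=[0], keepdim=False)
print(out_np.shape)  # (3, 4, 5)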