PaddlePaddle / Paddle

Commit e64a18db (unverified)
Authored by Charles-hit on Jun 08, 2023; committed via GitHub on Jun 08, 2023.
[AMP Prim OP]support some prim ops for bf16 dtype part3 (#54368)
* support some prim ops bf16 dtype
* fix cmake
Parent: fd9c555c
Showing 8 changed files with 84 additions and 23 deletions.
test/legacy_test/CMakeLists.txt                       +4   -1
test/legacy_test/test_assign_op.py                    +0   -1
test/legacy_test/test_erf_op.py                       +9   -7
test/legacy_test/test_fill_any_like_op.py             +1   -1
test/legacy_test/test_flatten_contiguous_range_op.py  +25  -4
test/legacy_test/test_index_select_op.py              +11  -3
test/legacy_test/test_top_k_v2_op.py                  +22  -4
test/legacy_test/test_transpose_op.py                 +12  -2
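A note on the recurring helpers: every bf16 test below feeds float32 data through convert_float_to_uint16 from eager_op_test, and TestTopkBF16Op also uses convert_uint16_to_float. As a mental model, bfloat16 is the top 16 bits of an IEEE float32, so a minimal sketch of the conversion pair (an assumption about the helpers' behavior, not Paddle's actual implementation) looks like this:

import numpy as np

def convert_float_to_uint16(x):
    # Keep only the high 16 bits of each float32: that bit pattern,
    # stored in a uint16 array, is how OpTest represents bfloat16.
    return np.right_shift(x.astype(np.float32).view(np.uint32), 16).astype(np.uint16)

def convert_uint16_to_float(x):
    # Widen back: place the 16-bit payload in the high half of a
    # float32 and zero-fill the truncated mantissa bits.
    return np.left_shift(x.astype(np.uint32), 16).view(np.float32)

Round-tripping an input through both helpers bakes the bf16 precision loss into a float32 reference array, which is exactly the trick the top_k test below relies on.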
test/legacy_test/CMakeLists.txt

@@ -1199,7 +1199,10 @@ set(TEST_CINN_OPS
     test_instance_norm_op
     test_cumsum_op
     test_pad_op
-    test_split_op)
+    test_split_op
+    test_erf_op
+    test_assign_op
+    test_flatten_contiguous_range_op)

 foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
   if(WITH_CINN)
test/legacy_test/test_assign_op.py

@@ -80,7 +80,6 @@ class TestAssignBFP16Op(eager_op_test.OpTest):
         self.public_python_api = paddle.assign
         self.op_type = "assign"
         self.prim_op_type = "prim"
-        self.enable_cinn = False
         x = np.random.uniform(0, 1, [100, 10]).astype(np.float32)
         x = convert_float_to_uint16(x)
         self.inputs = {'X': x}
test/legacy_test/test_erf_op.py

@@ -57,15 +57,17 @@ class TestErfLayer(unittest.TestCase):
         np.testing.assert_allclose(y_ref, y_test, rtol=1e-05)

     def test_case(self):
-        self._test_case(fluid.CPUPlace())
-        if fluid.is_compiled_with_cuda():
-            self._test_case(fluid.CUDAPlace(0))
+        with paddle.fluid.framework._static_guard():
+            self._test_case(fluid.CPUPlace())
+            if fluid.is_compiled_with_cuda():
+                self._test_case(fluid.CUDAPlace(0))

     def test_name(self):
-        with fluid.program_guard(fluid.Program()):
-            x = paddle.static.data('x', [3, 4])
-            y = paddle.erf(x, name='erf')
-            self.assertTrue('erf' in y.name)
+        with paddle.fluid.framework._static_guard():
+            with fluid.program_guard(fluid.Program()):
+                x = paddle.static.data('x', [3, 4])
+                y = paddle.erf(x, name='erf')
+                self.assertTrue('erf' in y.name)


 class TestErfFP16OP(OpTest):
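The _static_guard wrapping added above keeps these static-graph assertions from leaking graph mode into the rest of the dynamic-by-default test run. A rough sketch of what such a guard needs to do (a hypothetical stand-in, not Paddle's _static_guard implementation):

from contextlib import contextmanager

import paddle

@contextmanager
def static_guard():
    # Run the body in static-graph mode, then restore dynamic mode
    # even if a test assertion fails midway.
    paddle.enable_static()
    try:
        yield
    finally:
        paddle.disable_static()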
test/legacy_test/test_fill_any_like_op.py

@@ -88,7 +88,7 @@ class TestFillAnyLikeOpBfloat16(OpTest):
         self.check_output_with_place(place, check_prim=True)

     def if_enable_cinn(self):
-        self.enable_cinn = False
+        pass


 class TestFillAnyLikeOpValue1(TestFillAnyLikeOp):
test/legacy_test/test_flatten_contiguous_range_op.py

@@ -30,7 +30,7 @@ class TestFlattenOp(OpTest):
         self.prim_op_type = "comp"
         self.start_axis = 0
         self.stop_axis = -1
-        self.skip_cinn()
+        self.if_enable_cinn()
         self.init_test_case()
         self.init_test_dtype()
         self.init_input_data()
@@ -40,8 +40,8 @@ class TestFlattenOp(OpTest):
             "XShape": np.random.random(self.in_shape).astype("float32"),
         }

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_enable_cinn(self):
+        pass

     def test_check_output(self):
         if str(self.dtype) in {"float16", "uint16"}:
@@ -104,6 +104,9 @@ class TestFlattenFP16Op(TestFlattenOp):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op(TestFlattenOp):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
@@ -142,6 +145,9 @@ class TestFlattenFP16Op_1(TestFlattenOp_1):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_1(TestFlattenOp_1):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
@@ -180,6 +186,9 @@ class TestFlattenFP16Op_2(TestFlattenOp_2):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_2(TestFlattenOp_2):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
@@ -218,6 +227,9 @@ class TestFlattenFP16Op_3(TestFlattenOp_3):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_3(TestFlattenOp_3):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
@@ -256,6 +268,9 @@ class TestFlattenFP16Op_4(TestFlattenOp_4):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_4(TestFlattenOp_4):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
@@ -294,6 +309,9 @@ class TestFlattenFP16Op_5(TestFlattenOp_5):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16Op_5(TestFlattenOp_5):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
@@ -305,7 +323,7 @@ class TestFlattenOp_ZeroDim(TestFlattenOp):
         self.stop_axis = -1
         self.new_shape = (1,)

-    def skip_cinn(self):
+    def if_enable_cinn(self):
         self.enable_cinn = False

     def init_attrs(self):
@@ -363,6 +381,9 @@ class TestFlattenFP16OpSixDims(TestFlattenOpSixDims):
     "core is not complied with CUDA and not support the bfloat16",
 )
 class TestFlattenBF16OpSixDims(TestFlattenOpSixDims):
+    def if_enable_cinn(self):
+        pass
+
     def init_test_dtype(self):
         self.dtype = "uint16"
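The rename from skip_cinn to if_enable_cinn above is the same refactor applied across this commit: the base test's setUp calls an overridable hook instead of hard-coding the CINN flag, so only the variants that cannot run under CINN (the bf16 and zero-dim cases) opt out. A minimal sketch of the hook pattern, with illustrative class names that are not from the diff:

import unittest

class BaseFlattenLikeTest(unittest.TestCase):
    # Hypothetical base class standing in for OpTest.
    def setUp(self):
        self.enable_cinn = True   # default: exercise the CINN path
        self.if_enable_cinn()     # hook: subclasses may opt out

    def if_enable_cinn(self):
        pass                      # most dtypes keep the default

class BF16Variant(BaseFlattenLikeTest):
    def if_enable_cinn(self):
        self.enable_cinn = False  # bf16 cases skip CINN

    def test_flag(self):
        self.assertFalse(self.enable_cinn)

if __name__ == "__main__":
    unittest.main()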
test/legacy_test/test_index_select_op.py

@@ -19,7 +19,7 @@ from eager_op_test import OpTest, convert_float_to_uint16
 import paddle
 from paddle import fluid
-from paddle.fluid import Program, program_guard
+from paddle.fluid import Program, core, program_guard

 np.random.seed(1024)
@@ -102,8 +102,11 @@ class TestIndexSelectFP16OP(TestIndexSelectOp):
 class TestIndexSelectBF16Op(OpTest):
     def setUp(self):
         self.python_api = paddle.index_select
+        self.public_python_api = paddle.index_select
+        self.prim_op_type = "comp"
         self.op_type = "index_select"
         self.init_dtype_type()
+        self.if_skip_cinn()
         index_np = np.random.randint(
             low=0, high=self.x_shape[self.dim], size=self.index_size
         )
@@ -124,6 +127,9 @@ class TestIndexSelectBF16Op(OpTest):
         out = np.reshape(out_list, self.out_shape)
         self.outputs = {'Out': convert_float_to_uint16(out)}

+    def if_skip_cinn(self):
+        self.enable_cinn = False
+
     def init_dtype_type(self):
         self.dim = 1
         self.x_type = np.uint16
@@ -132,10 +138,12 @@ class TestIndexSelectBF16Op(OpTest):
         self.index_size = 100

     def test_check_output(self):
-        self.check_output()
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place)

     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        place = core.CUDAPlace(0)
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestIndexSelectAPI(unittest.TestCase):
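Switching from check_output() to check_output_with_place(core.CUDAPlace(0)) pins the bf16 case to the GPU, in line with the unittest.skipUnless guards the other BF16 classes in this commit carry. A sketch of that guard pattern (the class body is illustrative, and the two core predicates are used on the assumption they match the diff's skip message):

import unittest

from paddle.fluid import core

@unittest.skipUnless(
    core.is_compiled_with_cuda()
    and core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or does not support bfloat16",
)
class TestSomeBF16Op(unittest.TestCase):  # hypothetical test class
    def test_check_output(self):
        place = core.CUDAPlace(0)  # bf16 kernels are checked on GPU only
        self.assertIsNotNone(place)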
test/legacy_test/test_top_k_v2_op.py

@@ -15,7 +15,11 @@
 import unittest

 import numpy as np
-from eager_op_test import OpTest, convert_float_to_uint16
+from eager_op_test import (
+    OpTest,
+    convert_float_to_uint16,
+    convert_uint16_to_float,
+)

 import paddle
 from paddle.fluid import core
@@ -51,6 +55,7 @@ class TestTopkOp(OpTest):
         self.dtype = np.float64
         self.input_data = np.random.rand(10, 20)
         self.init_args()
+        self.if_enable_cinn()
         self.inputs = {'X': self.input_data}
         self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
         output, indices = numpy_topk(
@@ -58,6 +63,9 @@ class TestTopkOp(OpTest):
         )
         self.outputs = {'Out': output, 'Indices': indices}

+    def if_enable_cinn(self):
+        pass
+
     def test_check_output(self):
         self.check_output()
@@ -115,6 +123,7 @@ class TestTopkOp4(TestTopkOp):
         self.dtype = np.float64
         self.input_data = np.random.rand(10, 10, 5)
         self.init_args()
+        self.if_enable_cinn()
         self.inputs = {'X': self.input_data}
         self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
         output, indices = numpy_topk(
@@ -137,6 +146,7 @@ class TestTopkOp5(TestTopkOp):
         self.dtype = np.float64
         self.input_data = np.random.rand(10, 10, 5)
         self.init_args()
+        self.if_enable_cinn()
         self.inputs = {'X': self.input_data}
         self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
         output, indices = numpy_topk(
@@ -159,6 +169,7 @@ class TestTopkOp6(TestTopkOp):
         self.dtype = np.float32
         self.input_data = np.random.rand(10, 10, 5)
         self.init_args()
+        self.if_enable_cinn()
         self.inputs = {'X': self.input_data}
         self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
         output, indices = numpy_topk(
@@ -181,6 +192,7 @@ class TestTopkOp7(TestTopkOp):
         self.dtype = np.float16
         self.input_data = np.random.rand(10, 20, 10)
         self.init_args()
+        self.if_enable_cinn()
         self.inputs = {'X': self.input_data}
         self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
         output, indices = numpy_topk(
@@ -198,6 +210,7 @@ class TestTopkFP16Op(TestTopkOp):
         self.prim_op_type = "prim"
         self.input_data = np.random.rand(10, 20).astype(self.dtype)
         self.init_args()
+        self.if_enable_cinn()
         self.inputs = {'X': self.input_data}
         self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
         output, indices = numpy_topk(
@@ -218,9 +231,11 @@ class TestTopkBF16Op(TestTopkOp):
         self.public_python_api = paddle.topk
         self.dtype = np.uint16
         self.prim_op_type = "prim"
-        self.input_data = np.random.rand(10, 20).astype(np.float32)
+        self.input_data = np.random.random([10, 20]).astype(np.float32)
         self.init_args()
+        self.if_enable_cinn()
         self.inputs = {'X': convert_float_to_uint16(self.input_data)}
+        self.input_data = convert_uint16_to_float(self.inputs['X'])
         self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
         output, indices = numpy_topk(
             self.input_data, axis=self.axis, k=self.k, largest=self.largest
@@ -230,13 +245,16 @@ class TestTopkBF16Op(TestTopkOp):
             'Indices': indices,
         }

+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=True)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, {'X'}, 'Out', check_eager=True)
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestTopKAPI(unittest.TestCase):
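The two-step input setup in TestTopkBF16Op is deliberate: the float32 data handed to numpy_topk is first truncated to bf16 and widened back, so the NumPy reference ranks exactly the values the bf16 kernel receives. Otherwise two inputs that collide after truncation could be ordered differently by the reference and the op. A small self-contained illustration (to_bf16 is a hypothetical composition of the conversion pair sketched earlier):

import numpy as np

def to_bf16(x):
    # Truncate float32 to bf16 precision, then widen back to float32.
    bits = np.right_shift(np.asarray(x, dtype=np.float32).view(np.uint32), 16)
    return np.left_shift(bits.astype(np.uint32), 16).view(np.float32)

a = np.array([1.0000001], dtype=np.float32)
b = np.array([1.0000002], dtype=np.float32)
print(a == b)                    # [False]: distinct float32 values
print(to_bf16(a) == to_bf16(b))  # [ True]: identical after bf16 truncation,
# so a reference top-k computed on the raw float32 data could rank
# these two elements differently from the bf16 kernel.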
test/legacy_test/test_transpose_op.py

@@ -244,7 +244,7 @@ class TestAutoTuneTransposeBF16Op(OpTest):
         self.python_api = paddle.transpose
         self.public_python_api = paddle.transpose
         self.prim_op_type = "prim"
-        self.enable_cinn = False
+        self.if_enable_cinn()
         x = np.random.random(self.shape).astype("float32")
         self.inputs = {'X': convert_float_to_uint16(x)}
         self.attrs = {
@@ -258,6 +258,9 @@ class TestAutoTuneTransposeBF16Op(OpTest):
             'Out': self.inputs['X'].transpose(self.axis),
         }

+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
     def initTestCase(self):
         fluid.core.set_autotune_range(0, 3)
         fluid.core.update_autotune_status()
@@ -283,7 +286,7 @@ class TestTransposeFP16Op(OpTest):
         self.initTestCase()
         self.dtype = np.float16
         self.prim_op_type = "prim"
-        self.enable_cinn = False
+        self.if_enable_cinn()
         self.python_api = paddle.transpose
         self.public_python_api = paddle.transpose
         x = np.random.random(self.shape).astype(self.dtype)
@@ -298,6 +301,9 @@ class TestTransposeFP16Op(OpTest):
             'Out': self.inputs['X'].transpose(self.axis),
         }

+    def if_enable_cinn(self):
+        pass
+
     def init_op_type(self):
         self.op_type = "transpose2"
         self.use_mkldnn = False
@@ -323,6 +329,7 @@ class TestTransposeBF16Op(OpTest):
         self.python_api = paddle.transpose
         self.public_python_api = paddle.transpose
         x = np.random.random(self.shape).astype("float32")
+        self.if_enable_cinn()
         self.inputs = {'X': convert_float_to_uint16(x)}
         self.attrs = {
@@ -336,6 +343,9 @@ class TestTransposeBF16Op(OpTest):
             'Out': self.inputs['X'].transpose(self.axis),
         }

+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
     def init_op_type(self):
         self.op_type = "transpose2"
         self.use_mkldnn = False