BaiXuePrincess / Paddle · forked from PaddlePaddle / Paddle
Commit 0c52e8a8 (unverified)
Authored Dec 28, 2022 by RichardWooSJTU; committed via GitHub on Dec 28, 2022
skip this ut when cuda < 11.2 && cuda_arch < 8 (#49313)
Parent: 69e51c77
Showing 1 changed file with 68 additions and 0 deletions (+68 −0)
python/paddle/fluid/tests/unittests/test_fused_multi_transformer_int8_op.py (+68 −0)
@@ -15,8 +15,10 @@
 import unittest
 
 import numpy as np
+from test_sparse_attention_op import get_cuda_version
+
 import paddle
 import paddle.fluid.core as core
 import paddle.nn.functional as F
 from paddle import _legacy_C_ops, tensor
 from paddle.fluid.framework import default_main_program
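The only non-decorator change is the new import: get_cuda_version is borrowed from the neighbouring test_sparse_attention_op test module, whose body is not shown on this page. A minimal sketch of a helper with the same contract, assuming nvcc is on PATH and encoding CUDA x.y as x*1000 + y*10 so that 11.2 becomes 11020 (matching the "< 11020" comparisons below), could look like this:

import os
import re


def get_cuda_version():
    # Hypothetical sketch, not the actual Paddle helper: parse
    # "release 11.2," out of `nvcc --version` and encode x.y as
    # x * 1000 + y * 10, so 11.2 -> 11020 and plain integer
    # comparisons such as `get_cuda_version() < 11020` work.
    output = os.popen("nvcc --version").read()
    match = re.search(r"release (\d+)\.(\d+)", output)
    if match is None:
        return -1  # nvcc not found / not a CUDA build
    major, minor = int(match.group(1)), int(match.group(2))
    return major * 1000 + minor * 10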
@@ -128,6 +130,12 @@ def fused_multi_transformer_int8(
     return final_out
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or get_cuda_version() < 11020
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "FusedMultiTransformerInt8 requires CUDA >= 11.2 and CUDA_ARCH >= 8",
+)
 class TestFusedMultiTransformerInt8Op(unittest.TestCase):
     def setUp(self):
         self.config()
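Each of the eleven hunks that follow applies this same guard. unittest.skipIf evaluates its condition once, at class-definition (test-collection) time, and when the condition holds every test in the class is reported as skipped with the given reason instead of being run and failing. A self-contained illustration of the pattern, with a hypothetical stand-in flag in place of the real CUDA probes:

import unittest

# Stand-in for `core.is_compiled_with_cuda()` and the version/capability
# probes used above; flip to True to see the test actually execute.
MEETS_CUDA_REQUIREMENTS = False


@unittest.skipIf(
    not MEETS_CUDA_REQUIREMENTS,
    "requires CUDA >= 11.2 and CUDA_ARCH >= 8",
)
class DemoSkipTest(unittest.TestCase):
    def test_something(self):
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()  # reports the test as skipped ("s"), not failed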
@@ -781,6 +789,12 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase):
         )
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or get_cuda_version() < 11020
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "FusedMultiTransformerInt8 requires CUDA >= 11.2 and CUDA_ARCH >= 8",
+)
 class TestFusedMultiTransformerInt8OpFp16(TestFusedMultiTransformerInt8Op):
     def config(self):
         super().config()
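The third clause of the guard inspects the GPU itself: paddle.device.cuda.get_device_capability() returns a (major, minor) compute-capability pair, so "[0] < 8" rejects GPUs whose major version is below 8, i.e. anything older than the Ampere generation (sm_80) that this fused INT8 kernel requires. A small sketch of the same probe, assuming a CUDA build of Paddle is installed:

import paddle

if paddle.is_compiled_with_cuda():
    major, minor = paddle.device.cuda.get_device_capability()
    # e.g. (8, 0) on an A100; the decorators above skip when major < 8
    print(f"compute capability: {major}.{minor}")
else:
    print("CPU-only build: every FusedMultiTransformerInt8 test is skipped")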
@@ -788,6 +802,12 @@ class TestFusedMultiTransformerInt8OpFp16(TestFusedMultiTransformerInt8Op):
         self.layers = 3  # odd layers
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or get_cuda_version() < 11020
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "FusedMultiTransformerInt8 requires CUDA >= 11.2 and CUDA_ARCH >= 8",
+)
 class TestFusedMultiTransformerInt8OpCacheKV(TestFusedMultiTransformerInt8Op):
     def config(self):
         super().config()
@@ -798,6 +818,12 @@ class TestFusedMultiTransformerInt8OpCacheKV(TestFusedMultiTransformerInt8Op):
         self.layers = 3  # odd layers
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or get_cuda_version() < 11020
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "FusedMultiTransformerInt8 requires CUDA >= 11.2 and CUDA_ARCH >= 8",
+)
 class TestFusedMultiTransformerInt8OpCacheKVFp16(
     TestFusedMultiTransformerInt8Op
 ):
@@ -809,6 +835,12 @@ class TestFusedMultiTransformerInt8OpCacheKVFp16(
         self.x_type = np.float16
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or get_cuda_version() < 11020
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "FusedMultiTransformerInt8 requires CUDA >= 11.2 and CUDA_ARCH >= 8",
+)
 class TestFusedMultiTransformerInt8OpGenCacheKV(
     TestFusedMultiTransformerInt8Op
 ):
@@ -818,6 +850,12 @@ class TestFusedMultiTransformerInt8OpGenCacheKV(
         self.gen_cache_kv = True
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or get_cuda_version() < 11020
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "FusedMultiTransformerInt8 requires CUDA >= 11.2 and CUDA_ARCH >= 8",
+)
 class TestFusedMultiTransformerInt8OpGenCacheKVFp16(
     TestFusedMultiTransformerInt8Op
 ):
@@ -829,6 +867,12 @@ class TestFusedMultiTransformerInt8OpGenCacheKVFp16(
         self.layers = 3  # odd layers
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or get_cuda_version() < 11020
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "FusedMultiTransformerInt8 requires CUDA >= 11.2 and CUDA_ARCH >= 8",
+)
 class TestFusedMultiTransformerInt8OpPostLayerNormFp16(
     TestFusedMultiTransformerInt8Op
 ):
@@ -839,6 +883,12 @@ class TestFusedMultiTransformerInt8OpPostLayerNormFp16(
         self.pre_layer_norm = False
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or get_cuda_version() < 11020
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "FusedMultiTransformerInt8 requires CUDA >= 11.2 and CUDA_ARCH >= 8",
+)
 class TestFusedMultiTransformerInt8OpCacheKVPostLayerNorm(
     TestFusedMultiTransformerInt8Op
 ):
@@ -851,6 +901,12 @@ class TestFusedMultiTransformerInt8OpCacheKVPostLayerNorm(
         self.pre_layer_norm = False
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or get_cuda_version() < 11020
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "FusedMultiTransformerInt8 requires CUDA >= 11.2 and CUDA_ARCH >= 8",
+)
 class TestFusedMultiTransformerInt8OpCacheKVPostLayerNormFp16(
     TestFusedMultiTransformerInt8Op
 ):
@@ -863,6 +919,12 @@ class TestFusedMultiTransformerInt8OpCacheKVPostLayerNormFp16(
         self.pre_layer_norm = False
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or get_cuda_version() < 11020
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "FusedMultiTransformerInt8 requires CUDA >= 11.2 and CUDA_ARCH >= 8",
+)
 class TestFusedMultiTransformerInt8OpGenCacheKVPostLayerNorm(
     TestFusedMultiTransformerInt8Op
 ):
@@ -873,6 +935,12 @@ class TestFusedMultiTransformerInt8OpGenCacheKVPostLayerNorm(
         self.pre_layer_norm = False
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or get_cuda_version() < 11020
+    or paddle.device.cuda.get_device_capability()[0] < 8,
+    "FusedMultiTransformerInt8 requires CUDA >= 11.2 and CUDA_ARCH >= 8",
+)
 class TestFusedMultiTransformerInt8OpGenCacheKVPostLayerNormFp16(
     TestFusedMultiTransformerInt8Op
 ):
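With all decorators in place, running the module on a machine that misses any requirement reports the test classes as skipped rather than failed. A hypothetical way to drive just the base class from Python, assuming the working directory is python/paddle/fluid/tests/unittests:

import unittest

# Load and run one test class; at verbosity=2 skipped tests are listed
# together with the decorator's reason string.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "test_fused_multi_transformer_int8_op.TestFusedMultiTransformerInt8Op"
)
unittest.TextTestRunner(verbosity=2).run(suite)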