Unverified commit aaf873b2, authored by W WJJ1995, committed by GitHub

[AMP OP&Test] Add fp16/bf16 support for isnan/isfinite/isinf ops (#52259)

* add bf16 test for isfinite

* fixed for ci

* deal with comments

* fixed test

* skip test in cpu

* deal with comments

* fixed for ci

* fixed testcase

* fixed for ci

* fixed for testcase
Parent 6b74cf76
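The diff below registers bfloat16 kernels for isinf/isnan/isfinite, adds bfloat16 functor specializations, extends the static-graph dtype checks, and adds BF16 unit tests. As a rough usage sketch (not part of this commit), the intent is that calls like the following work for bfloat16 tensors on a CUDA device; the cast call and the exact printed values are assumptions:

# Illustrative only; assumes a CUDA build of Paddle with bfloat16 support.
import paddle

x = paddle.to_tensor([1.0, float("inf"), float("nan")])
x_bf16 = paddle.cast(x, "bfloat16")  # bfloat16 tensor (assumed supported here)

print(paddle.isnan(x_bf16))     # expected: [False, False, True ]
print(paddle.isinf(x_bf16))     # expected: [False, True,  False]
print(paddle.isfinite(x_bf16))  # expected: [True,  False, False]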
@@ -88,7 +88,8 @@ struct DataTypeTrait<void> {
   _ForEachDataTypeHelper_(callback, double, FP64);                       \
   _ForEachDataTypeHelper_(callback, int, INT32);                         \
   _ForEachDataTypeHelper_(callback, int64_t, INT64);                     \
-  _ForEachDataTypeHelper_(callback, ::paddle::platform::float16, FP16);
+  _ForEachDataTypeHelper_(callback, ::paddle::platform::float16, FP16);  \
+  _ForEachDataTypeHelper_(callback, ::paddle::platform::bfloat16, BF16);
 
 // For the use of thrust, as index-type elements can be only integers.
 #define _ForEachDataTypeTiny_(callback) \
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
 #include "paddle/fluid/operators/isfinite_op.h"
+#include "paddle/fluid/platform/bfloat16.h"
 #include "paddle/fluid/platform/float16.h"
 
 namespace ops = paddle::operators;
@@ -22,18 +23,21 @@ REGISTER_OP_CUDA_KERNEL(
     ops::OverflowKernel<phi::GPUContext, int, ops::InfinityFunctor>,
     ops::OverflowKernel<phi::GPUContext, float, ops::InfinityFunctor>,
     ops::OverflowKernel<phi::GPUContext, double, ops::InfinityFunctor>,
-    ops::OverflowKernel<phi::GPUContext, plat::float16, ops::InfinityFunctor>);
+    ops::OverflowKernel<phi::GPUContext, plat::float16, ops::InfinityFunctor>,
+    ops::OverflowKernel<phi::GPUContext, plat::bfloat16, ops::InfinityFunctor>);
 
 REGISTER_OP_CUDA_KERNEL(
     isnan,
     ops::OverflowKernel<phi::GPUContext, int, ops::NANFunctor>,
     ops::OverflowKernel<phi::GPUContext, float, ops::NANFunctor>,
     ops::OverflowKernel<phi::GPUContext, double, ops::NANFunctor>,
-    ops::OverflowKernel<phi::GPUContext, plat::float16, ops::NANFunctor>);
+    ops::OverflowKernel<phi::GPUContext, plat::float16, ops::NANFunctor>,
+    ops::OverflowKernel<phi::GPUContext, plat::bfloat16, ops::NANFunctor>);
 
 REGISTER_OP_CUDA_KERNEL(
     isfinite,
     ops::OverflowKernel<phi::GPUContext, int, ops::IsfiniteFunctor>,
     ops::OverflowKernel<phi::GPUContext, float, ops::IsfiniteFunctor>,
     ops::OverflowKernel<phi::GPUContext, double, ops::IsfiniteFunctor>,
-    ops::OverflowKernel<phi::GPUContext, plat::float16, ops::IsfiniteFunctor>);
+    ops::OverflowKernel<phi::GPUContext, plat::float16, ops::IsfiniteFunctor>,
+    ops::OverflowKernel<phi::GPUContext, plat::bfloat16, ops::IsfiniteFunctor>);
@@ -25,6 +25,7 @@ PD_REGISTER_KERNEL(isinf,
                    float,
                    double,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    int,
                    int64_t) {
   kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
@@ -37,6 +38,7 @@ PD_REGISTER_KERNEL(isnan,
                    float,
                    double,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    int,
                    int64_t) {
   kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
@@ -49,6 +51,7 @@ PD_REGISTER_KERNEL(isfinite,
                    float,
                    double,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    int,
                    int64_t) {
   kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
@@ -45,6 +45,13 @@ struct IsNanFunctor<phi::dtype::float16, void> {
   }
 };
 
+template <>
+struct IsNanFunctor<phi::dtype::bfloat16, void> {
+  HOSTDEVICE bool operator()(const phi::dtype::bfloat16& a) const {
+    return phi::dtype::isnan(a);
+  }
+};
+
 template <typename T, class Enable = void>
 struct IsInfFunctor {
   HOSTDEVICE bool operator()(const T& a) const {
@@ -69,6 +76,13 @@ struct IsInfFunctor<phi::dtype::float16, void> {
   }
 };
 
+template <>
+struct IsInfFunctor<phi::dtype::bfloat16, void> {
+  HOSTDEVICE bool operator()(const phi::dtype::bfloat16& a) const {
+    return phi::dtype::isinf(a);
+  }
+};
+
 template <typename T, class Enable = void>
 struct IsFiniteFunctor {
   HOSTDEVICE bool operator()(const T& a) const {
@@ -94,5 +108,12 @@ struct IsFiniteFunctor<phi::dtype::float16, void> {
   }
 };
 
+template <>
+struct IsFiniteFunctor<phi::dtype::bfloat16, void> {
+  HOSTDEVICE bool operator()(const phi::dtype::bfloat16& a) const {
+    return phi::dtype::isfinite(a);
+  }
+};
+
 }  // namespace funcs
 }  // namespace phi
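The specializations above defer to phi::dtype::isnan/isinf/isfinite for bfloat16. As a minimal sketch of what that classification means at the bit level (a Python stand-in, not Paddle code): a bfloat16 value has 1 sign bit, 8 exponent bits, and 7 mantissa bits, and the non-finite encodings are exactly the ones with an all-ones exponent.

# Bit-level view of bfloat16 classification; illustrative only, assuming the
# standard layout (sign: bit 15, exponent: bits 14-7, mantissa: bits 6-0).
def bf16_isinf(bits: int) -> bool:
    return (bits & 0x7FFF) == 0x7F80          # all-ones exponent, zero mantissa

def bf16_isnan(bits: int) -> bool:
    return (bits & 0x7F80) == 0x7F80 and (bits & 0x007F) != 0

def bf16_isfinite(bits: int) -> bool:
    return (bits & 0x7F80) != 0x7F80          # exponent not all ones

assert bf16_isinf(0x7F80)      # +inf
assert bf16_isnan(0x7FC0)      # a quiet NaN
assert bf16_isfinite(0x3F80)   # 1.0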
@@ -25,6 +25,7 @@ PD_REGISTER_KERNEL(isinf,
                    float,
                    double,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    int,
                    int64_t) {
   kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
@@ -37,6 +38,7 @@ PD_REGISTER_KERNEL(isnan,
                    float,
                    double,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    int,
                    int64_t) {
   kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
@@ -49,6 +51,7 @@ PD_REGISTER_KERNEL(isfinite,
                    float,
                    double,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    int,
                    int64_t) {
   kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
 
 from paddle.fluid import core
 
@@ -48,6 +48,28 @@ class TestFP16Inf(TestInf):
         self.dtype = np.float16
 
 
+# BF16 isinf test
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support bfloat16",
+)
+class TestInfBF16(OpTest):
+    def setUp(self):
+        self.op_type = "isinf"
+        self.dtype = np.uint16
+        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
+        x[0] = np.inf
+        x[-1] = np.inf
+        out = np.array(True)
+
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.outputs = {'Out': out}
+
+    def test_output(self):
+        self.check_output_with_place(core.CUDAPlace(0))
+
+
 class TestNAN(OpTest):
     def setUp(self):
         self.op_type = "isnan"
@@ -76,6 +98,28 @@ class TestFP16NAN(TestNAN):
         self.dtype = np.float16
 
 
+# BF16 isnan test
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support bfloat16",
+)
+class TestNANBF16(OpTest):
+    def setUp(self):
+        self.op_type = "isnan"
+        self.dtype = np.uint16
+        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
+        x[0] = np.nan
+        x[-1] = np.nan
+        out = np.array(True)
+
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.outputs = {'Out': out}
+
+    def test_output(self):
+        self.check_output_with_place(core.CUDAPlace(0))
+
+
 class TestIsfinite(OpTest):
     def setUp(self):
         self.op_type = "isfinite"
@@ -105,5 +149,27 @@ class TestFP16Isfinite(TestIsfinite):
         self.dtype = np.float16
 
 
+# BF16 isfinite test
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support bfloat16",
+)
+class TestIsfiniteBF16(OpTest):
+    def setUp(self):
+        self.op_type = "isfinite"
+        self.dtype = np.uint16
+        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
+        x[0] = np.inf
+        x[-1] = np.nan
+        out = np.array(False)
+
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.outputs = {'Out': out}
+
+    def test_output(self):
+        self.check_output_with_place(core.CUDAPlace(0))
+
+
 if __name__ == '__main__':
     unittest.main()
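The BF16 test cases above feed their inputs as np.uint16 arrays produced by convert_float_to_uint16. A minimal stand-in for that helper is sketched below to show why uint16 is the right container: a bfloat16 value is the upper 16 bits of the corresponding float32. The real helper in eager_op_test may round rather than truncate, so treat this version as an assumption.

import numpy as np

def float32_to_bf16_bits(x) -> np.ndarray:
    # Keep the upper 16 bits of each float32, i.e. truncate to bfloat16.
    x = np.ascontiguousarray(x, dtype=np.float32)
    return (x.view(np.uint32) >> 16).astype(np.uint16)

bits = float32_to_bf16_bits(np.array([1.0, np.inf, np.nan]))
print([hex(int(b)) for b in bits])  # e.g. ['0x3f80', '0x7f80', '0x7fc0']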
@@ -3466,7 +3466,14 @@ def isfinite(x, name=None):
         check_variable_and_dtype(
             x,
             'x',
-            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            [
+                'float16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+                'uint16',
+            ],
             'isfinite',
         )
         out = helper.create_variable_for_type_inference('bool')
@@ -3502,7 +3509,17 @@ def isinf(x, name=None):
     else:
         helper = LayerHelper("isinf_v2", **locals())
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf'
+            x,
+            'x',
+            [
+                'float16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+                'uint16',
+            ],
+            'isinf',
         )
         out = helper.create_variable_for_type_inference(dtype='bool')
         helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
@@ -3535,7 +3552,17 @@ def isnan(x, name=None):
     else:
         helper = LayerHelper("isnan_v2", **locals())
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan'
+            x,
+            'x',
+            [
+                'float16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+                'uint16',
+            ],
+            'isnan',
         )
         out = helper.create_variable_for_type_inference(dtype='bool')
         helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})