Unverified commit aaf873b2, authored by WJJ1995, committed by GitHub

[AMP OP&Test]Add fp16/bf16 support isnan/isfinite/isinf op (#52259)

* add bfp16 test for isfinite

* fixed for ci

* deal with comments

* fixed test

* skip test in cpu

* deal with comments

* fixed for ci

* fixed testcase

* fixed for ci

* fixed for testcase
Parent 6b74cf76
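In short, the patch registers bfloat16 variants of the isinf/isnan/isfinite kernels, adds the corresponding bfloat16 functor specializations, accepts 'uint16' (the static-graph dtype name under which bfloat16 tensors are checked) in the Python dtype checks, and adds BF16 unit tests. A minimal usage sketch of what the change enables, assuming a Paddle build in which these bfloat16 kernels are available on the current device:

import paddle

# Illustrative only (not part of the commit): exercise the newly registered
# bfloat16 kernels through the public API.
x = paddle.to_tensor([1.0, float('inf'), float('nan')], dtype='bfloat16')
print(paddle.isinf(x))     # expected: [False, True , False]
print(paddle.isnan(x))     # expected: [False, False, True ]
print(paddle.isfinite(x))  # expected: [True , False, False]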
......@@ -83,12 +83,13 @@ struct DataTypeTrait<void> {
_ForEachDataTypeHelper_( \
callback, ::paddle::platform::complex<double>, COMPLEX128);
-#define _ForEachDataTypeNormal_(callback) \
-  _ForEachDataTypeHelper_(callback, float, FP32); \
-  _ForEachDataTypeHelper_(callback, double, FP64); \
-  _ForEachDataTypeHelper_(callback, int, INT32); \
-  _ForEachDataTypeHelper_(callback, int64_t, INT64); \
-  _ForEachDataTypeHelper_(callback, ::paddle::platform::float16, FP16);
+#define _ForEachDataTypeNormal_(callback) \
+  _ForEachDataTypeHelper_(callback, float, FP32); \
+  _ForEachDataTypeHelper_(callback, double, FP64); \
+  _ForEachDataTypeHelper_(callback, int, INT32); \
+  _ForEachDataTypeHelper_(callback, int64_t, INT64); \
+  _ForEachDataTypeHelper_(callback, ::paddle::platform::float16, FP16); \
+  _ForEachDataTypeHelper_(callback, ::paddle::platform::bfloat16, BF16);
// For the use of thrust, as index-type elements can be only integers.
#define _ForEachDataTypeTiny_(callback) \
......
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
......@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/isfinite_op.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/float16.h"
namespace ops = paddle::operators;
......@@ -22,18 +23,21 @@ REGISTER_OP_CUDA_KERNEL(
ops::OverflowKernel<phi::GPUContext, int, ops::InfinityFunctor>,
ops::OverflowKernel<phi::GPUContext, float, ops::InfinityFunctor>,
ops::OverflowKernel<phi::GPUContext, double, ops::InfinityFunctor>,
-ops::OverflowKernel<phi::GPUContext, plat::float16, ops::InfinityFunctor>);
+ops::OverflowKernel<phi::GPUContext, plat::float16, ops::InfinityFunctor>,
+ops::OverflowKernel<phi::GPUContext, plat::bfloat16, ops::InfinityFunctor>);
REGISTER_OP_CUDA_KERNEL(
isnan,
ops::OverflowKernel<phi::GPUContext, int, ops::NANFunctor>,
ops::OverflowKernel<phi::GPUContext, float, ops::NANFunctor>,
ops::OverflowKernel<phi::GPUContext, double, ops::NANFunctor>,
-ops::OverflowKernel<phi::GPUContext, plat::float16, ops::NANFunctor>);
+ops::OverflowKernel<phi::GPUContext, plat::float16, ops::NANFunctor>,
+ops::OverflowKernel<phi::GPUContext, plat::bfloat16, ops::NANFunctor>);
REGISTER_OP_CUDA_KERNEL(
isfinite,
ops::OverflowKernel<phi::GPUContext, int, ops::IsfiniteFunctor>,
ops::OverflowKernel<phi::GPUContext, float, ops::IsfiniteFunctor>,
ops::OverflowKernel<phi::GPUContext, double, ops::IsfiniteFunctor>,
-ops::OverflowKernel<phi::GPUContext, plat::float16, ops::IsfiniteFunctor>);
+ops::OverflowKernel<phi::GPUContext, plat::float16, ops::IsfiniteFunctor>,
+ops::OverflowKernel<phi::GPUContext, plat::bfloat16, ops::IsfiniteFunctor>);
......@@ -25,6 +25,7 @@ PD_REGISTER_KERNEL(isinf,
float,
double,
phi::dtype::float16,
+phi::dtype::bfloat16,
int,
int64_t) {
kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
......@@ -37,6 +38,7 @@ PD_REGISTER_KERNEL(isnan,
float,
double,
phi::dtype::float16,
+phi::dtype::bfloat16,
int,
int64_t) {
kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
......@@ -49,6 +51,7 @@ PD_REGISTER_KERNEL(isfinite,
float,
double,
phi::dtype::float16,
+phi::dtype::bfloat16,
int,
int64_t) {
kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
......
......@@ -45,6 +45,13 @@ struct IsNanFunctor<phi::dtype::float16, void> {
}
};
+template <>
+struct IsNanFunctor<phi::dtype::bfloat16, void> {
+  HOSTDEVICE bool operator()(const phi::dtype::bfloat16& a) const {
+    return phi::dtype::isnan(a);
+  }
+};
template <typename T, class Enable = void>
struct IsInfFunctor {
HOSTDEVICE bool operator()(const T& a) const {
......@@ -69,6 +76,13 @@ struct IsInfFunctor<phi::dtype::float16, void> {
}
};
+template <>
+struct IsInfFunctor<phi::dtype::bfloat16, void> {
+  HOSTDEVICE bool operator()(const phi::dtype::bfloat16& a) const {
+    return phi::dtype::isinf(a);
+  }
+};
template <typename T, class Enable = void>
struct IsFiniteFunctor {
HOSTDEVICE bool operator()(const T& a) const {
......@@ -94,5 +108,12 @@ struct IsFiniteFunctor<phi::dtype::float16, void> {
}
};
+template <>
+struct IsFiniteFunctor<phi::dtype::bfloat16, void> {
+  HOSTDEVICE bool operator()(const phi::dtype::bfloat16& a) const {
+    return phi::dtype::isfinite(a);
+  }
+};
} // namespace funcs
} // namespace phi
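Each of the specializations above simply defers to the existing phi::dtype helpers (phi::dtype::isnan / isinf / isfinite) for bfloat16. For intuition only — this is not phi code and the helper names below are hypothetical — the classification amounts to testing the 8-bit exponent and 7-bit mantissa of the uint16 bfloat16 payload:

# Standalone sketch (hypothetical helpers), assuming the standard bfloat16
# layout of 1 sign bit | 8 exponent bits | 7 mantissa bits in a uint16.
def bf16_isnan(bits: int) -> bool:
    return (bits & 0x7F80) == 0x7F80 and (bits & 0x007F) != 0

def bf16_isinf(bits: int) -> bool:
    return (bits & 0x7FFF) == 0x7F80

def bf16_isfinite(bits: int) -> bool:
    return (bits & 0x7F80) != 0x7F80

assert bf16_isnan(0x7FC0) and bf16_isinf(0x7F80) and bf16_isfinite(0x3F80)  # NaN, +inf, 1.0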
......@@ -25,6 +25,7 @@ PD_REGISTER_KERNEL(isinf,
float,
double,
phi::dtype::float16,
+phi::dtype::bfloat16,
int,
int64_t) {
kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
......@@ -37,6 +38,7 @@ PD_REGISTER_KERNEL(isnan,
float,
double,
phi::dtype::float16,
+phi::dtype::bfloat16,
int,
int64_t) {
kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
......@@ -49,6 +51,7 @@ PD_REGISTER_KERNEL(isfinite,
float,
double,
phi::dtype::float16,
+phi::dtype::bfloat16,
int,
int64_t) {
kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
from paddle.fluid import core
......@@ -48,6 +48,28 @@ class TestFP16Inf(TestInf):
        self.dtype = np.float16
+# BFP16 isinf Test
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or not support the bfloat16",
+)
+class TestInfBF16(OpTest):
+    def setUp(self):
+        self.op_type = "isinf"
+        self.dtype = np.uint16
+        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
+        x[0] = np.inf
+        x[-1] = np.inf
+        out = np.array(True)
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.outputs = {'Out': out}
+    def test_output(self):
+        self.check_output_with_place(core.CUDAPlace(0))
class TestNAN(OpTest):
def setUp(self):
self.op_type = "isnan"
......@@ -76,6 +98,28 @@ class TestFP16NAN(TestNAN):
        self.dtype = np.float16
+# BFP16 isnan Test
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or not support the bfloat16",
+)
+class TestNANBF16(OpTest):
+    def setUp(self):
+        self.op_type = "isnan"
+        self.dtype = np.uint16
+        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
+        x[0] = np.nan
+        x[-1] = np.nan
+        out = np.array(True)
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.outputs = {'Out': out}
+    def test_output(self):
+        self.check_output_with_place(core.CUDAPlace(0))
class TestIsfinite(OpTest):
def setUp(self):
self.op_type = "isfinite"
......@@ -105,5 +149,27 @@ class TestFP16Isfinite(TestIsfinite):
        self.dtype = np.float16
+# BFP16 isfinite Test
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or not support the bfloat16",
+)
+class TestIsfiniteBF16(OpTest):
+    def setUp(self):
+        self.op_type = "isfinite"
+        self.dtype = np.uint16
+        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
+        x[0] = np.inf
+        x[-1] = np.nan
+        out = np.array(False)
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.outputs = {'Out': out}
+    def test_output(self):
+        self.check_output_with_place(core.CUDAPlace(0))

if __name__ == '__main__':
    unittest.main()
......@@ -3466,7 +3466,14 @@ def isfinite(x, name=None):
        check_variable_and_dtype(
            x,
            'x',
-            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            [
+                'float16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+                'uint16',
+            ],
            'isfinite',
        )
        out = helper.create_variable_for_type_inference('bool')
......@@ -3502,7 +3509,17 @@ def isinf(x, name=None):
    else:
        helper = LayerHelper("isinf_v2", **locals())
        check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf'
+            x,
+            'x',
+            [
+                'float16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+                'uint16',
+            ],
+            'isinf',
        )
        out = helper.create_variable_for_type_inference(dtype='bool')
        helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
......@@ -3535,7 +3552,17 @@ def isnan(x, name=None):
    else:
        helper = LayerHelper("isnan_v2", **locals())
        check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan'
+            x,
+            'x',
+            [
+                'float16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+                'uint16',
+            ],
+            'isnan',
        )
        out = helper.create_variable_for_type_inference(dtype='bool')
        helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})
......