From 0480ff5de0e919b5bcba96a7b54284d0dcdd55fd Mon Sep 17 00:00:00 2001
From: denglianbin <112610123+denglianbin@users.noreply.github.com>
Date: Thu, 23 Mar 2023 10:08:18 +0800
Subject: [PATCH] [Hackathon No.45] Implement float16 data type support for
 the Paddle logical operators (#50926)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* finish pr

* skip cpu test for logical

* change test style

* fix error.
---
 paddle/phi/kernels/kps/logical_kernel.cu       |  2 ++
 .../fluid/tests/unittests/test_logical_op.py   | 16 ++++++++++++
 python/paddle/tensor/logic.py                  | 26 +++++++++++++------
 3 files changed, 36 insertions(+), 8 deletions(-)

diff --git a/paddle/phi/kernels/kps/logical_kernel.cu b/paddle/phi/kernels/kps/logical_kernel.cu
index 27c8c3d7d8e..88f313eba1a 100644
--- a/paddle/phi/kernels/kps/logical_kernel.cu
+++ b/paddle/phi/kernels/kps/logical_kernel.cu
@@ -77,12 +77,14 @@ PD_REGISTER_KERNEL(logical_xor, KPS, ALL_LAYOUT, phi::LogicalXorKernel, int) {
   kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
 }
 #else
+using float16 = phi::dtype::float16;
 #define REGISTER_LOGICAL_CUDA_KERNEL(logical_and, func_type) \
   PD_REGISTER_KERNEL(logical_and,                            \
                      KPS,                                    \
                      ALL_LAYOUT,                             \
                      phi::Logical##func_type##Kernel,        \
                      float,                                  \
+                     float16,                                \
                      double,                                 \
                      bool,                                   \
                      int64_t,                                \
diff --git a/python/paddle/fluid/tests/unittests/test_logical_op.py b/python/paddle/fluid/tests/unittests/test_logical_op.py
index cccee4e8bc8..ad80b4ce657 100755
--- a/python/paddle/fluid/tests/unittests/test_logical_op.py
+++ b/python/paddle/fluid/tests/unittests/test_logical_op.py
@@ -26,6 +26,7 @@ SUPPORTED_DTYPES = [
     np.int16,
     np.int32,
     np.int64,
+    np.float16,
     np.float32,
     np.float64,
 ]
@@ -132,6 +133,11 @@ def test(unit_test, use_gpu=False, test_error=False):
     META_DATA = dict(TEST_META_WRONG_SHAPE_DATA)
     for shape_data in META_DATA.values():
         for data_type in SUPPORTED_DTYPES:
+            if (
+                not (paddle.is_compiled_with_cuda() and use_gpu)
+                and data_type == np.float16
+            ):
+                continue
             meta_data['x_np'] = np_data_generator(
                 shape_data['x_shape'], dtype=data_type
             )
@@ -184,6 +190,16 @@ def test_type_error(unit_test, use_gpu, type_str_map):
     if use_gpu and paddle.is_compiled_with_cuda():
         place = paddle.CUDAPlace(0)
     for op_data in TEST_META_OP_DATA:
+        if (
+            paddle.is_compiled_with_cuda()
+            and use_gpu
+            and (
+                type_str_map['x'] == np.float16
+                or type_str_map['y'] == np.float16
+            )
+        ):
+            continue
+
         meta_data = dict(op_data)
         binary_op = meta_data['binary_op']
 
diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py
index e332e554a55..e10e7c647be 100644
--- a/python/paddle/tensor/logic.py
+++ b/python/paddle/tensor/logic.py
@@ -45,7 +45,16 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
         check_variable_and_dtype(
             x,
             "x",
-            ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
+            [
+                "bool",
+                "int8",
+                "int16",
+                "int32",
+                "int64",
+                "float16",
+                "float32",
+                "float64",
+            ],
             op_name,
         )
         if y is not None:
@@ -58,6 +67,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
                     "int16",
                     "int32",
                     "int64",
+                    "float16",
                     "float32",
                     "float64",
                 ],
@@ -105,8 +115,8 @@ def logical_and(x, y, out=None, name=None):
     .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
 
     Args:
-        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float32, float64.
-        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float32, float64.
+        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
+        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
         out(Tensor, optional): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
 
@@ -147,8 +157,8 @@ def logical_or(x, y, out=None, name=None):
     .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
 
     Args:
-        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float32, float64.
-        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float32, float64.
+        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
+        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
         out(Tensor): The ``Variable`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
 
@@ -191,8 +201,8 @@ def logical_xor(x, y, out=None, name=None):
     .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
 
    Args:
-        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float32, float64.
-        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float32, float64.
+        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
+        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
         out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
 
@@ -237,7 +247,7 @@ def logical_not(x, out=None, name=None):
     .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
 
     Args:
-        x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, in32, in64, float32, or float64.
+        x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, in32, in64, float16, float32, or float64.
         out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor` will be created to save the output.
         name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
 
-- 
GitLab
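For reference, below is a minimal usage sketch of what this patch enables: the Python-level logical ops accepting float16 inputs on a CUDA device. The float16 kernels are registered only on the GPU (KPS/CUDA) path, which is why the unit test skips np.float16 when use_gpu is false; the sketch mirrors that by falling back to float32 on CPU-only builds. The tensor values are illustrative only and are not part of the patch.

import numpy as np
import paddle

# float16 logical kernels exist only for the GPU path after this patch,
# so fall back to float32 when CUDA is unavailable (illustrative guard).
dtype = "float16" if paddle.is_compiled_with_cuda() else "float32"

x = paddle.to_tensor(np.array([0.0, 1.5, -2.0]), dtype=dtype)
y = paddle.to_tensor(np.array([0.0, 0.0, 3.0]), dtype=dtype)

# Non-zero values are treated as True; each logical op returns a bool tensor.
print(paddle.logical_and(x, y))  # [False, False, True]
print(paddle.logical_or(x, y))   # [False, True, True]
print(paddle.logical_xor(x, y))  # [False, True, False]
print(paddle.logical_not(x))     # [True, False, False]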