From b1805727a0a7c716445283d21778c41473bf0749 Mon Sep 17 00:00:00 2001
From: helen88
Date: Mon, 21 Feb 2022 13:45:33 +0800
Subject: [PATCH] fix fill_constant bug, *test=kunlun (#39681)

* fix fill_constant bug, *test=kunlun

* fix fill_constant bug,*test=kunlun
---
 .../fluid/platform/device/xpu/xpu2_op_list.h  |   2 +-
 .../xpu/test_fill_constant_op_xpu.py          | 442 +++++++++---------
 2 files changed, 227 insertions(+), 217 deletions(-)

diff --git a/paddle/fluid/platform/device/xpu/xpu2_op_list.h b/paddle/fluid/platform/device/xpu/xpu2_op_list.h
index 6e7c98dd715..e27d56642ef 100644
--- a/paddle/fluid/platform/device/xpu/xpu2_op_list.h
+++ b/paddle/fluid/platform/device/xpu/xpu2_op_list.h
@@ -134,7 +134,7 @@ XPUOpMap& get_kl2_ops() {
      XPUKernelSet({pOpKernelType(vartype::INT64, XPUPlace()),
                    pOpKernelType(vartype::INT32, XPUPlace()),
                    pOpKernelType(vartype::INT16, XPUPlace()),
-                   pOpKernelType(vartype::INT8, XPUPlace()),
+                   pOpKernelType(vartype::UINT8, XPUPlace()),
                    pOpKernelType(vartype::BOOL, XPUPlace()),
                    pOpKernelType(vartype::FP64, XPUPlace()),
                    pOpKernelType(vartype::FP32, XPUPlace()),
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fill_constant_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fill_constant_op_xpu.py
index b31c80ee9e7..d989fd0afad 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_fill_constant_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_fill_constant_op_xpu.py
@@ -17,224 +17,234 @@ from __future__ import print_function
 import sys
 sys.path.append("..")
 import unittest
-from op_test import OpTest
-
 import paddle
+from paddle.fluid import core
 import numpy as np
-
-
-# Situation 1: Attr(shape) is a list(without tensor)
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp1(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified value'''
-        self.op_type = "fill_constant"
-
-        self.inputs = {}
-        self.attrs = {'shape': [123, 92], 'dtype': 5, 'value': 3.8}
-        self.outputs = {'Out': np.full((123, 92), 3.8)}
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp2(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with default value'''
-        self.op_type = "fill_constant"
-
-        self.inputs = {}
-        self.attrs = {'shape': [123, 92], 'dtype': 5}
-        self.outputs = {'Out': np.full((123, 92), 0.0)}
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp3(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified int64 value'''
-        self.op_type = "fill_constant"
-
-        self.inputs = {}
-        self.attrs = {'shape': [123, 92], 'dtype': 3, 'value': 10000000000}
-        self.outputs = {'Out': np.full((123, 92), 10000000000)}
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp4(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified int value'''
-        self.op_type = "fill_constant"
-
-        self.inputs = {}
-        self.attrs = {'shape': [123, 92], 'dtype': 2, 'value': 3}
-        self.outputs = {'Out': np.full((123, 92), 3)}
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-# Situation 2: Attr(shape) is a list(with tensor)
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp1_ShapeTensorList(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified value'''
-        self.op_type = "fill_constant"
-        self.init_data()
-        shape_tensor_list = []
-        for index, ele in enumerate(self.shape):
-            shape_tensor_list.append(("x" + str(index), np.ones(
-                (1)).astype('int32') * ele))
-
-        self.inputs = {"ShapeTensorList": shape_tensor_list}
-        self.attrs = {
-            'shape': self.infer_shape,
-            'dtype': 5,
-            'value': self.value
-        }
-        self.outputs = {'Out': np.full(self.shape, self.value)}
-
-    def init_data(self):
-        self.shape = [123, 92]
-        self.infer_shape = [-1, 92]
-        self.value = 3.8
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp2_ShapeTensorList(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with default value'''
-        self.op_type = "fill_constant"
-        self.init_data()
-        shape_tensor_list = []
-        for index, ele in enumerate(self.shape):
-            shape_tensor_list.append(("x" + str(index), np.ones(
-                (1)).astype('int32') * ele))
-
-        self.inputs = {"ShapeTensorList": shape_tensor_list}
-        self.attrs = {'shape': self.infer_shape, 'dtype': 5}
-        self.outputs = {'Out': np.full(self.shape, 0.0)}
-
-    def init_data(self):
-        self.shape = [123, 92]
-        self.infer_shape = [-1, -1]
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp3_ShapeTensorList(TestFillConstantOp1_ShapeTensorList):
-    def init_data(self):
-        self.shape = [123, 92]
-        self.infer_shape = [123, -1]
-        self.value = 10000000000
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp4_ShapeTensorList(TestFillConstantOp1_ShapeTensorList):
-    def init_data(self):
-        self.shape = [123, 92]
-        self.infer_shape = [123, -1]
-        self.value = 3
-
-
-# Situation 3: shape is a tensor
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp1_ShapeTensor(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified value'''
-        self.op_type = "fill_constant"
-        self.init_data()
-
-        self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")}
-        self.attrs = {'value': self.value, 'dtype': 5}
-        self.outputs = {'Out': np.full(self.shape, self.value)}
-
-    def init_data(self):
-        self.shape = [123, 92]
-        self.value = 3.8
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-# Situation 4: value is a tensor
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp1_ValueTensor(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified value'''
-        self.op_type = "fill_constant"
-        self.init_data()
-
-        self.inputs = {
-            "ShapeTensor": np.array(self.shape).astype("int32"),
-            'ValueTensor': np.array([self.value]).astype("float32")
-        }
-        self.attrs = {'value': self.value + 1.0, 'dtype': 5}
-        self.outputs = {'Out': np.full(self.shape, self.value)}
-
-    def init_data(self):
-        self.shape = [123, 92]
-        self.value = 3.8
-        self.dtype = np.float32
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-# Situation 5: value is a tensor
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp2_ValueTensor(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified value'''
-        self.op_type = "fill_constant"
-        self.init_data()
-
-        self.inputs = {
-            "ShapeTensor": np.array(self.shape).astype("int32"),
-            'ValueTensor': np.array([self.value]).astype("int32")
-        }
-        self.attrs = {'value': self.value, 'dtype': 2}
-        self.outputs = {'Out': np.full(self.shape, self.value)}
-
-    def init_data(self):
-        self.shape = [123, 92]
-        self.value = 3
-        self.dtype = np.int32
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
+from op_test import OpTest, convert_float_to_uint16
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
+
+
+class XPUTestFillConstantOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'fill_constant'
+        self.use_dynamic_create_class = False
+
+    # Situation 1: Attr(shape) is a list(without tensor)
+    class TestFillConstantOp(XPUOpTest):
+        def setUp(self):
+            '''Test fill_constant op with specified value
+            '''
+            self.init_dtype()
+            self.set_xpu()
+            self.op_type = "fill_constant"
+            self.place = paddle.XPUPlace(0)
+            self.set_shape()
+            self.convert_dtype2index()
+            self.set_value()
+            self.set_data()
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def set_shape(self):
+            self.shape = [90, 10]
+
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+            self.__class__.no_need_check_grad = True
+            self.__class__.op_type = self.in_type
+
+        def convert_dtype2index(self):
+            '''
+            if new type added, need to add corresponding index
+            '''
+            if self.dtype == np.bool_:
+                self.index = 0
+            if self.dtype == np.int16:
+                self.index = 1
+            if self.dtype == np.int32:
+                self.index = 2
+            if self.dtype == np.int64:
+                self.index = 3
+            if self.dtype == np.float16:
+                self.index = 4
+            if self.dtype == np.float32:
+                self.index = 5
+            if self.dtype == np.float64:
+                self.index = 6
+            if self.dtype == np.uint8:
+                self.index = 20
+            if self.dtype == np.int8:
+                self.index = 21
+            if self.dtype == np.uint16:  # same as paddle.bfloat16
+                self.index = 22
+            if self.dtype == np.complex64:
+                self.index = 23
+            if self.dtype == np.complex128:
+                self.index = 24
+
+        def set_value(self):
+            if self.index == 3:
+                self.value = 10000000000
+            elif self.index == 0:
+                self.value = np.random.randint(0, 2)
+            elif self.index in [20, 21]:
+                self.value = 125
+            elif self.index in [1, 2]:
+                self.value = 7
+            elif self.index in [4, 5, 6]:
+                self.value = 1e-5
+            elif self.index == 22:
+                self.value = 1.0
+            else:
+                self.value = 3.7
+
+        def set_data(self):
+            self.inputs = {}
+            self.attrs = {
+                'shape': self.shape,
+                'dtype': self.index,
+                'value': self.value
+            }
+            self.outputs = {'Out': np.full(self.shape, self.value)}
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+    class TestFillConstantOp2(TestFillConstantOp):
+        '''Test fill_constant op with default value
+        '''
+
+        def set_shape(self):
+            self.shape = [10, 10]
+
+    class TestFillConstantOp3(TestFillConstantOp):
+        '''Test fill_constant op with specified int64 value
+        '''
+
+        def set_shape(self):
+            self.shape = [123, 2, 1]
+
+    class TestFillConstantOp4(TestFillConstantOp):
+        '''Test fill_constant op with specified int value
+        '''
+
+        def set_shape(self):
+            self.shape = [123, 3, 2, 1]
+
+    class TestFillConstantOp5(TestFillConstantOp):
+        '''Test fill_constant op with specified float value
+        '''
+
+        def set_shape(self):
+            self.shape = [123]
+
+    # Situation 2: Attr(shape) is a list(with tensor)
+    class TestFillConstantOp1_ShapeTensorList(TestFillConstantOp):
+        '''Test fill_constant op with specified value
+        '''
+
+        def set_data(self):
+            shape_tensor_list = []
+            for index, ele in enumerate(self.shape):
+                shape_tensor_list.append(("x" + str(index), np.ones(
+                    (1)).astype('int32') * ele))
+
+            self.inputs = {"ShapeTensorList": shape_tensor_list}
+            self.attrs = {
+                'shape': self.infer_shape,
+                'dtype': self.index,
+                'value': self.value
+            }
+            self.outputs = {'Out': np.full(self.shape, self.value)}
+            if self.index == 22:
+                self.outputs = {
+                    'Out':
+                    np.full(self.shape,
+                            convert_float_to_uint16(
+                                np.array([self.value]).astype("float32")))
+                }
+
+        def set_shape(self):
+            self.shape = [123, 92]
+            self.infer_shape = [123, 1]
+
+    class TestFillConstantOp2_ShapeTensorList(TestFillConstantOp):
+        '''Test fill_constant op with default value
+        '''
+
+        def set_data(self):
+            shape_tensor_list = []
+            for index, ele in enumerate(self.shape):
+                shape_tensor_list.append(("x" + str(index), np.ones(
+                    (1)).astype('int32') * ele))
+
+            self.inputs = {"ShapeTensorList": shape_tensor_list}
+            self.attrs = {'shape': self.infer_shape, 'dtype': self.index}
+            self.outputs = {'Out': np.full(self.shape, 0.0)}
+
+        def set_shape(self):
+            self.shape = [123, 2, 1]
+            self.infer_shape = [1, 1, 1]
+
+    class TestFillConstantOp3_ShapeTensorList(
+            TestFillConstantOp1_ShapeTensorList):
+        def set_shape(self):
+            self.shape = [123, 3, 2, 1]
+            self.infer_shape = [123, 111, 11, 1]
+
+    class TestFillConstantOp4_ShapeTensorList(
+            TestFillConstantOp1_ShapeTensorList):
+        def set_shape(self):
+            self.shape = [123]
+            self.infer_shape = [1]
+
+    # Situation 3: shape is a tensor
+    class TestFillConstantOp1_ShapeTensor(TestFillConstantOp):
+        '''Test fill_constant op with specified value
+        '''
+
+        def set_data(self):
+            self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")}
+            self.attrs = {'value': self.value, 'dtype': self.index}
+            self.outputs = {'Out': np.full(self.shape, self.value)}
+            if self.index == 22:
+                self.outputs = {
+                    'Out':
+                    np.full(self.shape,
+                            convert_float_to_uint16(
+                                np.array([self.value]).astype("float32")))
+                }
+
+        def set_shape(self):
+            self.shape = [123, 92]
+
+    # Situation 4: value is a tensor
+    class TestFillConstantOp1_ValueTensor(TestFillConstantOp):
+        '''Test fill_constant op with specified value
+        '''
+
+        def set_data(self):
+            self.inputs = {
+                "ShapeTensor": np.array(self.shape).astype("int32"),
+                'ValueTensor': np.array([self.value]).astype(self.dtype)
+            }
+            if self.index == 22:
+                self.inputs = {
+                    'ValueTensor': convert_float_to_uint16(
+                        np.array([self.value]).astype("float32"))
+                }
+            self.attrs = {'value': self.value, 'dtype': self.index}
+            self.outputs = {'Out': np.full(self.shape, self.value)}
+
+        def set_shape(self):
+            self.shape = [123, 92]
+
+
+support_types = get_xpu_op_support_types('fill_constant')
+for stype in support_types:
+    create_test_class(globals(), XPUTestFillConstantOp, stype)
 
 if __name__ == "__main__":
     paddle.enable_static()
-- 
GitLab
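
Note on the dtype indices used in this patch: the integer passed as the 'dtype'
attr of fill_constant corresponds to Paddle's VarType enum values, which is
what convert_dtype2index re-derives from the numpy dtype in the new test. The
snippet below is a minimal, numpy-only sketch of that mapping for cross-checking
a newly added type; the names DTYPE_TO_INDEX and dtype_to_index are illustrative
helpers for this note only and are not part of the patch or of Paddle's API.

    import numpy as np

    # Mirrors convert_dtype2index above: numpy scalar type -> dtype index.
    DTYPE_TO_INDEX = {
        np.bool_: 0,
        np.int16: 1,
        np.int32: 2,
        np.int64: 3,
        np.float16: 4,
        np.float32: 5,
        np.float64: 6,
        np.uint8: 20,
        np.int8: 21,
        np.uint16: 22,  # carries bfloat16 payloads in these tests
        np.complex64: 23,
        np.complex128: 24,
    }

    def dtype_to_index(dtype):
        # Accept 'float32', np.float32 or np.dtype('float32') alike.
        return DTYPE_TO_INDEX[np.dtype(dtype).type]

    assert dtype_to_index('float32') == 5
    assert dtype_to_index(np.uint8) == 20   # the kernel type this patch registers
    assert dtype_to_index('uint16') == 22   # bfloat16 test path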