Unverified · Commit b1805727 authored by z8hanghuan, committed by GitHub

fix fill_constant bug, *test=kunlun (#39681)

* fix fill_constant bug, *test=kunlun

* fix fill_constant bug,*test=kunlun
Parent 93016331
@@ -134,7 +134,7 @@ XPUOpMap& get_kl2_ops() {
        XPUKernelSet({pOpKernelType(vartype::INT64, XPUPlace()),
                      pOpKernelType(vartype::INT32, XPUPlace()),
                      pOpKernelType(vartype::INT16, XPUPlace()),
-                     pOpKernelType(vartype::INT8, XPUPlace()),
+                     pOpKernelType(vartype::UINT8, XPUPlace()),
                      pOpKernelType(vartype::BOOL, XPUPlace()),
                      pOpKernelType(vartype::FP64, XPUPlace()),
                      pOpKernelType(vartype::FP32, XPUPlace()),
...
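Context note (not part of the commit): the hunk above only swaps INT8 for UINT8 in the KL2 fill_constant kernel set. A minimal sketch of what that registration is meant to allow is below; it assumes a Paddle build with XPU support, a visible XPU device, and a Paddle version whose Python-side full/fill_constant accepts 'uint8'.

import paddle

paddle.set_device("xpu")  # assumption: an XPU device is available
# paddle.full lowers to the fill_constant op; 'uint8' acceptance is assumed here.
x = paddle.full(shape=[2, 3], fill_value=255, dtype="uint8")
print(x.numpy())  # expected: a 2x3 array filled with 255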
@@ -17,85 +17,135 @@ from __future__ import print_function
 
 import sys
 sys.path.append("..")
 import unittest
-from op_test import OpTest
 import paddle
-from paddle.fluid import core
 import numpy as np
+from op_test import OpTest, convert_float_to_uint16
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 
-
-# Situation 1: Attr(shape) is a list(without tensor)
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp1(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified value'''
-        self.op_type = "fill_constant"
-
-        self.inputs = {}
-        self.attrs = {'shape': [123, 92], 'dtype': 5, 'value': 3.8}
-        self.outputs = {'Out': np.full((123, 92), 3.8)}
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp2(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with default value'''
-        self.op_type = "fill_constant"
-
-        self.inputs = {}
-        self.attrs = {'shape': [123, 92], 'dtype': 5}
-        self.outputs = {'Out': np.full((123, 92), 0.0)}
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp3(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified int64 value'''
-        self.op_type = "fill_constant"
-
-        self.inputs = {}
-        self.attrs = {'shape': [123, 92], 'dtype': 3, 'value': 10000000000}
-        self.outputs = {'Out': np.full((123, 92), 10000000000)}
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp4(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified int value'''
-        self.op_type = "fill_constant"
-
-        self.inputs = {}
-        self.attrs = {'shape': [123, 92], 'dtype': 2, 'value': 3}
-        self.outputs = {'Out': np.full((123, 92), 3)}
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-# Situation 2: Attr(shape) is a list(with tensor)
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp1_ShapeTensorList(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified value'''
-        self.op_type = "fill_constant"
-        self.init_data()
-        shape_tensor_list = []
-        for index, ele in enumerate(self.shape):
-            shape_tensor_list.append(("x" + str(index), np.ones(
+
+class XPUTestFillConstantOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'fill_constant'
+        self.use_dynamic_create_class = False
+
+    # Situation 1: Attr(shape) is a list(without tensor)
+    class TestFillConstantOp(XPUOpTest):
+        def setUp(self):
+            '''Test fill_constant op with specified value
+            '''
+            self.init_dtype()
+            self.set_xpu()
+            self.op_type = "fill_constant"
+            self.place = paddle.XPUPlace(0)
+            self.set_shape()
+            self.convert_dtype2index()
+            self.set_value()
+            self.set_data()
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def set_shape(self):
+            self.shape = [90, 10]
+
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+            self.__class__.no_need_check_grad = True
+            self.__class__.op_type = self.in_type
+
+        def convert_dtype2index(self):
+            '''
+            if new type added, need to add corresponding index
+            '''
+            if self.dtype == np.bool_:
+                self.index = 0
+            if self.dtype == np.int16:
+                self.index = 1
+            if self.dtype == np.int32:
+                self.index = 2
+            if self.dtype == np.int64:
+                self.index = 3
+            if self.dtype == np.float16:
+                self.index = 4
+            if self.dtype == np.float32:
+                self.index = 5
+            if self.dtype == np.float64:
+                self.index = 6
+            if self.dtype == np.uint8:
+                self.index = 20
+            if self.dtype == np.int8:
+                self.index = 21
+            if self.dtype == np.uint16:  # same as paddle.bfloat16
+                self.index = 22
+            if self.dtype == np.complex64:
+                self.index = 23
+            if self.dtype == np.complex128:
+                self.index = 24
+
+        def set_value(self):
+            if self.index == 3:
+                self.value = 10000000000
+            elif self.index == 0:
+                self.value = np.random.randint(0, 2)
+            elif self.index in [20, 21]:
+                self.value = 125
+            elif self.index in [1, 2]:
+                self.value = 7
+            elif self.index in [4, 5, 6]:
+                self.value = 1e-5
+            elif self.index == 22:
+                self.value = 1.0
+            else:
+                self.value = 3.7
+
+        def set_data(self):
+            self.inputs = {}
+            self.attrs = {
+                'shape': self.shape,
+                'dtype': self.index,
+                'value': self.value
+            }
+            self.outputs = {'Out': np.full(self.shape, self.value)}
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+    class TestFillConstantOp2(TestFillConstantOp):
+        '''Test fill_constant op with default value
+        '''
+
+        def set_shape(self):
+            self.shape = [10, 10]
+
+    class TestFillConstantOp3(TestFillConstantOp):
+        '''Test fill_constant op with specified int64 value
+        '''
+
+        def set_shape(self):
+            self.shape = [123, 2, 1]
+
+    class TestFillConstantOp4(TestFillConstantOp):
+        '''Test fill_constant op with specified int value
+        '''
+
+        def set_shape(self):
+            self.shape = [123, 3, 2, 1]
+
+    class TestFillConstantOp5(TestFillConstantOp):
+        '''Test fill_constant op with specified float value
+        '''
+
+        def set_shape(self):
+            self.shape = [123]
+
+    # Situation 2: Attr(shape) is a list(with tensor)
+    class TestFillConstantOp1_ShapeTensorList(TestFillConstantOp):
+        '''Test fill_constant op with specified value
+        '''
+
+        def set_data(self):
+            shape_tensor_list = []
+            for index, ele in enumerate(self.shape):
+                shape_tensor_list.append(("x" + str(index), np.ones(
@@ -104,137 +154,97 @@ class TestFillConstantOp1_ShapeTensorList(OpTest):
-        self.inputs = {"ShapeTensorList": shape_tensor_list}
-        self.attrs = {
-            'shape': self.infer_shape,
-            'dtype': 5,
-            'value': self.value
-        }
-        self.outputs = {'Out': np.full(self.shape, self.value)}
-
-    def init_data(self):
-        self.shape = [123, 92]
-        self.infer_shape = [-1, 92]
-        self.value = 3.8
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp2_ShapeTensorList(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with default value'''
-        self.op_type = "fill_constant"
-        self.init_data()
-        shape_tensor_list = []
-        for index, ele in enumerate(self.shape):
-            shape_tensor_list.append(("x" + str(index), np.ones(
-                (1)).astype('int32') * ele))
-
-        self.inputs = {"ShapeTensorList": shape_tensor_list}
-        self.attrs = {'shape': self.infer_shape, 'dtype': 5}
-        self.outputs = {'Out': np.full(self.shape, 0.0)}
-
-    def init_data(self):
-        self.shape = [123, 92]
-        self.infer_shape = [-1, -1]
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp3_ShapeTensorList(TestFillConstantOp1_ShapeTensorList):
-    def init_data(self):
-        self.shape = [123, 92]
-        self.infer_shape = [123, -1]
-        self.value = 10000000000
-
-
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp4_ShapeTensorList(TestFillConstantOp1_ShapeTensorList):
-    def init_data(self):
-        self.shape = [123, 92]
-        self.infer_shape = [123, -1]
-        self.value = 3
-
-
-# Situation 3: shape is a tensor
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp1_ShapeTensor(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified value'''
-        self.op_type = "fill_constant"
-        self.init_data()
-
-        self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")}
-        self.attrs = {'value': self.value, 'dtype': 5}
-        self.outputs = {'Out': np.full(self.shape, self.value)}
-
-    def init_data(self):
-        self.shape = [123, 92]
-        self.value = 3.8
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-# Situation 4: value is a tensor
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp1_ValueTensor(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified value'''
-        self.op_type = "fill_constant"
-        self.init_data()
-
-        self.inputs = {
-            "ShapeTensor": np.array(self.shape).astype("int32"),
-            'ValueTensor': np.array([self.value]).astype("float32")
-        }
-        self.attrs = {'value': self.value + 1.0, 'dtype': 5}
-        self.outputs = {'Out': np.full(self.shape, self.value)}
-
-    def init_data(self):
-        self.shape = [123, 92]
-        self.value = 3.8
-        self.dtype = np.float32
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
-
-
-# Situation 5: value is a tensor
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestFillConstantOp2_ValueTensor(OpTest):
-    def setUp(self):
-        '''Test fill_constant op with specified value'''
-        self.op_type = "fill_constant"
-        self.init_data()
-
-        self.inputs = {
-            "ShapeTensor": np.array(self.shape).astype("int32"),
-            'ValueTensor': np.array([self.value]).astype("int32")
-        }
-        self.attrs = {'value': self.value, 'dtype': 2}
-        self.outputs = {'Out': np.full(self.shape, self.value)}
-
-    def init_data(self):
-        self.shape = [123, 92]
-        self.value = 3
-        self.dtype = np.int32
-
-    def test_check_output(self):
-        place = paddle.XPUPlace(0)
-        self.check_output_with_place(place)
+            self.inputs = {"ShapeTensorList": shape_tensor_list}
+            self.attrs = {
+                'shape': self.infer_shape,
+                'dtype': self.index,
+                'value': self.value
+            }
+            self.outputs = {'Out': np.full(self.shape, self.value)}
+            if self.index == 22:
+                self.outputs = {
+                    'Out':
+                    np.full(self.shape,
+                            convert_float_to_uint16(
+                                np.array([self.value]).astype("float32")))
+                }
+
+        def set_shape(self):
+            self.shape = [123, 92]
+            self.infer_shape = [123, 1]
+
+    class TestFillConstantOp2_ShapeTensorList(TestFillConstantOp):
+        '''Test fill_constant op with default value
+        '''
+
+        def set_data(self):
+            shape_tensor_list = []
+            for index, ele in enumerate(self.shape):
+                shape_tensor_list.append(("x" + str(index), np.ones(
+                    (1)).astype('int32') * ele))
+
+            self.inputs = {"ShapeTensorList": shape_tensor_list}
+            self.attrs = {'shape': self.infer_shape, 'dtype': self.index}
+            self.outputs = {'Out': np.full(self.shape, 0.0)}
+
+        def set_shape(self):
+            self.shape = [123, 2, 1]
+            self.infer_shape = [1, 1, 1]
+
+    class TestFillConstantOp3_ShapeTensorList(
+            TestFillConstantOp1_ShapeTensorList):
+        def set_shape(self):
+            self.shape = [123, 3, 2, 1]
+            self.infer_shape = [123, 111, 11, 1]
+
+    class TestFillConstantOp4_ShapeTensorList(
+            TestFillConstantOp1_ShapeTensorList):
+        def set_shape(self):
+            self.shape = [123]
+            self.infer_shape = [1]
+
+    # Situation 3: shape is a tensor
+    class TestFillConstantOp1_ShapeTensor(TestFillConstantOp):
+        '''Test fill_constant op with specified value
+        '''
+
+        def set_data(self):
+            self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")}
+            self.attrs = {'value': self.value, 'dtype': self.index}
+            self.outputs = {'Out': np.full(self.shape, self.value)}
+            if self.index == 22:
+                self.outputs = {
+                    'Out':
+                    np.full(self.shape,
+                            convert_float_to_uint16(
+                                np.array([self.value]).astype("float32")))
+                }
+
+        def set_shape(self):
+            self.shape = [123, 92]
+
+    # Situation 4: value is a tensor
+    class TestFillConstantOp1_ValueTensor(TestFillConstantOp):
+        '''Test fill_constant op with specified value
+        '''
+
+        def set_data(self):
+            self.inputs = {
+                "ShapeTensor": np.array(self.shape).astype("int32"),
+                'ValueTensor': np.array([self.value]).astype(self.dtype)
+            }
+            if self.index == 22:
+                self.inputs = {
+                    "ShapeTensor": np.array(self.shape).astype("int32"),
+                    'ValueTensor': convert_float_to_uint16(
+                        np.array([self.value]).astype("float32"))
+                }
+            self.attrs = {'value': self.value, 'dtype': self.index}
+            self.outputs = {'Out': np.full(self.shape, self.value)}
+
+        def set_shape(self):
+            self.shape = [123, 92]
+
+
+support_types = get_xpu_op_support_types('fill_constant')
+for stype in support_types:
+    create_test_class(globals(), XPUTestFillConstantOp, stype)
 
 if __name__ == "__main__":
     paddle.enable_static()
...
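Context note (not part of the commit): the rewritten test no longer hard-codes one dtype per class; the create_test_class(globals(), XPUTestFillConstantOp, stype) loop in the diff stamps out one concrete test class per dtype reported by get_xpu_op_support_types('fill_constant'). A rough, self-contained sketch of that registration pattern follows, using only unittest and numpy; the helper below is a stand-in for Paddle's utilities, not their real implementation.

# Conceptual sketch of dtype-parameterized test generation; make_test_class is
# a stand-in for create_test_class/get_xpu_op_support_types, not Paddle's code.
import unittest

import numpy as np


class FillConstantCaseBase(unittest.TestCase):
    in_type = "float32"  # overridden by each generated class

    def test_full(self):
        # np.full stands in for the fill_constant kernel under test.
        out = np.full([2, 3], 1, dtype=self.in_type)
        self.assertEqual(out.dtype, np.dtype(self.in_type))
        self.assertTrue((out == 1).all())


def make_test_class(scope, base, dtype_name):
    # Register a concrete TestCase named after the dtype so unittest
    # discovers one case per supported type.
    name = "TestFillConstant_" + dtype_name
    scope[name] = type(name, (base, ), {"in_type": dtype_name})


for stype in ["int32", "int64", "float32", "uint8"]:  # stand-in support list
    make_test_class(globals(), FillConstantCaseBase, stype)

if __name__ == "__main__":
    unittest.main()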