Unverified · Commit b1805727 · authored by z8hanghuan · committed by GitHub

fix fill_constant bug, *test=kunlun (#39681)

* fix fill_constant bug, *test=kunlun

* fix fill_constant bug,*test=kunlun
Parent: 93016331
...
@@ -134,7 +134,7 @@ XPUOpMap& get_kl2_ops() {
       XPUKernelSet({pOpKernelType(vartype::INT64, XPUPlace()),
                     pOpKernelType(vartype::INT32, XPUPlace()),
                     pOpKernelType(vartype::INT16, XPUPlace()),
-                    pOpKernelType(vartype::INT8, XPUPlace()),
+                    pOpKernelType(vartype::UINT8, XPUPlace()),
                     pOpKernelType(vartype::BOOL, XPUPlace()),
                     pOpKernelType(vartype::FP64, XPUPlace()),
                     pOpKernelType(vartype::FP32, XPUPlace()),
...
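The registration change above (INT8 replaced by UINT8) most likely widens the fill_constant entry in the KL2 op list to unsigned 8-bit output, matching the uint8 case the rewritten test below covers. As a rough usage sketch, not taken from this patch, and assuming Paddle is built with XPU support and a Kunlun device is visible, paddle.full (which is implemented on top of the fill_constant op) can exercise that path:

import paddle

paddle.set_device("xpu")  # run on the Kunlun device; requires an XPU build
x = paddle.full(shape=[2, 3], fill_value=125, dtype="uint8")  # dispatches to fill_constant
print(x.numpy())  # expect a 2x3 array filled with 125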
...
@@ -17,224 +17,234 @@ from __future__ import print_function
(This hunk rewrites the XPU fill_constant test: the former per-dtype OpTest-based classes, each guarded by an @unittest.skipIf XPU check, are removed, and the added lines introduce the XPUOpTestWrapper-based suite below, which generates one test class per supported dtype.)

import sys
sys.path.append("..")
import unittest
import paddle
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper


class XPUTestFillConstantOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'fill_constant'
        self.use_dynamic_create_class = False

    # Situation 1: Attr(shape) is a list(without tensor)
    class TestFillConstantOp(XPUOpTest):
        def setUp(self):
            '''Test fill_constant op with specified value
            '''
            self.init_dtype()
            self.set_xpu()
            self.op_type = "fill_constant"
            self.place = paddle.XPUPlace(0)
            self.set_shape()
            self.convert_dtype2index()
            self.set_value()
            self.set_data()

        def init_dtype(self):
            self.dtype = self.in_type

        def set_shape(self):
            self.shape = [90, 10]

        def set_xpu(self):
            self.__class__.use_xpu = True
            self.__class__.no_need_check_grad = True
            self.__class__.op_type = self.in_type

        def convert_dtype2index(self):
            '''
            if new type added, need to add corresponding index
            '''
            if self.dtype == np.bool_:
                self.index = 0
            if self.dtype == np.int16:
                self.index = 1
            if self.dtype == np.int32:
                self.index = 2
            if self.dtype == np.int64:
                self.index = 3
            if self.dtype == np.float16:
                self.index = 4
            if self.dtype == np.float32:
                self.index = 5
            if self.dtype == np.float64:
                self.index = 6
            if self.dtype == np.uint8:
                self.index = 20
            if self.dtype == np.int8:
                self.index = 21
            if self.dtype == np.uint16:  # same as paddle.bfloat16
                self.index = 22
            if self.dtype == np.complex64:
                self.index = 23
            if self.dtype == np.complex128:
                self.index = 24

        def set_value(self):
            if self.index == 3:
                self.value = 10000000000
            elif self.index == 0:
                self.value = np.random.randint(0, 2)
            elif self.index in [20, 21]:
                self.value = 125
            elif self.index in [1, 2]:
                self.value = 7
            elif self.index in [4, 5, 6]:
                self.value = 1e-5
            elif self.index == 22:
                self.value = 1.0
            else:
                self.value = 3.7

        def set_data(self):
            self.inputs = {}
            self.attrs = {
                'shape': self.shape,
                'dtype': self.index,
                'value': self.value
            }
            self.outputs = {'Out': np.full(self.shape, self.value)}

        def test_check_output(self):
            self.check_output_with_place(self.place)

    class TestFillConstantOp2(TestFillConstantOp):
        '''Test fill_constant op with default value
        '''

        def set_shape(self):
            self.shape = [10, 10]

    class TestFillConstantOp3(TestFillConstantOp):
        '''Test fill_constant op with specified int64 value
        '''

        def set_shape(self):
            self.shape = [123, 2, 1]

    class TestFillConstantOp4(TestFillConstantOp):
        '''Test fill_constant op with specified int value
        '''

        def set_shape(self):
            self.shape = [123, 3, 2, 1]

    class TestFillConstantOp5(TestFillConstantOp):
        '''Test fill_constant op with specified float value
        '''

        def set_shape(self):
            self.shape = [123]

    # Situation 2: Attr(shape) is a list(with tensor)
    class TestFillConstantOp1_ShapeTensorList(TestFillConstantOp):
        '''Test fill_constant op with specified value
        '''

        def set_data(self):
            shape_tensor_list = []
            for index, ele in enumerate(self.shape):
                shape_tensor_list.append(("x" + str(index), np.ones(
                    (1)).astype('int32') * ele))

            self.inputs = {"ShapeTensorList": shape_tensor_list}
            self.attrs = {
                'shape': self.infer_shape,
                'dtype': self.index,
                'value': self.value
            }
            self.outputs = {'Out': np.full(self.shape, self.value)}
            if self.index == 22:
                self.outputs = {
                    'Out':
                    np.full(self.shape,
                            convert_float_to_uint16(
                                np.array([self.value]).astype("float32")))
                }

        def set_shape(self):
            self.shape = [123, 92]
            self.infer_shape = [123, 1]

    class TestFillConstantOp2_ShapeTensorList(TestFillConstantOp):
        '''Test fill_constant op with default value
        '''

        def set_data(self):
            shape_tensor_list = []
            for index, ele in enumerate(self.shape):
                shape_tensor_list.append(("x" + str(index), np.ones(
                    (1)).astype('int32') * ele))

            self.inputs = {"ShapeTensorList": shape_tensor_list}
            self.attrs = {'shape': self.infer_shape, 'dtype': self.index}
            self.outputs = {'Out': np.full(self.shape, 0.0)}

        def set_shape(self):
            self.shape = [123, 2, 1]
            self.infer_shape = [1, 1, 1]

    class TestFillConstantOp3_ShapeTensorList(
            TestFillConstantOp1_ShapeTensorList):
        def set_shape(self):
            self.shape = [123, 3, 2, 1]
            self.infer_shape = [123, 111, 11, 1]

    class TestFillConstantOp4_ShapeTensorList(
            TestFillConstantOp1_ShapeTensorList):
        def set_shape(self):
            self.shape = [123]
            self.infer_shape = [1]

    # Situation 3: shape is a tensor
    class TestFillConstantOp1_ShapeTensor(TestFillConstantOp):
        '''Test fill_constant op with specified value
        '''

        def set_data(self):
            self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")}
            self.attrs = {'value': self.value, 'dtype': self.index}
            self.outputs = {'Out': np.full(self.shape, self.value)}
            if self.index == 22:
                self.outputs = {
                    'Out':
                    np.full(self.shape,
                            convert_float_to_uint16(
                                np.array([self.value]).astype("float32")))
                }

        def set_shape(self):
            self.shape = [123, 92]

    # Situation 4: value is a tensor
    class TestFillConstantOp1_ValueTensor(TestFillConstantOp):
        '''Test fill_constant op with specified value
        '''

        def set_data(self):
            self.inputs = {
                "ShapeTensor": np.array(self.shape).astype("int32"),
                'ValueTensor': np.array([self.value]).astype(self.dtype)
            }
            if self.index == 22:
                self.inputs = {
                    'ValueTensor':
                    convert_float_to_uint16(
                        np.array([self.value]).astype("float32"))
                }
            self.attrs = {'value': self.value, 'dtype': self.index}
            self.outputs = {'Out': np.full(self.shape, self.value)}

        def set_shape(self):
            self.shape = [123, 92]


support_types = get_xpu_op_support_types('fill_constant')
for stype in support_types:
    create_test_class(globals(), XPUTestFillConstantOp, stype)

if __name__ == "__main__":
    paddle.enable_static()
...
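In the self.index == 22 branches above, expected bfloat16 outputs are built with convert_float_to_uint16 from op_test, because numpy has no native bfloat16 type and the values travel as uint16 bit patterns. A simplified stand-in for what that conversion does (my own helper name, and ignoring any rounding the real helper may apply):

import numpy as np

def float32_to_bfloat16_bits(x):
    # keep the high 16 bits of each float32 bit pattern -> bfloat16 stored as uint16
    x = np.asarray(x, dtype=np.float32)
    return np.right_shift(x.view(np.uint32), 16).astype(np.uint16)

print(float32_to_bfloat16_bits(np.array([1.0])))  # [16256] == 0x3f80, the bfloat16 encoding of 1.0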