Unverified commit 7f93e2b0, authored by zhangyikun02, committed by GitHub

update unittests for tile op and slice op on XPU, test=kunlun (#40227)

Parent: 86919910
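The two file diffs below migrate the slice and tile XPU unit tests from standalone `OpTest`/`XPUOpTest` classes to the `XPUOpTestWrapper` pattern, which generates one concrete test class per dtype that the XPU kernel registers. A minimal sketch of the pattern (not part of the diff), assuming the `xpu.get_test_cover_info` helpers behave as they are used below and that `create_test_class` injects the current dtype into each generated class as `in_type`:

```python
import paddle
from op_test_xpu import XPUOpTest
# Harness helpers imported exactly as in the diff; the sys.path setup used
# by the real test files is omitted here.
from xpu.get_test_cover_info import (create_test_class,
                                     get_xpu_op_support_types,
                                     XPUOpTestWrapper)


class XPUTestExampleOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'slice'  # op whose registered XPU dtypes are queried
        self.use_dynamic_create_class = False

    class TestExample(XPUOpTest):
        def setUp(self):
            # in_type is assumed to be injected per dtype by create_test_class.
            self.dtype = self.in_type
            self.place = paddle.XPUPlace(0)
            self.op_type = 'slice'


# One unittest class per supported dtype replaces a single hard-coded
# float32 test, which is also why the per-class @unittest.skipIf
# decorators disappear throughout the diff.
support_types = get_xpu_op_support_types('slice')
for stype in support_types:
    create_test_class(globals(), XPUTestExampleOp, stype)
```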
@@ -18,16 +18,23 @@ import sys
import unittest
sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
# Situation 1: starts(list, no tensor), ends(list, no tensor)
# 1.1 without attr(decrease)
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestSliceOp(OpTest):
class XPUTestSliceOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'slice'
self.use_dynamic_create_class = False
class TestSliceOp(XPUOpTest):
def setUp(self):
self.dtype = self.in_type
self.place = paddle.XPUPlace(0)
self.op_type = "slice"
self.config()
self.inputs = {'Input': self.input}
@@ -41,39 +48,36 @@ class TestSliceOp(OpTest):
}
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
self.starts = [1, 0, 2]
self.ends = [3, 3, 4]
self.axes = [0, 1, 2]
self.infer_flags = [1, 1, 1]
self.out = self.input[1:3, 0:3, 2:4, :]
def test_check_output(self):
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['Input'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestCase1(TestSliceOp):
if self.dtype == np.float16:
self.check_grad_with_place(self.place, ['Input'], 'Out')
else:
user_defined_grad_outputs = np.random.random(
self.out.shape).astype(self.dtype)
self.check_grad_with_place(
self.place, ['Input'],
'Out',
user_defined_grad_outputs=user_defined_grad_outputs)
class TestCase1(TestSliceOp):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
self.starts = [-3, 0, 2]
self.ends = [3, 100, -1]
self.axes = [0, 1, 2]
self.infer_flags = [1, 1, 1]
self.out = self.input[-3:3, 0:100, 2:-1, :]
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestCase2(TestSliceOp):
class TestCase2(TestSliceOp):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
self.starts = [-3, 0, 2]
self.ends = [3, 100, -1]
self.axes = [0, 1, 3]
@@ -82,10 +86,15 @@ class TestCase2(TestSliceOp):
# 1.2 with attr(decrease)
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestSliceOp_decs_dim(OpTest):
class XPUTestSliceOp_decs_dim(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'slice'
self.use_dynamic_create_class = False
class TestSliceOp_decs_dim(XPUOpTest):
def setUp(self):
self.dtype = self.in_type
self.place = paddle.XPUPlace(0)
self.op_type = "slice"
self.config()
self.inputs = {'Input': self.input}
@@ -100,7 +109,7 @@ class TestSliceOp_decs_dim(OpTest):
}
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
self.starts = [1, 0, 2]
self.ends = [2, 3, 4]
self.axes = [0, 1, 2]
@@ -109,19 +118,22 @@ class TestSliceOp_decs_dim(OpTest):
self.out = self.input[1, 0:3, 2:4, :]
def test_check_output(self):
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
self.check_output_with_place(self.place)
def test_check_grad_normal(self):
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['Input'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
if self.dtype == np.float16:
self.check_grad_with_place(self.place, ['Input'], 'Out')
else:
user_defined_grad_outputs = np.random.random(
self.out.shape).astype(self.dtype)
self.check_grad_with_place(
self.place, ['Input'],
'Out',
user_defined_grad_outputs=user_defined_grad_outputs)
class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
self.starts = [1, 0, 2]
self.ends = [2, 1, 4]
self.axes = [0, 1, 2]
@@ -129,12 +141,9 @@ class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
self.infer_flags = [1, 1, 1]
self.out = self.input[1, 0, 2:4, :]
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
self.starts = [-1, 0, 2]
self.ends = [1000000, 1, 4]
self.axes = [0, 1, 2]
@@ -142,12 +151,9 @@ class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
self.infer_flags = [1, 1, 1]
self.out = self.input[-1, 0, 2:4, :]
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
def config(self):
self.input = np.random.random([3, 4, 5, 7]).astype("float32")
self.input = np.random.random([3, 4, 5, 7]).astype(self.dtype)
self.starts = [0, 1, 2, 3]
self.ends = [1, 2, 3, 4]
self.axes = [0, 1, 2, 3]
@@ -155,12 +161,9 @@ class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
self.infer_flags = [1, 1, 1]
self.out = self.input[0, 1, 2, 3:4]
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
self.starts = [-1]
self.ends = [1000000]
self.axes = [3]
@@ -168,12 +171,9 @@ class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
self.infer_flags = [1, 1, 1]
self.out = self.input[:, :, :, -1]
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
self.starts = [0, 1, 2, 3]
self.ends = [1, 2, 3, 4]
self.axes = [0, 1, 2, 3]
@@ -182,5 +182,10 @@ class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
self.out = self.input[0, 1, 2, 3:4]
support_types = get_xpu_op_support_types('slice')
for stype in support_types:
create_test_class(globals(), XPUTestSliceOp, stype)
create_test_class(globals(), XPUTestSliceOp_decs_dim, stype)
if __name__ == '__main__':
unittest.main()
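End of the slice test file. The new `test_check_grad_normal` checks non-float16 dtypes with explicitly supplied `user_defined_grad_outputs`. For intuition, the analytic gradient of slice scatters the given output gradient back into the sliced region and is zero elsewhere; a small numpy illustration (not part of the diff), using the shapes from `TestSliceOp`:

```python
import numpy as np

# Shapes from TestSliceOp: input [3, 4, 5, 6], slice [1:3, 0:3, 2:4, :].
x = np.random.random([3, 4, 5, 6]).astype(np.float32)
out = x[1:3, 0:3, 2:4, :]
grad_out = np.random.random(out.shape).astype(np.float32)  # user_defined_grad_outputs

# Gradient of slice: scatter grad_out into the sliced region, zeros elsewhere.
grad_x = np.zeros_like(x)
grad_x[1:3, 0:3, 2:4, :] = grad_out
```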
@@ -24,27 +24,32 @@ import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid import core
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
np.random.seed(10)
#Situation 1: repeat_times is a list (without tensor)
class TestTileOpRank1(XPUOpTest):
class XPUTestTileOpRank1(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'tile'
self.use_dynamic_create_class = False
class TestTileOpRank1(XPUOpTest):
def setUp(self):
self.set_xpu()
self.dtype = self.in_type
self.__class__.no_need_check_grad = True
self.place = paddle.XPUPlace(0)
self.op_type = "tile"
self.init_data()
self.inputs = {'X': np.random.random(self.ori_shape).astype("float32")}
self.inputs = {
'X': np.random.random(self.ori_shape).astype(self.dtype)
}
self.attrs = {'repeat_times': self.repeat_times}
output = np.tile(self.inputs['X'], self.repeat_times)
self.outputs = {'Out': output}
def set_xpu(self):
self.__class__.use_xpu = True
def init_data(self):
self.ori_shape = [100]
self.repeat_times = [2]
@@ -52,51 +57,48 @@ class TestTileOpRank1(XPUOpTest):
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
pass
#with dimension expanding
class TestTileOpRank2Expanding(TestTileOpRank1):
#with dimension expanding
class TestTileOpRank2Expanding(TestTileOpRank1):
def init_data(self):
self.ori_shape = [120]
self.repeat_times = [2, 2]
class TestTileOpRank2(TestTileOpRank1):
class TestTileOpRank2(TestTileOpRank1):
def init_data(self):
self.ori_shape = [12, 14]
self.repeat_times = [2, 3]
class TestTileOpRank3_Corner(TestTileOpRank1):
class TestTileOpRank3_Corner(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 10, 5)
self.repeat_times = (1, 1, 1)
class TestTileOpRank3_Corner2(TestTileOpRank1):
class TestTileOpRank3_Corner2(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 10, 5)
self.repeat_times = (2, 2)
class TestTileOpRank3(TestTileOpRank1):
class TestTileOpRank3(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 4, 15)
self.repeat_times = (2, 1, 4)
class TestTileOpRank4(TestTileOpRank1):
class TestTileOpRank4(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 4, 5, 7)
self.repeat_times = (3, 2, 1, 2)
# Situation 2: repeat_times is a list (with tensor)
class TestTileOpRank1_tensor_attr(XPUOpTest):
class XPUTestTileOpRank1_tensor_attr(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'tile'
self.use_dynamic_create_class = False
class TestTileOpRank1_tensor_attr(XPUOpTest):
def setUp(self):
self.set_xpu()
self.dtype = self.in_type
self.__class__.no_need_check_grad = True
self.place = paddle.XPUPlace(0)
self.op_type = "tile"
self.init_data()
@@ -106,16 +108,13 @@ class TestTileOpRank1_tensor_attr(XPUOpTest):
(1)).astype('int32') * ele))
self.inputs = {
'X': np.random.random(self.ori_shape).astype("float32"),
'X': np.random.random(self.ori_shape).astype(self.dtype),
'repeat_times_tensor': repeat_times_tensor,
}
self.attrs = {"repeat_times": self.infer_repeat_times}
output = np.tile(self.inputs['X'], self.repeat_times)
self.outputs = {'Out': output}
def set_xpu(self):
self.__class__.use_xpu = True
def init_data(self):
self.ori_shape = [100]
self.repeat_times = [2]
@@ -124,18 +123,13 @@ class TestTileOpRank1_tensor_attr(XPUOpTest):
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
pass
class TestTileOpRank2_Corner_tensor_attr(TestTileOpRank1_tensor_attr):
class TestTileOpRank2_Corner_tensor_attr(TestTileOpRank1_tensor_attr):
def init_data(self):
self.ori_shape = [12, 14]
self.repeat_times = [1, 1]
self.infer_repeat_times = [1, -1]
class TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr):
class TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr):
def init_data(self):
self.ori_shape = [12, 14]
self.repeat_times = [2, 3]
@@ -143,24 +137,27 @@ class TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr):
# Situation 3: repeat_times is a tensor
class TestTileOpRank1_tensor(XPUOpTest):
class XPUTestTileOpRank1_tensor(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'tile'
self.use_dynamic_create_class = False
class TestTileOpRank1_tensor(XPUOpTest):
def setUp(self):
self.set_xpu()
self.dtype = self.in_type
self.__class__.no_need_check_grad = True
self.place = paddle.XPUPlace(0)
self.op_type = "tile"
self.init_data()
self.inputs = {
'X': np.random.random(self.ori_shape).astype("float32"),
'X': np.random.random(self.ori_shape).astype(self.dtype),
'RepeatTimes': np.array(self.repeat_times).astype("int32"),
}
self.attrs = {}
output = np.tile(self.inputs['X'], self.repeat_times)
self.outputs = {'Out': output}
def set_xpu(self):
self.__class__.use_xpu = True
def init_data(self):
self.ori_shape = [100]
self.repeat_times = [2]
@@ -168,77 +165,17 @@ class TestTileOpRank1_tensor(XPUOpTest):
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
pass
class TestTileOpRank2_tensor(TestTileOpRank1_tensor):
class TestTileOpRank2_tensor(TestTileOpRank1_tensor):
def init_data(self):
self.ori_shape = [12, 14]
self.repeat_times = [2, 3]
# Situation 4: input x is Integer
class TestTileOpInteger(XPUOpTest):
def setUp(self):
self.set_xpu()
self.place = paddle.XPUPlace(0)
self.op_type = "tile"
self.inputs = {
'X': np.random.randint(
10, size=(4, 4, 5)).astype("int32")
}
self.attrs = {'repeat_times': [2, 1, 4]}
output = np.tile(self.inputs['X'], (2, 1, 4))
self.outputs = {'Out': output}
def set_xpu(self):
self.__class__.use_xpu = True
def test_check_output(self):
self.check_output_with_place(self.place)
# Situation 5: input x is Integer
class TestTileOpInt64_t(XPUOpTest):
def setUp(self):
self.set_xpu()
self.place = paddle.XPUPlace(0)
self.op_type = "tile"
self.inputs = {
'X': np.random.randint(
10, size=(2, 4, 5)).astype("int64")
}
self.attrs = {'repeat_times': [2, 1, 4]}
output = np.tile(self.inputs['X'], (2, 1, 4))
self.outputs = {'Out': output}
def set_xpu(self):
self.__class__.use_xpu = True
def test_check_output(self):
self.check_output_with_place(self.place)
# Situation 6: input x is Bool
class TestTileOpBool(XPUOpTest):
def setUp(self):
self.set_xpu()
self.place = paddle.XPUPlace(0)
self.op_type = "tile"
self.inputs = {
'X': np.random.randint(
10, size=(2, 4, 5)).astype("bool")
}
self.attrs = {'repeat_times': [2, 1, 4]}
output = np.tile(self.inputs['X'], (2, 1, 4))
self.outputs = {'Out': output}
def set_xpu(self):
self.__class__.use_xpu = True
def test_check_output(self):
self.check_output_with_place(self.place)
support_types = get_xpu_op_support_types('tile')
for stype in support_types:
create_test_class(globals(), XPUTestTileOpRank1, stype)
create_test_class(globals(), XPUTestTileOpRank1_tensor_attr, stype)
create_test_class(globals(), XPUTestTileOpRank1_tensor, stype)
# Test python API
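The tile tests build their expected outputs with `np.tile`, which left-pads `repeat_times` with ones when it has fewer entries than `x.ndim`; that is what makes cases like `TestTileOpRank3_Corner2` (3-D input, two-entry `repeat_times`) well-defined. A quick illustration (not part of the diff):

```python
import numpy as np

x = np.random.random((2, 10, 5))  # shape from TestTileOpRank3_Corner2
out = np.tile(x, (2, 2))          # treated as repeat_times = (1, 2, 2)
assert out.shape == (2, 20, 10)
```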