From da492a13fb04aab7aef858e6d3e5019cfd035405 Mon Sep 17 00:00:00 2001
From: TTerror
Date: Wed, 23 Feb 2022 10:41:59 +0800
Subject: [PATCH] refactoring gather/masked_select/arg_max unittests for
 kunlun, *test=kunlun (#39711)

---
 .../unittests/xpu/test_arg_max_op_xpu.py      | 179 +++++-------
 .../tests/unittests/xpu/test_gather_op_xpu.py | 272 ++++++------------
 .../xpu/test_masked_select_op_xpu.py          |  80 ++----
 3 files changed, 190 insertions(+), 341 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py
index cbdd9db8ee..519a185250 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py
@@ -18,118 +18,93 @@ import unittest
 import numpy as np
 import sys
 sys.path.append("..")
+
+import paddle
 from op_test import OpTest
 from op_test_xpu import XPUOpTest
-import paddle
-import paddle.fluid.core as core
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 
 paddle.enable_static()
 
 
-class XPUBaseTestCase(XPUOpTest):
-    def initTestCase(self):
-        self.dims = (3, 4)
-        self.dtype = 'float32'
-        self.axis = 1
+class XPUTestArgMax(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'arg_max'
 
-    def setUp(self):
-        self.initTestCase()
-        self.__class__.op_type = 'arg_max'
-        self.__class__.use_xpu = True
-        np.random.seed(2021)
-        self.x = (np.random.random(self.dims)).astype(self.dtype)
-        self.inputs = {'X': self.x}
-        self.attrs = {'axis': self.axis, 'use_xpu': True}
-        if self.op_type == "arg_min":
-            self.outputs = {'Out': np.argmin(self.x, axis=self.axis)}
-        else:
-            self.outputs = {'Out': np.argmax(self.x, axis=self.axis)}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-
-# test argmax, dtype: float32
-class TestArgMaxFloat32Case1(XPUBaseTestCase):
-    def initTestCase(self):
-        self.op_type = 'arg_max'
-        self.dims = (3, 4, 5)
-        self.dtype = 'float32'
-        self.axis = -1
+    class XPUBaseTestCase(XPUOpTest):
+        def initTestCase(self):
+            self.dims = (3, 4)
+            self.axis = 1
 
+        def setUp(self):
+            self.op_type = 'arg_max'
+            self.dtype = self.in_type
+            self.initTestCase()
 
-class TestArgMaxFloat32Case2(XPUBaseTestCase):
-    def initTestCase(self):
-        self.op_type = 'arg_max'
-        self.dims = (3, 4, 5)
-        self.dtype = 'float32'
-        self.axis = 0
-
-
-class TestArgMaxFloat32Case3(XPUBaseTestCase):
-    def initTestCase(self):
-        self.op_type = 'arg_max'
-        self.dims = (3, 4, 5)
-        self.dtype = 'float32'
-        self.axis = 1
-
-
-class TestArgMaxFloat32Case4(XPUBaseTestCase):
-    def initTestCase(self):
-        self.op_type = 'arg_max'
-        self.dims = (3, 4, 5)
-        self.dtype = 'float32'
-        self.axis = 2
-
-
-class TestArgMaxFloat32Case5(XPUBaseTestCase):
-    def initTestCase(self):
-        self.op_type = 'arg_max'
-        self.dims = (3, 4)
-        self.dtype = 'float32'
-        self.axis = -1
-
-
-class TestArgMaxFloat32Case6(XPUBaseTestCase):
-    def initTestCase(self):
-        self.op_type = 'arg_max'
-        self.dims = (3, 4)
-        self.dtype = 'float32'
-        self.axis = 0
-
-
-class TestArgMaxFloat32Case7(XPUBaseTestCase):
-    def initTestCase(self):
-        self.op_type = 'arg_max'
-        self.dims = (3, 4)
-        self.dtype = 'float32'
-        self.axis = 1
-
-
-class TestArgMaxFloat32Case8(XPUBaseTestCase):
-    def initTestCase(self):
-        self.op_type = 'arg_max'
-        self.dims = (1, )
-        self.dtype = 'float32'
-        self.axis = 0
-
-
-class TestArgMaxFloat32Case9(XPUBaseTestCase):
-    def initTestCase(self):
-        self.op_type = 'arg_max'
-        self.dims = (2, )
-        self.dtype = 'float32'
-        self.axis = 0
-
+            self.x = (np.random.random(self.dims)).astype(self.dtype)
+            self.inputs = {'X': self.x}
+            self.attrs = {'axis': self.axis, 'use_xpu': True}
+            self.outputs = {'Out': np.argmax(self.x, axis=self.axis)}
 
-class TestArgMaxFloat32Case10(XPUBaseTestCase):
-    def initTestCase(self):
-        self.op_type = 'arg_max'
-        self.dims = (3, )
-        self.dtype = 'float32'
-        self.axis = 0
+        def test_check_output(self):
+            if paddle.is_compiled_with_xpu():
+                place = paddle.XPUPlace(0)
+                self.check_output_with_place(place)
+
+    class TestArgMaxCase1(XPUBaseTestCase):
+        def initTestCase(self):
+            self.dims = (3, 4, 5)
+            self.axis = -1
+
+    class TestArgMaxCase2(XPUBaseTestCase):
+        def initTestCase(self):
+            self.dims = (3, 4, 5)
+            self.axis = 0
+
+    class TestArgMaxCase3(XPUBaseTestCase):
+        def initTestCase(self):
+            self.dims = (3, 4, 5)
+            self.axis = 1
+
+    class TestArgMaxCase4(XPUBaseTestCase):
+        def initTestCase(self):
+            self.dims = (3, 4, 5)
+            self.axis = 2
+
+    class TestArgMaxCase5(XPUBaseTestCase):
+        def initTestCase(self):
+            self.dims = (3, 4)
+            self.axis = -1
+
+    class TestArgMaxCase6(XPUBaseTestCase):
+        def initTestCase(self):
+            self.dims = (3, 4)
+            self.axis = 0
+
+    class TestArgMaxCase7(XPUBaseTestCase):
+        def initTestCase(self):
+            self.dims = (3, 4)
+            self.axis = 1
+
+    class TestArgMaxCase8(XPUBaseTestCase):
+        def initTestCase(self):
+            self.dims = (1, )
+            self.axis = 0
+
+    class TestArgMaxCase9(XPUBaseTestCase):
+        def initTestCase(self):
+            self.dims = (2, )
+            self.axis = 0
+
+    class TestArgMaxCase10(XPUBaseTestCase):
+        def initTestCase(self):
+            self.dims = (3, )
+            self.axis = 0
+
+
+support_types = get_xpu_op_support_types('arg_max')
+for stype in support_types:
+    create_test_class(globals(), XPUTestArgMax, stype)
 
 
 class TestArgMaxAPI(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_gather_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_gather_op_xpu.py
index bdf74018ab..f0e6315514 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_gather_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_gather_op_xpu.py
@@ -20,9 +20,8 @@ sys.path.append("..")
 import numpy as np
 
 import paddle
-import paddle.fluid as fluid
-from op_test import OpTest
 from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 
 paddle.enable_static()
 
@@ -34,194 +33,87 @@ def gather_numpy(x, index, axis):
     return gather
 
 
-class TestXPUGatherOp(XPUOpTest):
-    def setUp(self):
-        self.op_type = "gather"
-        self.use_xpu = True
-        self.use_mkldnn = False
-        self.attrs = {'use_xpu': True}
-
-        self.config()
-        xnp = np.random.random(self.x_shape).astype(self.x_type)
-        self.inputs = {
-            'X': xnp,
-            'Index': np.array(self.index).astype(self.index_type)
-        }
-        self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
-
-    def config(self):
-        """
-        For multi-dimension input
-        """
-        self.dtype = np.float32
-        self.x_shape = (10, 20)
-        self.x_type = np.float32
-        self.index = [1, 3, 5]
-        self.index_type = np.int32
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
-
-
-class TestCase1(TestXPUGatherOp):
-    def config(self):
-        """
-        For one dimension input
-        """
-        self.dtype = np.float32
-        self.x_shape = (100)
-        self.x_type = np.float32
-        self.index = [1, 3, 5]
-        self.index_type = np.int32
-
-
-class TestCase2(TestXPUGatherOp):
-    def config(self):
-        """
-        For int64_t index type
-        """
-        self.dtype = np.float32
-        self.x_shape = (100)
-        self.x_type = np.float32
-        self.index = [1, 3, 5]
-        self.index_type = np.int64
-
-
-class TestCase3(TestXPUGatherOp):
-    def config(self):
-        """
-        For other input type
-        """
-        self.dtype = np.float32
-        self.x_shape = (10, 20)
-        self.x_type = np.float32
-        self.index = [1, 3, 5]
-        self.index_type = np.int32
-
-
-class TestCase4(TestXPUGatherOp):
-    def config(self):
-        self.dtype = np.float32
-        self.x_shape = (10, 20)
-        self.attrs = {'use_xpu': True, 'overwrite': False}
-        self.x_type = np.float32
-        self.index = [1, 1]
-        self.index_type = np.int32
-
-
-class TestCase5(TestXPUGatherOp):
-    def config(self):
-        self.dtype = np.float32
-        self.x_shape = (10, 20)
-        self.attrs = {'use_xpu': True, 'overwrite': False}
-        self.x_type = np.float32
-        self.index = [1, 1, 3]
-        self.index_type = np.int32
-
-
-class TestCase6(TestXPUGatherOp):
-    def config(self):
-        self.dtype = np.float32
-        self.x_shape = (10, 20)
-        self.attrs = {'use_xpu': True, 'overwrite': True}
-        self.x_type = np.float32
-        self.index = [1, 3]
-        self.index_type = np.int32
-
-
-class TestCase7(TestXPUGatherOp):
-    def config(self):
-        self.dtype = np.float32
-        self.x_shape = (10, 20)
-        self.attrs = {'use_xpu': True, 'overwrite': True}
-        self.x_type = np.float32
-        self.index = [1, 3]
-        self.index_type = np.int64
-
-
-## test fp16
-class TestCaseFP161(TestXPUGatherOp):
-    def config(self):
-        """
-        For one dimension input
-        """
-        self.dtype = np.float16
-        self.x_shape = (100)
-        self.x_type = np.float16
-        self.index = [1, 3, 5]
-        self.index_type = np.int32
-
-
-class TestCaseFP162(TestXPUGatherOp):
-    def config(self):
-        """
-        For int64_t index type
-        """
-        self.dtype = np.float16
-        self.x_shape = (100)
-        self.x_type = np.float16
-        self.index = [1, 3, 5]
-        self.index_type = np.int64
-
-
-class TestCaseFP163(TestXPUGatherOp):
-    def config(self):
-        """
-        For other input type
-        """
-        self.dtype = np.float16
-        self.x_shape = (10, 20)
-        self.x_type = np.float16
-        self.index = [1, 3, 5]
-        self.index_type = np.int32
-
-
-class TestCaseFP164(TestXPUGatherOp):
-    def config(self):
-        self.dtype = np.float16
-        self.x_shape = (10, 20)
-        self.attrs = {'use_xpu': True, 'overwrite': False}
-        self.x_type = np.float16
-        self.index = [1, 1]
-        self.index_type = np.int32
-
-
-class TestCaseFP165(TestXPUGatherOp):
-    def config(self):
-        self.dtype = np.float16
-        self.x_shape = (10, 20)
-        self.attrs = {'use_xpu': True, 'overwrite': False}
-        self.x_type = np.float16
-        self.index = [1, 1, 3]
-        self.index_type = np.int32
-
-
-class TestCaseFP166(TestXPUGatherOp):
-    def config(self):
-        self.dtype = np.float16
-        self.x_shape = (10, 20)
-        self.attrs = {'use_xpu': True, 'overwrite': True}
-        self.x_type = np.float16
-        self.index = [1, 3]
-        self.index_type = np.int32
-
-
-class TestCaseFP167(TestXPUGatherOp):
-    def config(self):
-        self.dtype = np.float16
-        self.x_shape = (10, 20)
-        self.attrs = {'use_xpu': True, 'overwrite': True}
-        self.x_type = np.float16
-        self.index = [1, 3]
-        self.index_type = np.int64
-
+class XPUTestGather(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'gather'
+
+    class TestXPUGatherOp(XPUOpTest):
+        def setUp(self):
+            self.op_type = "gather"
+            self.place = paddle.XPUPlace(0)
+            self.dtype = self.in_type
+
+            self.init_config()
+            xnp = np.random.random(self.x_shape).astype(self.dtype)
+            self.inputs = {
+                'X': xnp,
+                'Index': np.array(self.index).astype(self.index_type)
+            }
+            self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
+
+        def init_config(self):
+            self.x_shape = (10, 20)
+            self.index = [1, 3, 5]
+            self.index_type = np.int32
+
+        def test_check_output(self):
+            if paddle.is_compiled_with_xpu():
+                self.check_output_with_place(self.place)
+
+        def test_check_grad(self):
+            if paddle.is_compiled_with_xpu():
+                self.check_grad_with_place(self.place, ['X'], 'Out')
+
+    class TestCase1(TestXPUGatherOp):
+        def init_config(self):
+            self.x_shape = (100)
+            self.index = [1, 3, 5]
+            self.index_type = np.int32
+
+    class TestCase2(TestXPUGatherOp):
+        def init_config(self):
+            self.x_shape = (100)
+            self.index = [1, 3, 5]
+            self.index_type = np.int64
+
+    class TestCase3(TestXPUGatherOp):
+        def init_config(self):
+            self.x_shape = (10, 20)
+            self.index = [1, 3, 5]
+            self.index_type = np.int32
+
+    class TestCase4(TestXPUGatherOp):
+        def init_config(self):
+            self.x_shape = (10, 20)
+            self.attrs = {'overwrite': False}
+            self.index = [1, 1]
+            self.index_type = np.int32
+
+    class TestCase5(TestXPUGatherOp):
+        def init_config(self):
+            self.x_shape = (10, 20)
+            self.attrs = {'overwrite': False}
+            self.index = [1, 1, 3]
+            self.index_type = np.int32
+
+    class TestCase6(TestXPUGatherOp):
+        def init_config(self):
+            self.x_shape = (10, 20)
+            self.attrs = {'overwrite': True}
+            self.index = [1, 3]
+            self.index_type = np.int32
+
+    class TestCase7(TestXPUGatherOp):
+        def init_config(self):
+            self.x_shape = (10, 20)
+            self.attrs = {'overwrite': True}
+            self.index = [1, 3]
+            self.index_type = np.int64
+
+
+support_types = get_xpu_op_support_types('gather')
+for stype in support_types:
+    create_test_class(globals(), XPUTestGather, stype)
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py
index 8c5b3f3d8a..990594e1f9 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py
@@ -18,10 +18,11 @@ import numpy as np
 import unittest
 import sys
 sys.path.append("..")
-from op_test import OpTest
-from op_test_xpu import XPUOpTest
+
 import paddle
 import paddle.fluid as fluid
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 
 paddle.enable_static()
 
@@ -34,61 +35,42 @@ def np_masked_select(x, mask):
     return result.flatten()
 
 
-class TestMaskedSelectOp(XPUOpTest):
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-
-    def setUp(self):
-        self.set_xpu()
-        self.init()
-        self.init_dtype()
-        self.place = paddle.XPUPlace(0)
-        self.op_type = "masked_select"
-        x = np.random.random(self.shape).astype(self.dtype)
-        mask = np.array(np.random.randint(2, size=self.shape, dtype=bool))
-        out = np_masked_select(x, mask)
-        self.inputs = {'X': x, 'Mask': mask}
-        self.outputs = {'Y': out}
-
-    def test_check_output(self):
-        self.check_output_with_place(self.place)
-
-    def test_check_grad(self):
-        pass
-
-    def init(self):
-        self.shape = (50, 3)
-
-    def init_dtype(self):
-        self.dtype = np.float32
-
-
-class TestMaskedSelectOp1(TestMaskedSelectOp):
-    def init(self):
-        self.shape = (6, 8, 9, 18)
+class XPUTestMaskedSelectOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'masked_select'
 
+    class TestMaskedSelectOp(XPUOpTest):
+        def setUp(self):
+            self.init()
+            self.dtype = self.in_type
+            self.place = paddle.XPUPlace(0)
+            self.op_type = "masked_select"
+            self.__class__.no_need_check_grad = True
 
-class TestMaskedSelectOp2(TestMaskedSelectOp):
-    def init(self):
-        self.shape = (168, )
+            x = np.random.random(self.shape).astype(self.dtype)
+            mask = np.array(np.random.randint(2, size=self.shape, dtype=bool))
+            out = np_masked_select(x, mask)
+            self.inputs = {'X': x, 'Mask': mask}
+            self.outputs = {'Y': out}
 
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
 
-class TestMaskedSelectOpInt32(TestMaskedSelectOp):
-    def init_dtype(self):
-        self.dtype = np.int32
+        def init(self):
+            self.shape = (50, 3)
 
-    # skip_check_grad_ci(reason="get_numeric_gradient not support int32")
-    def test_check_grad(self):
-        pass
+    class TestMaskedSelectOp1(TestMaskedSelectOp):
+        def init(self):
+            self.shape = (6, 8, 9, 18)
 
+    class TestMaskedSelectOp2(TestMaskedSelectOp):
+        def init(self):
+            self.shape = (168, )
 
-class TestMaskedSelectOpInt64(TestMaskedSelectOp):
-    def init_dtype(self):
-        self.dtype = np.int64
 
-    # skip_check_grad_ci(reason="get_numeric_gradient not support int64")
-    def test_check_grad(self):
-        pass
+support_types = get_xpu_op_support_types('masked_select')
+for stype in support_types:
+    create_test_class(globals(), XPUTestMaskedSelectOp, stype)
 
 
 class TestMaskedSelectAPI(unittest.TestCase):
-- 
GitLab