Unverified commit da492a13, authored by TTerror, committed via GitHub

refactoring gather/masked_select/arg_max unittests for kunlun, *test=kunlun (#39711)

Parent commit: 22abb6b3
......@@ -18,31 +18,32 @@ import unittest
import numpy as np
import sys
sys.path.append("..")
import paddle
from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid.core as core
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
class XPUTestArgMax(XPUOpTestWrapper):
    """Op-test wrapper for ``arg_max``.

    ``create_test_class`` (from ``xpu.get_test_cover_info``) enumerates this
    wrapper and generates one concrete test class per dtype returned by
    ``get_xpu_op_support_types('arg_max')``, injecting the dtype as
    ``self.in_type`` on each generated class.
    """

    def __init__(self):
        self.op_name = 'arg_max'

    class XPUBaseTestCase(XPUOpTest):
        def initTestCase(self):
            # Subclasses override to vary input shape and reduction axis.
            # Deliberately does NOT set self.dtype: dtype comes from
            # self.in_type (set per generated class) and must not be clobbered.
            self.dims = (3, 4)
            self.axis = 1

        def setUp(self):
            """Build random input and the numpy reference argmax output."""
            self.op_type = 'arg_max'
            self.dtype = self.in_type
            self.initTestCase()

            self.x = (np.random.random(self.dims)).astype(self.dtype)
            self.inputs = {'X': self.x}
            self.attrs = {'axis': self.axis, 'use_xpu': True}
            self.outputs = {'Out': np.argmax(self.x, axis=self.axis)}

        def test_check_output(self):
            # NOTE(review): the diff elides the first line(s) of this method;
            # the XPU guard below follows the pattern used elsewhere in the
            # file — confirm against the full source.
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_output_with_place(place)
# Concrete arg_max cases: each varies only the input shape and/or the
# reduction axis. dtype is supplied per generated class via self.in_type,
# so these overrides intentionally set neither op_type nor dtype.
class TestArgMaxCase1(XPUBaseTestCase):
    def initTestCase(self):
        self.dims = (3, 4, 5)
        self.axis = -1


class TestArgMaxCase2(XPUBaseTestCase):
    def initTestCase(self):
        self.dims = (3, 4, 5)
        self.axis = 0


class TestArgMaxCase3(XPUBaseTestCase):
    def initTestCase(self):
        self.dims = (3, 4, 5)
        self.axis = 1


class TestArgMaxCase4(XPUBaseTestCase):
    def initTestCase(self):
        self.dims = (3, 4, 5)
        self.axis = 2


class TestArgMaxCase5(XPUBaseTestCase):
    def initTestCase(self):
        self.dims = (3, 4)
        self.axis = -1


class TestArgMaxCase6(XPUBaseTestCase):
    def initTestCase(self):
        self.dims = (3, 4)
        self.axis = 0


class TestArgMaxCase7(XPUBaseTestCase):
    def initTestCase(self):
        self.dims = (3, 4)
        self.axis = 1


class TestArgMaxCase8(XPUBaseTestCase):
    def initTestCase(self):
        self.dims = (1, )
        self.axis = 0


class TestArgMaxCase9(XPUBaseTestCase):
    def initTestCase(self):
        self.dims = (2, )
        self.axis = 0


class TestArgMaxCase10(XPUBaseTestCase):
    def initTestCase(self):
        self.dims = (3, )
        self.axis = 0
# Generate one concrete test class per XPU-supported dtype for arg_max.
support_types = get_xpu_op_support_types('arg_max')
for stype in support_types:
    create_test_class(globals(), XPUTestArgMax, stype)
class TestArgMaxAPI(unittest.TestCase):
def initTestCase(self):
self.dims = (3, 4, 5)
......
......@@ -20,9 +20,8 @@ sys.path.append("..")
import numpy as np
import paddle
import paddle.fluid as fluid
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
......@@ -34,194 +33,87 @@ def gather_numpy(x, index, axis):
return gather
class XPUTestGather(XPUOpTestWrapper):
    """Op-test wrapper for ``gather``.

    ``create_test_class`` generates one concrete test class per dtype
    returned by ``get_xpu_op_support_types('gather')``, injecting the dtype
    as ``self.in_type``.
    """

    def __init__(self):
        self.op_name = 'gather'

    class TestXPUGatherOp(XPUOpTest):
        def setUp(self):
            """Build a random X, an index tensor, and the fancy-index reference."""
            self.op_type = "gather"
            self.attrs = {'use_xpu': True}
            self.place = paddle.XPUPlace(0)
            # dtype is injected per generated class by the cover-info machinery.
            self.dtype = self.in_type

            # init_config() runs after the defaults above so subclasses may
            # override x_shape / index / index_type / attrs.
            self.init_config()
            xnp = np.random.random(self.x_shape).astype(self.dtype)
            self.inputs = {
                'X': xnp,
                'Index': np.array(self.index).astype(self.index_type)
            }
            self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}

        def init_config(self):
            """Default configuration: multi-dimension input, int32 index."""
            self.x_shape = (10, 20)
            self.index = [1, 3, 5]
            self.index_type = np.int32

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ['X'], 'Out')
# Concrete gather cases. dtype comes from self.in_type, so these overrides
# only vary shape, index values, index dtype, and the 'overwrite' attribute.
class TestCase1(TestXPUGatherOp):
    def init_config(self):
        """One-dimension input."""
        # NOTE(review): (100) is the int 100, not a 1-tuple — kept as in source.
        self.x_shape = (100)
        self.index = [1, 3, 5]
        self.index_type = np.int32


class TestCase2(TestXPUGatherOp):
    def init_config(self):
        """One-dimension input with int64 index type."""
        self.x_shape = (100)
        self.index = [1, 3, 5]
        self.index_type = np.int64


class TestCase3(TestXPUGatherOp):
    def init_config(self):
        """Multi-dimension input."""
        self.x_shape = (10, 20)
        self.index = [1, 3, 5]
        self.index_type = np.int32


class TestCase4(TestXPUGatherOp):
    def init_config(self):
        """Duplicate indices with overwrite disabled."""
        self.x_shape = (10, 20)
        self.attrs = {'overwrite': False}
        self.index = [1, 1]
        self.index_type = np.int32


class TestCase5(TestXPUGatherOp):
    def init_config(self):
        """Duplicate indices with overwrite disabled."""
        self.x_shape = (10, 20)
        self.attrs = {'overwrite': False}
        self.index = [1, 1, 3]
        self.index_type = np.int32


class TestCase6(TestXPUGatherOp):
    def init_config(self):
        """Distinct indices with overwrite enabled."""
        self.x_shape = (10, 20)
        self.attrs = {'overwrite': True}
        self.index = [1, 3]
        self.index_type = np.int32


class TestCase7(TestXPUGatherOp):
    def init_config(self):
        """Distinct indices, overwrite enabled, int64 index type."""
        self.x_shape = (10, 20)
        self.attrs = {'overwrite': True}
        self.index = [1, 3]
        self.index_type = np.int64
## test fp16
# NOTE(review): legacy float16-specific cases. The refactored base class
# drives dtype through create_test_class()/self.in_type and calls
# init_config(), so these config() overrides are no longer invoked by the
# new TestXPUGatherOp. They appear in this dump as removed diff lines and
# are reproduced here (reindented) only for reference.
class TestCaseFP161(TestXPUGatherOp):
    def config(self):
        """One-dimension input."""
        self.dtype = np.float16
        self.x_shape = (100)
        self.x_type = np.float16
        self.index = [1, 3, 5]
        self.index_type = np.int32


class TestCaseFP162(TestXPUGatherOp):
    def config(self):
        """int64 index type."""
        self.dtype = np.float16
        self.x_shape = (100)
        self.x_type = np.float16
        self.index = [1, 3, 5]
        self.index_type = np.int64


class TestCaseFP163(TestXPUGatherOp):
    def config(self):
        """Multi-dimension input."""
        self.dtype = np.float16
        self.x_shape = (10, 20)
        self.x_type = np.float16
        self.index = [1, 3, 5]
        self.index_type = np.int32


class TestCaseFP164(TestXPUGatherOp):
    def config(self):
        self.dtype = np.float16
        self.x_shape = (10, 20)
        self.attrs = {'use_xpu': True, 'overwrite': False}
        self.x_type = np.float16
        self.index = [1, 1]
        self.index_type = np.int32


class TestCaseFP165(TestXPUGatherOp):
    def config(self):
        self.dtype = np.float16
        self.x_shape = (10, 20)
        self.attrs = {'use_xpu': True, 'overwrite': False}
        self.x_type = np.float16
        self.index = [1, 1, 3]
        self.index_type = np.int32


class TestCaseFP166(TestXPUGatherOp):
    def config(self):
        self.dtype = np.float16
        self.x_shape = (10, 20)
        self.attrs = {'use_xpu': True, 'overwrite': True}
        self.x_type = np.float16
        self.index = [1, 3]
        self.index_type = np.int32


class TestCaseFP167(TestXPUGatherOp):
    def config(self):
        self.dtype = np.float16
        self.x_shape = (10, 20)
        self.attrs = {'use_xpu': True, 'overwrite': True}
        self.x_type = np.float16
        self.index = [1, 3]
        self.index_type = np.int64
# Generate one concrete test class per XPU-supported dtype for gather.
support_types = get_xpu_op_support_types('gather')
for stype in support_types:
    create_test_class(globals(), XPUTestGather, stype)

if __name__ == "__main__":
    unittest.main()
......@@ -18,10 +18,11 @@ import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid as fluid
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
......@@ -34,16 +35,18 @@ def np_masked_select(x, mask):
return result.flatten()
class XPUTestMaskedSelectOp(XPUOpTestWrapper):
    """Op-test wrapper for ``masked_select``.

    ``create_test_class`` generates one concrete test class per dtype
    returned by ``get_xpu_op_support_types('masked_select')``, injecting the
    dtype as ``self.in_type``.
    """

    def __init__(self):
        self.op_name = 'masked_select'

    class TestMaskedSelectOp(XPUOpTest):
        def setUp(self):
            """Build random X, a random boolean mask, and the numpy reference."""
            self.init()
            self.dtype = self.in_type
            self.place = paddle.XPUPlace(0)
            self.op_type = "masked_select"
            # Gradient checking is skipped for every generated dtype class.
            self.__class__.no_need_check_grad = True
            x = np.random.random(self.shape).astype(self.dtype)
            mask = np.array(np.random.randint(2, size=self.shape, dtype=bool))
            out = np_masked_select(x, mask)
            # NOTE(review): the diff elides the lines that wire the tensors
            # into the op here; the standard OpTest pattern below is assumed —
            # confirm against the full file.
            self.inputs = {'X': x, 'Mask': mask}
            self.outputs = {'Y': out}

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def init(self):
            # Default input shape; subclasses override.
            self.shape = (50, 3)
# Concrete masked_select cases: vary only the input shape.
class TestMaskedSelectOp1(TestMaskedSelectOp):
    def init(self):
        self.shape = (6, 8, 9, 18)


class TestMaskedSelectOp2(TestMaskedSelectOp):
    def init(self):
        self.shape = (168, )
# NOTE(review): legacy dtype-specific cases. The refactored base class
# drives dtype through create_test_class()/self.in_type, so these
# init_dtype() overrides are no longer called; reproduced (reindented)
# only for reference — they appear in this dump as removed diff lines.
class TestMaskedSelectOpInt32(TestMaskedSelectOp):
    def init_dtype(self):
        self.dtype = np.int32

    # skip_check_grad_ci(reason="get_numeric_gradient not support int32")
    def test_check_grad(self):
        pass


class TestMaskedSelectOpInt64(TestMaskedSelectOp):
    def init_dtype(self):
        self.dtype = np.int64

    # skip_check_grad_ci(reason="get_numeric_gradient not support int64")
    def test_check_grad(self):
        pass
# Generate one concrete test class per XPU-supported dtype for masked_select.
support_types = get_xpu_op_support_types('masked_select')
for stype in support_types:
    create_test_class(globals(), XPUTestMaskedSelectOp, stype)
class TestMaskedSelectAPI(unittest.TestCase):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册