Unverified · Commit da492a13 · authored by: T TTerror · committed by: GitHub

refactoring gather/masked_select/arg_max unittests for kunlun, *test=kunlun (#39711)

Parent 22abb6b3
...@@ -18,31 +18,32 @@ import unittest ...@@ -18,31 +18,32 @@ import unittest
import numpy as np import numpy as np
import sys import sys
sys.path.append("..") sys.path.append("..")
import paddle
from op_test import OpTest from op_test import OpTest
from op_test_xpu import XPUOpTest from op_test_xpu import XPUOpTest
import paddle from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
import paddle.fluid.core as core
paddle.enable_static() paddle.enable_static()
class XPUTestArgMax(XPUOpTestWrapper):
    """Wrapper grouping the arg_max XPU test cases.

    The XPU test-cover framework (create_test_class) instantiates one
    concrete test class per supported dtype, injecting `in_type`.
    """

    def __init__(self):
        self.op_name = 'arg_max'

    class XPUBaseTestCase(XPUOpTest):
        """Base arg_max case; subclasses override dims/axis via initTestCase."""

        def initTestCase(self):
            # Default configuration; overridden by the Case1..Case10 subclasses.
            self.dims = (3, 4)
            self.axis = 1

        def setUp(self):
            self.op_type = 'arg_max'
            # in_type is supplied per-dtype by the XPU test-cover framework.
            self.dtype = self.in_type
            self.initTestCase()

            self.x = (np.random.random(self.dims)).astype(self.dtype)
            self.inputs = {'X': self.x}
            self.attrs = {'axis': self.axis, 'use_xpu': True}
            self.outputs = {'Out': np.argmax(self.x, axis=self.axis)}

        def test_check_output(self):
            # NOTE(review): the diff view elided this guard line at a hunk
            # boundary; reconstructed to match the sibling gather test —
            # confirm against the upstream file.
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_output_with_place(place)

    # Case subclasses only vary the input shape and reduction axis.
    class TestArgMaxFloat32Case1(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (3, 4, 5)
            self.axis = -1

    class TestArgMaxFloat32Case2(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (3, 4, 5)
            self.axis = 0

    class TestArgMaxFloat32Case3(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (3, 4, 5)
            self.axis = 1

    class TestArgMaxFloat32Case4(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (3, 4, 5)
            self.axis = 2

    class TestArgMaxFloat32Case5(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (3, 4)
            self.axis = -1

    class TestArgMaxFloat32Case6(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (3, 4)
            self.axis = 0

    class TestArgMaxFloat32Case7(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (3, 4)
            self.axis = 1

    class TestArgMaxFloat32Case8(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (1, )
            self.axis = 0

    class TestArgMaxFloat32Case9(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (2, )
            self.axis = 0

    class TestArgMaxFloat32Case10(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (3, )
            self.axis = 0


# Register one concrete test class per dtype the XPU arg_max kernel supports.
support_types = get_xpu_op_support_types('arg_max')
for stype in support_types:
    create_test_class(globals(), XPUTestArgMax, stype)
class TestArgMaxAPI(unittest.TestCase): class TestArgMaxAPI(unittest.TestCase):
def initTestCase(self): def initTestCase(self):
self.dims = (3, 4, 5) self.dims = (3, 4, 5)
......
...@@ -20,9 +20,8 @@ sys.path.append("..") ...@@ -20,9 +20,8 @@ sys.path.append("..")
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
from op_test import OpTest
from op_test_xpu import XPUOpTest from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static() paddle.enable_static()
...@@ -34,194 +33,87 @@ def gather_numpy(x, index, axis): ...@@ -34,194 +33,87 @@ def gather_numpy(x, index, axis):
return gather return gather
class XPUTestGather(XPUOpTestWrapper):
    """Wrapper grouping the gather-op XPU test cases.

    Dtypes (including fp16, previously hand-written as TestCaseFP16*) are now
    injected per supported type by create_test_class via `in_type`.
    """

    def __init__(self):
        self.op_name = 'gather'

    class TestXPUGatherOp(XPUOpTest):
        """Base gather case; subclasses override the config in init_config."""

        def setUp(self):
            self.op_type = "gather"
            self.place = paddle.XPUPlace(0)
            # in_type is supplied per-dtype by the XPU test-cover framework.
            self.dtype = self.in_type

            self.init_config()
            xnp = np.random.random(self.x_shape).astype(self.dtype)
            self.inputs = {
                'X': xnp,
                'Index': np.array(self.index).astype(self.index_type)
            }
            # Reference result: plain numpy fancy indexing along axis 0.
            self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}

        def init_config(self):
            # Multi-dimension input, int32 index.
            self.x_shape = (10, 20)
            self.index = [1, 3, 5]
            self.index_type = np.int32

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
                self.check_output_with_place(self.place)

        def test_check_grad(self):
            if paddle.is_compiled_with_xpu():
                self.check_grad_with_place(self.place, ['X'], 'Out')

    class TestCase1(TestXPUGatherOp):
        def init_config(self):
            # One-dimension input.
            self.x_shape = (100)
            self.index = [1, 3, 5]
            self.index_type = np.int32

    class TestCase2(TestXPUGatherOp):
        def init_config(self):
            # int64 index type.
            self.x_shape = (100)
            self.index = [1, 3, 5]
            self.index_type = np.int64

    class TestCase3(TestXPUGatherOp):
        def init_config(self):
            self.x_shape = (10, 20)
            self.index = [1, 3, 5]
            self.index_type = np.int32

    class TestCase4(TestXPUGatherOp):
        def init_config(self):
            self.x_shape = (10, 20)
            self.attrs = {'overwrite': False}
            self.index = [1, 1]
            self.index_type = np.int32

    class TestCase5(TestXPUGatherOp):
        def init_config(self):
            self.x_shape = (10, 20)
            self.attrs = {'overwrite': False}
            self.index = [1, 1, 3]
            self.index_type = np.int32

    class TestCase6(TestXPUGatherOp):
        def init_config(self):
            self.x_shape = (10, 20)
            self.attrs = {'overwrite': True}
            self.index = [1, 3]
            self.index_type = np.int32

    class TestCase7(TestXPUGatherOp):
        def init_config(self):
            self.x_shape = (10, 20)
            self.attrs = {'overwrite': True}
            self.index = [1, 3]
            self.index_type = np.int64


# Register one concrete test class per dtype the XPU gather kernel supports.
support_types = get_xpu_op_support_types('gather')
for stype in support_types:
    create_test_class(globals(), XPUTestGather, stype)
# Run all discovered unittest cases when executed as a script.
if __name__ == "__main__":
    unittest.main()
...@@ -18,10 +18,11 @@ import numpy as np ...@@ -18,10 +18,11 @@ import numpy as np
import unittest import unittest
import sys import sys
sys.path.append("..") sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static() paddle.enable_static()
...@@ -34,16 +35,18 @@ def np_masked_select(x, mask): ...@@ -34,16 +35,18 @@ def np_masked_select(x, mask):
return result.flatten() return result.flatten()
class XPUTestMaskedSelectOp(XPUOpTestWrapper):
    """Wrapper grouping the masked_select XPU test cases.

    Dtypes (previously hand-written Int32/Int64 subclasses) are now injected
    per supported type by create_test_class via `in_type`.
    """

    def __init__(self):
        self.op_name = 'masked_select'

    class TestMaskedSelectOp(XPUOpTest):
        """Base masked_select case; subclasses override the shape in init."""

        def setUp(self):
            self.init()
            # in_type is supplied per-dtype by the XPU test-cover framework.
            self.dtype = self.in_type
            self.place = paddle.XPUPlace(0)
            self.op_type = "masked_select"
            # Gradient checking is skipped for all dtypes of this op.
            self.__class__.no_need_check_grad = True
            x = np.random.random(self.shape).astype(self.dtype)
            mask = np.array(np.random.randint(2, size=self.shape, dtype=bool))
            out = np_masked_select(x, mask)
            # NOTE(review): the diff view elided the lines wiring inputs and
            # outputs at a hunk boundary; reconstructed from the pre-refactor
            # test — confirm against the upstream file.
            self.inputs = {'X': x, 'Mask': mask}
            self.outputs = {'Y': out}

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def init(self):
            self.shape = (50, 3)

    class TestMaskedSelectOp1(TestMaskedSelectOp):
        def init(self):
            self.shape = (6, 8, 9, 18)

    class TestMaskedSelectOp2(TestMaskedSelectOp):
        def init(self):
            self.shape = (168, )


# Register one concrete test class per dtype the XPU kernel supports.
support_types = get_xpu_op_support_types('masked_select')
for stype in support_types:
    create_test_class(globals(), XPUTestMaskedSelectOp, stype)
class TestMaskedSelectAPI(unittest.TestCase): class TestMaskedSelectAPI(unittest.TestCase):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register or sign in.