Unverified commit a3247ab5, authored by TTerror, committed by GitHub

refactoring where/where_index/scatter unittests for kunlun, *test=kunlun (#39619)

Parent: c05cd7ed
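The diffs below move the where, where_index, and scatter tests onto the XPUOpTestWrapper pattern already used by the argsort and huber_loss tests, and trim the leftover set_xpu/init_dtype helpers in the latter. Each file defines an XPUOpTestWrapper subclass holding the cases, then registers one concrete test class per supported dtype with get_xpu_op_support_types(op) plus create_test_class(globals(), Wrapper, stype); the generated classes carry the dtype in in_type, which the cases read as self.dtype = self.in_type. The following stdlib-only sketch illustrates that registration idea; SUPPORTED_TYPES, ScatterCaseBase, and make_dtype_classes are illustrative stand-ins, not Paddle APIs.

import unittest

import numpy as np

# Hypothetical stand-in for get_xpu_op_support_types('scatter'); the real
# list comes from the XPU op registry.
SUPPORTED_TYPES = ['float32', 'int32', 'int64']


class ScatterCaseBase(unittest.TestCase):
    # Overridden per generated subclass, mirroring the in_type attribute
    # that the XPU test framework injects.
    in_type = 'float32'

    def test_scatter_numpy_reference(self):
        # NumPy reference for scatter with overwrite semantics.
        ref = np.ones((3, 3)).astype(self.in_type)
        ids = np.array([1, 2])
        updates = np.random.random((2, 3)).astype(self.in_type)
        out = np.copy(ref)
        out[ids] = updates
        np.testing.assert_allclose(out[ids], updates)


def make_dtype_classes(scope, base, dtypes):
    # One subclass per dtype, dropped into the module namespace so unittest
    # discovery picks it up (roughly the idea behind create_test_class).
    for dtype in dtypes:
        name = base.__name__ + '_' + dtype
        scope[name] = type(name, (base,), {'in_type': dtype})


make_dtype_classes(globals(), ScatterCaseBase, SUPPORTED_TYPES)

if __name__ == '__main__':
    unittest.main()

Generating subclasses this way keeps a single parameterized test body while still yielding one discoverable unittest class per dtype.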
@@ -60,10 +60,11 @@ class XPUTestArgsortOp1(XPUOpTestWrapper):
     class TestArgsortOp(XPUOpTest):
         def setUp(self):
-            self.set_xpu()
             self.op_type = "argsort"
             self.place = paddle.XPUPlace(0)
+            self.__class__.no_need_check_grad = True
             self.dtype = self.in_type
             self.input_shape = (2, 2, 2, 3, 3)
             self.axis = -1 if not hasattr(self, 'init_axis') else self.init_axis
             self.descending = False if not hasattr(
@@ -94,10 +95,6 @@ class XPUTestArgsortOp1(XPUOpTestWrapper):
                 self.x, kind='heapsort', axis=self.axis)
             self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis)
 
-        def set_xpu(self):
-            self.__class__.use_xpu = True
-            self.__class__.no_need_check_grad = True
-
         def test_check_output(self):
             self.check_output_with_place(self.place)
@@ -110,9 +107,10 @@ class XPUTestArgsortOp2(XPUOpTestWrapper):
     class TestArgsortOp(XPUOpTest):
         def setUp(self):
-            self.set_xpu()
             self.op_type = "argsort"
             self.place = paddle.XPUPlace(0)
+            self.__class__.no_need_check_grad = True
             self.init_dtype()
             self.init_inputshape()
             self.init_axis()
@@ -143,10 +141,6 @@ class XPUTestArgsortOp2(XPUOpTestWrapper):
                 self.x, kind='heapsort', axis=self.axis)
             self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis)
 
-        def set_xpu(self):
-            self.__class__.use_xpu = True
-            self.__class__.no_need_check_grad = True
-
         def init_inputshape(self):
             self.input_shape = (2, 2, 2, 3, 3)
@@ -220,11 +214,9 @@ class XPUTestHuberLossOp(XPUOpTestWrapper):
     class TestHuberLossOp(XPUOpTest):
         def setUp(self):
-            self.set_xpu()
             self.op_type = 'huber_loss'
             self.place = paddle.XPUPlace(0)
-            self.init_dtype()
+            self.dtype = self.in_type
             self.set_inputs()
             self.set_attrs()
@@ -253,12 +245,6 @@ class XPUTestHuberLossOp(XPUOpTestWrapper):
         def set_shape(self):
             return (100, 1)
 
-        def set_xpu(self):
-            self.__class__.use_xpu = True
-
-        def init_dtype(self):
-            self.dtype = self.in_type
-
         def test_check_output(self):
             self.check_output_with_place(self.place)
...
@@ -18,152 +18,125 @@ import numpy as np
 import unittest
 import sys
 sys.path.append("..")
-from op_test import OpTest
-from op_test_xpu import XPUOpTest
 import paddle
-import paddle.fluid as fluid
-import paddle.fluid.core as core
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper, type_dict_str_to_numpy
 
 paddle.enable_static()
 
-class TestScatterOp(XPUOpTest):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-        ref_np = np.ones((3, 50)).astype("float32")
-        index_np = np.array([1, 2]).astype("int32")
-        updates_np = np.random.random((2, 50)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-
-    def test_check_output(self):
-        self.check_output_with_place(self.place)
-
-    def test_check_grad(self):
-        pass
-
-class TestScatterOp0(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-        ref_np = np.ones((3, 3)).astype("float32")
-        index_np = np.array([1, 2]).astype("int32")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.attrs = {'overwrite': True}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp1(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-        ref_np = np.ones((3, 3)).astype("float32")
-        zeros_np = np.zeros([2, 3]).astype('float32')
-        index_np = np.array([1, 1]).astype("int32")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = zeros_np
-        for i in range(0, len(index_np)):
-            output_np[index_np[i]] += updates_np[i]
-        self.attrs = {'overwrite': False}
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp2(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-        ref_np = np.ones((3, 3)).astype("float32")
-        index_np = np.array([1, 2]).astype("int32")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp3(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-        ref_np = np.ones((3, 3)).astype("float32")
-        zeros_np = np.zeros([2, 3]).astype('float32')
-        index_np = np.array([1, 1]).astype("int32")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = zeros_np
-        for i in range(0, len(index_np)):
-            output_np[index_np[i]] += updates_np[i]
-        self.attrs = {'overwrite': False}
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp4(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-        ref_np = np.ones((3, 3)).astype("float32")
-        index_np = np.array([1, 2]).astype("int64")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp5(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-        ref_np = np.ones((3, 3)).astype("float32")
-        index_np = np.array([1, 2]).astype("int64")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp6(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-        ref_np = np.ones((3, 3)).astype("int64")
-        index_np = np.array([1, 2]).astype("int64")
-        updates_np = np.random.random((2, 3)).astype("int64")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
+class XPUTestScatterOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'scatter'
+        self.use_dynamic_create_class = True
+
+    def dynamic_create_class(self):
+        base_class = self.TestScatterOp
+        classes = []
+        test_data_case = []
+
+        # case1
+        ref_np = np.ones((3, 50))
+        index_np = np.array([1, 2])
+        updates_np = np.random.random((2, 50))
+        output_np = np.copy(ref_np)
+        output_np[index_np] = updates_np
+        data_dict = {
+            'init_ref_np': ref_np,
+            'init_index_np': index_np,
+            'init_updates_np': updates_np,
+            'init_output_np': output_np,
+            'test_name': 'case1'
+        }
+        test_data_case.append(data_dict)
+
+        # case2
+        ref_np = np.ones((3, 3))
+        index_np = np.array([1, 2])
+        updates_np = np.random.random((2, 3))
+        output_np = np.copy(ref_np)
+        output_np[index_np] = updates_np
+        data_dict = {
+            'init_ref_np': ref_np,
+            'init_index_np': index_np,
+            'init_updates_np': updates_np,
+            'init_output_np': output_np,
+            'test_name': 'case2'
+        }
+        test_data_case.append(data_dict)
+
+        # case3
+        ref_np = np.ones((3, 3))
+        zeros_np = np.zeros([2, 3])
+        index_np = np.array([1, 1]).astype("int32")
+        updates_np = np.random.randint(low=-1000, high=1000, size=(2, 3))
+        output_np = np.copy(ref_np)
+        output_np[index_np] = zeros_np
+        for i in range(0, len(index_np)):
+            output_np[index_np[i]] += updates_np[i]
+        data_dict = {
+            'init_ref_np': ref_np,
+            'init_index_np': index_np,
+            'init_updates_np': updates_np,
+            'init_output_np': output_np,
+            'test_name': 'case3'
+        }
+        test_data_case.append(data_dict)
+
+        for data_dict in test_data_case:
+            for index_type in ['int32', 'int64']:
+                for overwrite in [True, False]:
+                    class_name = 'XPUTestScatterOp_index_type_' + data_dict[
+                        'test_name'] + '_' + str(index_type) + '_' + str(
+                            overwrite)
+                    attr_dict = data_dict
+                    attr_dict['index_type'] = type_dict_str_to_numpy[index_type]
+                    attr_dict['init_overwrite'] = overwrite
+                    classes.append([class_name, attr_dict])
+        return base_class, classes
+
+    class TestScatterOp(XPUOpTest):
+        def setUp(self):
+            self.init_config()
+            self.index_type = np.int32 if not hasattr(
+                self, 'index_type') else self.index_type
+            self.overwrite = True if not hasattr(
+                self, 'init_overwrite') else self.init_overwrite
+
+            if not hasattr(self, 'init_ref_np'):
+                self.ref_np = np.ones((3, 50)).astype(self.dtype)
+                self.index_np = np.array([1, 2]).astype(self.index_type)
+                self.updates_np = np.random.random((2, 50)).astype(self.dtype)
+                self.output_np = np.copy(self.ref_np)
+                self.output_np[self.index_np] = self.updates_np
+            else:
+                self.ref_np = self.init_ref_np.astype(self.dtype)
+                self.index_np = self.init_index_np.astype(self.index_type)
+                self.updates_np = self.init_updates_np.astype(self.dtype)
+                self.output_np = self.init_output_np.astype(self.dtype)
+
+            self.inputs = {
+                'X': self.ref_np,
+                'Ids': self.index_np,
+                'Updates': self.updates_np
+            }
+            self.attrs = {'overwrite': self.overwrite}
+            self.outputs = {'Out': self.output_np}
+
+        def init_config(self):
+            self.op_type = "scatter"
+            self.place = paddle.XPUPlace(0)
+            self.dtype = self.in_type
+            self.__class__.no_need_check_grad = True
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+support_types = get_xpu_op_support_types('scatter')
+for stype in support_types:
+    create_test_class(globals(), XPUTestScatterOp, stype)
 
 if __name__ == '__main__':
     unittest.main()
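Note on case3 in the scatter diff above: it exercises overwrite=False with a duplicated index, and its expected output is built by first zeroing the scattered rows and then accumulating every update row by row, because plain fancy-index assignment would keep only the last update for a repeated index. A standalone NumPy check of that reference construction (no Paddle involved):

import numpy as np

# Reference construction for a scatter with overwrite=False and a duplicated
# index, mirroring case3 of the refactored test above.
ref_np = np.ones((3, 3))
index_np = np.array([1, 1]).astype("int32")
updates_np = np.random.randint(low=-1000, high=1000, size=(2, 3))

output_np = np.copy(ref_np)
output_np[index_np] = np.zeros([2, 3])       # clear the rows being written
for i in range(0, len(index_np)):
    output_np[index_np[i]] += updates_np[i]  # accumulate every update

# Row 1 receives both updates, so it must equal their sum; rows 0 and 2 keep
# their original ones. np.add.at would perform the same accumulation directly.
assert np.allclose(output_np[1], updates_np.sum(axis=0))
assert np.allclose(output_np[[0, 2]], 1.0)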
@@ -16,71 +16,77 @@ from __future__ import print_function
 import numpy as np
 import unittest
-import paddle
 import sys
 sys.path.append("..")
-from op_test import OpTest
-from op_test_xpu import XPUOpTest
-from paddle.fluid.op import Operator
+
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
-paddle.enable_static()
-
-class TestWhereIndexOp(XPUOpTest):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "where_index"
-        self.place = paddle.XPUPlace(0)
-        self.init_config()
-
-    def test_check_output(self):
-        self.check_output_with_place(self.place)
-
-    def test_check_grad(self):
-        pass
-
-    def init_config(self):
-        self.inputs = {'Condition': np.array([True, False, True]), }
-        self.outputs = {'Out': np.array([[0], [2]], dtype='int64')}
-
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-
-class TestNotBool(TestWhereIndexOp):
-    def init_config(self):
-        self.inputs = {'Condition': np.array([1, 0, 8]), }
-        self.outputs = {'Out': np.array([[0], [2]], dtype='int64')}
-
-class TestAllFalse(TestWhereIndexOp):
-    def init_config(self):
-        self.inputs = {'Condition': np.array([False, False, False]), }
-        self.outputs = {'Out': np.array([], dtype='int64')}
-
-class TestRank2(TestWhereIndexOp):
-    def init_config(self):
-        self.inputs = {'Condition': np.array([[True, False], [False, True]]), }
-        self.outputs = {'Out': np.array([[0, 0], [1, 1]], dtype='int64')}
-
-class TestRank3(TestWhereIndexOp):
-    def init_config(self):
-        self.inputs = {
-            'Condition': np.array([[[True, False], [False, True]],
-                                   [[False, True], [True, False]],
-                                   [[False, False], [False, True]]]),
-        }
-        self.outputs = {
-            'Out': np.array(
-                [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]],
-                dtype='int64')
-        }
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
+
+paddle.enable_static()
+
+class XPUTestWhereIndexOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'where_index'
+
+    class TestWhereIndexOp(XPUOpTest):
+        def setUp(self):
+            self.init_config()
+            self.init_data()
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+        def init_data(self):
+            self.inputs = {
+                'Condition': np.array([True, False, True]).astype(self.dtype),
+            }
+            self.outputs = {'Out': np.array([[0], [2]], dtype='int64')}
+
+        def init_config(self):
+            self.op_type = "where_index"
+            self.place = paddle.XPUPlace(0)
+            self.dtype = self.in_type
+            self.__class__.no_need_check_grad = True
+
+    class TestAllFalse(TestWhereIndexOp):
+        def init_data(self):
+            self.inputs = {
+                'Condition': np.array([False, False, False]).astype(self.dtype),
+            }
+            self.outputs = {'Out': np.array([], dtype='int64')}
+
+    class TestRank2(TestWhereIndexOp):
+        def init_data(self):
+            self.inputs = {
+                'Condition':
+                np.array([[True, False], [False, True]]).astype(self.dtype),
+            }
+            self.outputs = {'Out': np.array([[0, 0], [1, 1]], dtype='int64')}
+
+    class TestRank3(TestWhereIndexOp):
+        def init_data(self):
+            self.inputs = {
+                'Condition':
+                np.array([[[True, False], [False, True]],
+                          [[False, True], [True, False]],
+                          [[False, False], [False, True]]]).astype(self.dtype),
+            }
+            self.outputs = {
+                'Out': np.array(
+                    [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]],
+                    dtype='int64')
+            }
+
+support_types = get_xpu_op_support_types('where_index')
+for stype in support_types:
+    create_test_class(globals(), XPUTestWhereIndexOp, stype)
 
 class TestWhereOpError(unittest.TestCase):
...
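The hand-written 'Out' expectations in the where_index cases above are simply the int64 coordinates of the True elements of 'Condition', so they can be cross-checked against np.argwhere; for example, for the rank-3 case:

import numpy as np

# Cross-check the hand-written expectation in the rank-3 where_index case:
# 'Out' is the coordinate list of the True elements of 'Condition'.
cond = np.array([[[True, False], [False, True]],
                 [[False, True], [True, False]],
                 [[False, False], [False, True]]])
expected = np.array(
    [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]], dtype='int64')

assert np.array_equal(np.argwhere(cond), expected)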
@@ -18,52 +18,61 @@ import numpy as np
 import unittest
 import sys
 sys.path.append("..")
-from op_test import OpTest
-from op_test_xpu import XPUOpTest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program
 from paddle.fluid.backward import append_backward
-paddle.enable_static()
-
-class TestXPUWhereOp(XPUOpTest):
-    def setUp(self):
-        self.op_type = "where"
-        self.set_xpu()
-        self.init_config()
-        self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y}
-        self.outputs = {'Out': np.where(self.cond, self.x, self.y)}
-
-    def init_config(self):
-        self.x = np.random.uniform(-3, 5, (100)).astype("float32")
-        self.y = np.random.uniform(-3, 5, (100)).astype("float32")
-        self.cond = np.zeros((100)).astype("bool")
-
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-        self.place = paddle.XPUPlace(0)
-
-    def test_check_output(self):
-        self.check_output_with_place(self.place)
-
-    def test_check_grad_normal(self):
-        self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
-
-class TestXPUWhereOp2(TestXPUWhereOp):
-    def init_config(self):
-        self.x = np.random.uniform(-5, 5, (60, 2)).astype("float32")
-        self.y = np.random.uniform(-5, 5, (60, 2)).astype("float32")
-        self.cond = np.ones((60, 2)).astype("bool")
-
-class TestXPUWhereOp3(TestXPUWhereOp):
-    def init_config(self):
-        self.x = np.random.uniform(-3, 5, (20, 2, 4)).astype("float32")
-        self.y = np.random.uniform(-3, 5, (20, 2, 4)).astype("float32")
-        self.cond = np.array(np.random.randint(2, size=(20, 2, 4)), dtype=bool)
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
+
+paddle.enable_static()
+
+class XPUTestWhereOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'where'
+
+    class TestXPUWhereOp(XPUOpTest):
+        def setUp(self):
+            self.init_config()
+            self.init_data()
+            self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y}
+            self.outputs = {'Out': np.where(self.cond, self.x, self.y)}
+
+        def init_data(self):
+            self.x = np.random.uniform(-3, 5, (100)).astype(self.dtype)
+            self.y = np.random.uniform(-3, 5, (100)).astype(self.dtype)
+            self.cond = np.zeros((100)).astype("bool")
+
+        def init_config(self):
+            self.op_type = "where"
+            self.dtype = self.in_type
+            self.place = paddle.XPUPlace(0)
+            self.__class__.no_need_check_grad = True
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+    class TestXPUWhereOp2(TestXPUWhereOp):
+        def init_data(self):
+            self.x = np.random.uniform(-5, 5, (60, 2)).astype(self.dtype)
+            self.y = np.random.uniform(-5, 5, (60, 2)).astype(self.dtype)
+            self.cond = np.ones((60, 2)).astype("bool")
+
+    class TestXPUWhereOp3(TestXPUWhereOp):
+        def init_data(self):
+            self.x = np.random.uniform(-3, 5, (20, 2, 4)).astype(self.dtype)
+            self.y = np.random.uniform(-3, 5, (20, 2, 4)).astype(self.dtype)
+            self.cond = np.array(
+                np.random.randint(
+                    2, size=(20, 2, 4)), dtype=bool)
+
+support_types = get_xpu_op_support_types('where')
+for stype in support_types:
+    create_test_class(globals(), XPUTestWhereOp, stype)
 
 class TestXPUWhereAPI(unittest.TestCase):
...