Unverified commit a3247ab5, authored by TTerror, committed by GitHub

refactoring where/where_index/scatter unittests for kunlun, *test=kunlun (#39619)

Parent c05cd7ed
@@ -60,10 +60,11 @@ class XPUTestArgsortOp1(XPUOpTestWrapper):
     class TestArgsortOp(XPUOpTest):
         def setUp(self):
-            self.set_xpu()
             self.op_type = "argsort"
             self.place = paddle.XPUPlace(0)
+            self.__class__.no_need_check_grad = True
             self.dtype = self.in_type
             self.input_shape = (2, 2, 2, 3, 3)
             self.axis = -1 if not hasattr(self, 'init_axis') else self.init_axis
             self.descending = False if not hasattr(
@@ -94,10 +95,6 @@ class XPUTestArgsortOp1(XPUOpTestWrapper):
                 self.x, kind='heapsort', axis=self.axis)
             self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis)

-        def set_xpu(self):
-            self.__class__.use_xpu = True
-            self.__class__.no_need_check_grad = True
-
         def test_check_output(self):
             self.check_output_with_place(self.place)
@@ -110,9 +107,10 @@ class XPUTestArgsortOp2(XPUOpTestWrapper):
     class TestArgsortOp(XPUOpTest):
         def setUp(self):
-            self.set_xpu()
             self.op_type = "argsort"
             self.place = paddle.XPUPlace(0)
+            self.__class__.no_need_check_grad = True
             self.init_dtype()
             self.init_inputshape()
             self.init_axis()
@@ -143,10 +141,6 @@ class XPUTestArgsortOp2(XPUOpTestWrapper):
                 self.x, kind='heapsort', axis=self.axis)
             self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis)

-        def set_xpu(self):
-            self.__class__.use_xpu = True
-            self.__class__.no_need_check_grad = True
-
         def init_inputshape(self):
             self.input_shape = (2, 2, 2, 3, 3)
@@ -220,11 +214,9 @@ class XPUTestHuberLossOp(XPUOpTestWrapper):
     class TestHuberLossOp(XPUOpTest):
         def setUp(self):
-            self.set_xpu()
             self.op_type = 'huber_loss'
             self.place = paddle.XPUPlace(0)
-            self.init_dtype()
+            self.dtype = self.in_type
             self.set_inputs()
             self.set_attrs()
@@ -253,12 +245,6 @@ class XPUTestHuberLossOp(XPUOpTestWrapper):
         def set_shape(self):
             return (100, 1)

-        def set_xpu(self):
-            self.__class__.use_xpu = True
-
-        def init_dtype(self):
-            self.dtype = self.in_type
-
         def test_check_output(self):
             self.check_output_with_place(self.place)
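Note on the hunks above: the per-class set_xpu() helper is dropped, and setUp now reads its dtype from self.in_type (stamped onto each generated class by the XPU test-wrapper machinery) and sets no_need_check_grad directly. A minimal, self-contained sketch of that dtype parameterization, using plain unittest and a hypothetical make_case factory rather than Paddle's real create_test_class:

import unittest
import numpy as np

class ArgsortCaseBase(unittest.TestCase):
    in_type = np.float32  # overridden per generated class, like self.in_type in the tests above

    def setUp(self):
        self.dtype = self.in_type
        self.x = np.random.random((2, 3)).astype(self.dtype)

    def test_argsort_matches_sort(self):
        idx = np.argsort(self.x, kind='heapsort', axis=-1)
        np.testing.assert_array_equal(
            np.take_along_axis(self.x, idx, axis=-1),
            np.sort(self.x, kind='heapsort', axis=-1))

def make_case(dtype):
    # Hypothetical stand-in for create_test_class: one concrete class per dtype.
    name = 'TestArgsortCase_' + np.dtype(dtype).name
    return name, type(name, (ArgsortCaseBase,), {'in_type': dtype})

for _dtype in (np.float32, np.float16, np.int32):
    _name, _cls = make_case(_dtype)
    globals()[_name] = _cls

if __name__ == '__main__':
    unittest.main()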
@@ -18,152 +18,125 @@ import numpy as np
 import unittest
 import sys
 sys.path.append("..")
-from op_test import OpTest
-from op_test_xpu import XPUOpTest
 import paddle
-import paddle.fluid as fluid
-import paddle.fluid.core as core
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper, type_dict_str_to_numpy

 paddle.enable_static()

-class TestScatterOp(XPUOpTest):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-
-        ref_np = np.ones((3, 50)).astype("float32")
-        index_np = np.array([1, 2]).astype("int32")
-        updates_np = np.random.random((2, 50)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-
-    def test_check_output(self):
-        self.check_output_with_place(self.place)
-
-    def test_check_grad(self):
-        pass
-
-class TestScatterOp0(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-
-        ref_np = np.ones((3, 3)).astype("float32")
-        index_np = np.array([1, 2]).astype("int32")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.attrs = {'overwrite': True}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp1(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-
-        ref_np = np.ones((3, 3)).astype("float32")
-        zeros_np = np.zeros([2, 3]).astype('float32')
-        index_np = np.array([1, 1]).astype("int32")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = zeros_np
-        for i in range(0, len(index_np)):
-            output_np[index_np[i]] += updates_np[i]
-        self.attrs = {'overwrite': False}
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp2(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-
-        ref_np = np.ones((3, 3)).astype("float32")
-        index_np = np.array([1, 2]).astype("int32")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp3(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-
-        ref_np = np.ones((3, 3)).astype("float32")
-        zeros_np = np.zeros([2, 3]).astype('float32')
-        index_np = np.array([1, 1]).astype("int32")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = zeros_np
-        for i in range(0, len(index_np)):
-            output_np[index_np[i]] += updates_np[i]
-        self.attrs = {'overwrite': False}
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp4(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-
-        ref_np = np.ones((3, 3)).astype("float32")
-        index_np = np.array([1, 2]).astype("int64")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp5(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-
-        ref_np = np.ones((3, 3)).astype("float32")
-        index_np = np.array([1, 2]).astype("int64")
-        updates_np = np.random.random((2, 3)).astype("float32")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
-
-class TestScatterOp6(TestScatterOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "scatter"
-        self.place = paddle.XPUPlace(0)
-
-        ref_np = np.ones((3, 3)).astype("int64")
-        index_np = np.array([1, 2]).astype("int64")
-        updates_np = np.random.random((2, 3)).astype("int64")
-        output_np = np.copy(ref_np)
-        output_np[index_np] = updates_np
-        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
-        self.outputs = {'Out': output_np}
+class XPUTestScatterOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'scatter'
+        self.use_dynamic_create_class = True
+
+    def dynamic_create_class(self):
+        base_class = self.TestScatterOp
+        classes = []
+        test_data_case = []
+
+        # case1
+        ref_np = np.ones((3, 50))
+        index_np = np.array([1, 2])
+        updates_np = np.random.random((2, 50))
+        output_np = np.copy(ref_np)
+        output_np[index_np] = updates_np
+        data_dict = {
+            'init_ref_np': ref_np,
+            'init_index_np': index_np,
+            'init_updates_np': updates_np,
+            'init_output_np': output_np,
+            'test_name': 'case1'
+        }
+        test_data_case.append(data_dict)
+
+        # case2
+        ref_np = np.ones((3, 3))
+        index_np = np.array([1, 2])
+        updates_np = np.random.random((2, 3))
+        output_np = np.copy(ref_np)
+        output_np[index_np] = updates_np
+        data_dict = {
+            'init_ref_np': ref_np,
+            'init_index_np': index_np,
+            'init_updates_np': updates_np,
+            'init_output_np': output_np,
+            'test_name': 'case2'
+        }
+        test_data_case.append(data_dict)
+
+        # case3
+        ref_np = np.ones((3, 3))
+        zeros_np = np.zeros([2, 3])
+        index_np = np.array([1, 1]).astype("int32")
+        updates_np = np.random.randint(low=-1000, high=1000, size=(2, 3))
+        output_np = np.copy(ref_np)
+        output_np[index_np] = zeros_np
+        for i in range(0, len(index_np)):
+            output_np[index_np[i]] += updates_np[i]
+        data_dict = {
+            'init_ref_np': ref_np,
+            'init_index_np': index_np,
+            'init_updates_np': updates_np,
+            'init_output_np': output_np,
+            'test_name': 'case3'
+        }
+        test_data_case.append(data_dict)
+
+        for data_dict in test_data_case:
+            for index_type in ['int32', 'int64']:
+                for overwrite in [True, False]:
+                    class_name = 'XPUTestScatterOp_index_type_' + data_dict[
+                        'test_name'] + '_' + str(index_type) + '_' + str(
+                            overwrite)
+                    attr_dict = data_dict
+                    attr_dict['index_type'] = type_dict_str_to_numpy[index_type]
+                    attr_dict['init_overwrite'] = overwrite
+                    classes.append([class_name, attr_dict])
+        return base_class, classes
+
+    class TestScatterOp(XPUOpTest):
+        def setUp(self):
+            self.init_config()
+            self.index_type = np.int32 if not hasattr(
+                self, 'index_type') else self.index_type
+            self.overwrite = True if not hasattr(
+                self, 'init_overwrite') else self.init_overwrite
+
+            if not hasattr(self, 'init_ref_np'):
+                self.ref_np = np.ones((3, 50)).astype(self.dtype)
+                self.index_np = np.array([1, 2]).astype(self.index_type)
+                self.updates_np = np.random.random((2, 50)).astype(self.dtype)
+                self.output_np = np.copy(self.ref_np)
+                self.output_np[self.index_np] = self.updates_np
+            else:
+                self.ref_np = self.init_ref_np.astype(self.dtype)
+                self.index_np = self.init_index_np.astype(self.index_type)
+                self.updates_np = self.init_updates_np.astype(self.dtype)
+                self.output_np = self.init_output_np.astype(self.dtype)
+
+            self.inputs = {
+                'X': self.ref_np,
+                'Ids': self.index_np,
+                'Updates': self.updates_np
+            }
+            self.attrs = {'overwrite': self.overwrite}
+            self.outputs = {'Out': self.output_np}
+
+        def init_config(self):
+            self.op_type = "scatter"
+            self.place = paddle.XPUPlace(0)
+            self.dtype = self.in_type
+            self.__class__.no_need_check_grad = True
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+support_types = get_xpu_op_support_types('scatter')
+for stype in support_types:
+    create_test_class(globals(), XPUTestScatterOp, stype)

 if __name__ == '__main__':
     unittest.main()
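The scatter rewrite uses the wrapper's dynamic mode: with use_dynamic_create_class = True, the framework calls dynamic_create_class(), which returns a base test class plus [class_name, attr_dict] pairs, and one subclass is generated per (data case, index type, overwrite) combination. The sketch below reproduces that mechanism with a plain type()-based factory; the class and helper names here are illustrative, not the Paddle test framework's real API:

import unittest
import numpy as np

class ScatterCaseBase(unittest.TestCase):
    # Defaults; the factory below overrides them per generated class,
    # mirroring how attr_dict entries are stamped onto each subclass.
    init_overwrite = True
    index_type = np.int32

    def test_numpy_reference(self):
        ref = np.ones((3, 3))
        index = np.array([1, 2]).astype(self.index_type)
        updates = np.random.random((2, 3))
        out = np.copy(ref)
        if self.init_overwrite:
            out[index] = updates            # overwrite semantics
        else:
            out[index] = np.zeros((2, 3))   # accumulate semantics
            for i in range(len(index)):
                out[index[i]] += updates[i]
        self.assertEqual(out.shape, ref.shape)

def dynamic_create_class(base_class):
    # Returns (base_class, [[class_name, attr_dict], ...]) like the wrapper does.
    classes = []
    for index_type in ['int32', 'int64']:
        for overwrite in [True, False]:
            name = 'TestScatterCase_' + index_type + '_' + str(overwrite)
            classes.append([name, {
                'index_type': np.dtype(index_type).type,
                'init_overwrite': overwrite,
            }])
    return base_class, classes

base, cases = dynamic_create_class(ScatterCaseBase)
for name, attrs in cases:
    globals()[name] = type(name, (base,), attrs)

if __name__ == '__main__':
    unittest.main()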
@@ -16,64 +16,65 @@ from __future__ import print_function
 import numpy as np
 import unittest
-import paddle
 import sys
 sys.path.append("..")
-from op_test import OpTest
-from op_test_xpu import XPUOpTest
-from paddle.fluid.op import Operator
+
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

 paddle.enable_static()

-class TestWhereIndexOp(XPUOpTest):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "where_index"
-        self.place = paddle.XPUPlace(0)
-        self.init_config()
-
-    def test_check_output(self):
-        self.check_output_with_place(self.place)
-
-    def test_check_grad(self):
-        pass
-
-    def init_config(self):
-        self.inputs = {'Condition': np.array([True, False, True]), }
-        self.outputs = {'Out': np.array([[0], [2]], dtype='int64')}
-
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-
-class TestNotBool(TestWhereIndexOp):
-    def init_config(self):
-        self.inputs = {'Condition': np.array([1, 0, 8]), }
-        self.outputs = {'Out': np.array([[0], [2]], dtype='int64')}
-
-class TestAllFalse(TestWhereIndexOp):
-    def init_config(self):
-        self.inputs = {'Condition': np.array([False, False, False]), }
-        self.outputs = {'Out': np.array([], dtype='int64')}
-
-class TestRank2(TestWhereIndexOp):
-    def init_config(self):
-        self.inputs = {'Condition': np.array([[True, False], [False, True]]), }
-        self.outputs = {'Out': np.array([[0, 0], [1, 1]], dtype='int64')}
-
-class TestRank3(TestWhereIndexOp):
-    def init_config(self):
-        self.inputs = {
-            'Condition': np.array([[[True, False], [False, True]],
-                                   [[False, True], [True, False]],
-                                   [[False, False], [False, True]]]),
-        }
-        self.outputs = {
+class XPUTestWhereIndexOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'where_index'
+
+    class TestWhereIndexOp(XPUOpTest):
+        def setUp(self):
+            self.init_config()
+            self.init_data()
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+        def init_data(self):
+            self.inputs = {
+                'Condition': np.array([True, False, True]).astype(self.dtype),
+            }
+            self.outputs = {'Out': np.array([[0], [2]], dtype='int64')}
+
+        def init_config(self):
+            self.op_type = "where_index"
+            self.place = paddle.XPUPlace(0)
+            self.dtype = self.in_type
+            self.__class__.no_need_check_grad = True
+
+    class TestAllFalse(TestWhereIndexOp):
+        def init_data(self):
+            self.inputs = {
+                'Condition': np.array([False, False, False]).astype(self.dtype),
+            }
+            self.outputs = {'Out': np.array([], dtype='int64')}
+
+    class TestRank2(TestWhereIndexOp):
+        def init_data(self):
+            self.inputs = {
+                'Condition':
+                np.array([[True, False], [False, True]]).astype(self.dtype),
+            }
+            self.outputs = {'Out': np.array([[0, 0], [1, 1]], dtype='int64')}
+
+    class TestRank3(TestWhereIndexOp):
+        def init_data(self):
+            self.inputs = {
+                'Condition':
+                np.array([[[True, False], [False, True]],
+                          [[False, True], [True, False]],
+                          [[False, False], [False, True]]]).astype(self.dtype),
+            }
+            self.outputs = {
@@ -83,6 +84,11 @@ class TestRank3(TestWhereIndexOp):
         }

+support_types = get_xpu_op_support_types('where_index')
+for stype in support_types:
+    create_test_class(globals(), XPUTestWhereIndexOp, stype)
+
 class TestWhereOpError(unittest.TestCase):
     def test_api(self):
         with program_guard(Program(), Program()):
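For reference, the hand-written expected outputs in the where_index cases above are exactly what np.argwhere produces on the same conditions; a quick standalone check (not part of the test file):

import numpy as np

# Rank-1 condition: where_index returns the indices of the True entries.
cond1 = np.array([True, False, True])
assert (np.argwhere(cond1) == np.array([[0], [2]])).all()

# Rank-2 condition: one row of coordinates per True entry.
cond2 = np.array([[True, False], [False, True]])
assert (np.argwhere(cond2) == np.array([[0, 0], [1, 1]])).all()

# All-False condition: an empty index array.
assert np.argwhere(np.array([False, False, False])).shape == (0, 1)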
@@ -18,52 +18,61 @@ import numpy as np
 import unittest
 import sys
 sys.path.append("..")
-from op_test import OpTest
-from op_test_xpu import XPUOpTest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program
 from paddle.fluid.backward import append_backward
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

 paddle.enable_static()

-class TestXPUWhereOp(XPUOpTest):
-    def setUp(self):
-        self.op_type = "where"
-        self.set_xpu()
-        self.init_config()
-        self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y}
-        self.outputs = {'Out': np.where(self.cond, self.x, self.y)}
-
-    def init_config(self):
-        self.x = np.random.uniform(-3, 5, (100)).astype("float32")
-        self.y = np.random.uniform(-3, 5, (100)).astype("float32")
-        self.cond = np.zeros((100)).astype("bool")
-
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-        self.place = paddle.XPUPlace(0)
-
-    def test_check_output(self):
-        self.check_output_with_place(self.place)
-
-    def test_check_grad_normal(self):
-        self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
-
-class TestXPUWhereOp2(TestXPUWhereOp):
-    def init_config(self):
-        self.x = np.random.uniform(-5, 5, (60, 2)).astype("float32")
-        self.y = np.random.uniform(-5, 5, (60, 2)).astype("float32")
-        self.cond = np.ones((60, 2)).astype("bool")
-
-class TestXPUWhereOp3(TestXPUWhereOp):
-    def init_config(self):
-        self.x = np.random.uniform(-3, 5, (20, 2, 4)).astype("float32")
-        self.y = np.random.uniform(-3, 5, (20, 2, 4)).astype("float32")
-        self.cond = np.array(np.random.randint(2, size=(20, 2, 4)), dtype=bool)
+class XPUTestWhereOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'where'
+
+    class TestXPUWhereOp(XPUOpTest):
+        def setUp(self):
+            self.init_config()
+            self.init_data()
+            self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y}
+            self.outputs = {'Out': np.where(self.cond, self.x, self.y)}
+
+        def init_data(self):
+            self.x = np.random.uniform(-3, 5, (100)).astype(self.dtype)
+            self.y = np.random.uniform(-3, 5, (100)).astype(self.dtype)
+            self.cond = np.zeros((100)).astype("bool")
+
+        def init_config(self):
+            self.op_type = "where"
+            self.dtype = self.in_type
+            self.place = paddle.XPUPlace(0)
+            self.__class__.no_need_check_grad = True
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+    class TestXPUWhereOp2(TestXPUWhereOp):
+        def init_data(self):
+            self.x = np.random.uniform(-5, 5, (60, 2)).astype(self.dtype)
+            self.y = np.random.uniform(-5, 5, (60, 2)).astype(self.dtype)
+            self.cond = np.ones((60, 2)).astype("bool")
+
+    class TestXPUWhereOp3(TestXPUWhereOp):
+        def init_data(self):
+            self.x = np.random.uniform(-3, 5, (20, 2, 4)).astype(self.dtype)
+            self.y = np.random.uniform(-3, 5, (20, 2, 4)).astype(self.dtype)
+            self.cond = np.array(
+                np.random.randint(
+                    2, size=(20, 2, 4)), dtype=bool)
+
+support_types = get_xpu_op_support_types('where')
+for stype in support_types:
+    create_test_class(globals(), XPUTestWhereOp, stype)

 class TestXPUWhereAPI(unittest.TestCase):
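The where tests build their expected outputs with np.where(cond, x, y), which selects from x where the condition is True and from y elsewhere; with the all-zero and all-one conditions used above, the reference degenerates to y or x respectively. A short standalone check of that semantics (illustrative only, not part of the test file):

import numpy as np

x = np.random.uniform(-3, 5, (100,)).astype(np.float32)
y = np.random.uniform(-3, 5, (100,)).astype(np.float32)

# All-False condition: np.where returns y everywhere (cf. the base case's np.zeros cond).
assert (np.where(np.zeros((100,), dtype=bool), x, y) == y).all()

# All-True condition: np.where returns x everywhere (cf. TestXPUWhereOp2's np.ones cond).
assert (np.where(np.ones((100,), dtype=bool), x, y) == x).all()

# Mixed random condition, as in TestXPUWhereOp3: element-wise selection.
cond = np.random.randint(2, size=(100,)).astype(bool)
out = np.where(cond, x, y)
assert (out[cond] == x[cond]).all() and (out[~cond] == y[~cond]).all()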