Unverified commit a3247ab5, authored by TTerror, committed by GitHub

refactoring where/where_index/scatter unittests for kunlun, *test=kunlun (#39619)

Parent c05cd7ed
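This refactor moves the where/where_index/scatter tests onto the shared XPU test-coverage helpers: each operator's cases are grouped under an `XPUOpTestWrapper` subclass, and `create_test_class` registers one concrete test class for every data type reported by `get_xpu_op_support_types`. A minimal sketch of the pattern is shown below; the operator name `my_op` and its identity output are hypothetical placeholders, so a real registered XPU op name would be needed for `get_xpu_op_support_types` to resolve.

```python
import sys
import unittest
import numpy as np

sys.path.append("..")
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (XPUOpTestWrapper, create_test_class,
                                     get_xpu_op_support_types)

paddle.enable_static()


class XPUTestMyOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'my_op'  # hypothetical op name, for illustration only

    class TestMyOp(XPUOpTest):
        def setUp(self):
            self.op_type = "my_op"
            self.place = paddle.XPUPlace(0)
            self.dtype = self.in_type  # injected per supported dtype by create_test_class
            self.__class__.no_need_check_grad = True
            x = np.random.random((3, 4)).astype(self.dtype)
            self.inputs = {'X': x}
            self.outputs = {'Out': x}  # placeholder reference output

        def test_check_output(self):
            self.check_output_with_place(self.place)


# Register one unittest class per data type the XPU kernel supports.
support_types = get_xpu_op_support_types('my_op')
for stype in support_types:
    create_test_class(globals(), XPUTestMyOp, stype)

if __name__ == '__main__':
    unittest.main()
```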
@@ -60,10 +60,11 @@ class XPUTestArgsortOp1(XPUOpTestWrapper):
class TestArgsortOp(XPUOpTest):
def setUp(self):
self.set_xpu()
self.op_type = "argsort"
self.place = paddle.XPUPlace(0)
self.__class__.no_need_check_grad = True
self.dtype = self.in_type
self.input_shape = (2, 2, 2, 3, 3)
self.axis = -1 if not hasattr(self, 'init_axis') else self.init_axis
self.descending = False if not hasattr(
@@ -94,10 +95,6 @@ class XPUTestArgsortOp1(XPUOpTestWrapper):
self.x, kind='heapsort', axis=self.axis)
self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis)
def set_xpu(self):
self.__class__.use_xpu = True
self.__class__.no_need_check_grad = True
def test_check_output(self):
self.check_output_with_place(self.place)
@@ -110,9 +107,10 @@ class XPUTestArgsortOp2(XPUOpTestWrapper):
class TestArgsortOp(XPUOpTest):
def setUp(self):
self.set_xpu()
self.op_type = "argsort"
self.place = paddle.XPUPlace(0)
self.__class__.no_need_check_grad = True
self.init_dtype()
self.init_inputshape()
self.init_axis()
@@ -143,10 +141,6 @@ class XPUTestArgsortOp2(XPUOpTestWrapper):
self.x, kind='heapsort', axis=self.axis)
self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis)
def set_xpu(self):
self.__class__.use_xpu = True
self.__class__.no_need_check_grad = True
def init_inputshape(self):
self.input_shape = (2, 2, 2, 3, 3)
@@ -220,11 +214,9 @@ class XPUTestHuberLossOp(XPUOpTestWrapper):
class TestHuberLossOp(XPUOpTest):
def setUp(self):
self.set_xpu()
self.op_type = 'huber_loss'
self.place = paddle.XPUPlace(0)
self.init_dtype()
self.dtype = self.in_type
self.set_inputs()
self.set_attrs()
@@ -253,12 +245,6 @@ class XPUTestHuberLossOp(XPUOpTestWrapper):
def set_shape(self):
return (100, 1)
def set_xpu(self):
self.__class__.use_xpu = True
def init_dtype(self):
self.dtype = self.in_type
def test_check_output(self):
self.check_output_with_place(self.place)
......
@@ -18,152 +18,125 @@ import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper, type_dict_str_to_numpy
paddle.enable_static()
class TestScatterOp(XPUOpTest):
def setUp(self):
self.set_xpu()
self.op_type = "scatter"
self.place = paddle.XPUPlace(0)
ref_np = np.ones((3, 50)).astype("float32")
index_np = np.array([1, 2]).astype("int32")
updates_np = np.random.random((2, 50)).astype("float32")
output_np = np.copy(ref_np)
output_np[index_np] = updates_np
self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
self.outputs = {'Out': output_np}
def set_xpu(self):
self.__class__.use_xpu = True
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
pass
class XPUTestScatterOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'scatter'
self.use_dynamic_create_class = True
class TestScatterOp0(TestScatterOp):
def setUp(self):
self.set_xpu()
self.op_type = "scatter"
self.place = paddle.XPUPlace(0)
def dynamic_create_class(self):
base_class = self.TestScatterOp
classes = []
test_data_case = []
ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int32")
updates_np = np.random.random((2, 3)).astype("float32")
# case1
ref_np = np.ones((3, 50))
index_np = np.array([1, 2])
updates_np = np.random.random((2, 50))
output_np = np.copy(ref_np)
output_np[index_np] = updates_np
self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
self.attrs = {'overwrite': True}
self.outputs = {'Out': output_np}
class TestScatterOp1(TestScatterOp):
def setUp(self):
self.set_xpu()
self.op_type = "scatter"
self.place = paddle.XPUPlace(0)
ref_np = np.ones((3, 3)).astype("float32")
zeros_np = np.zeros([2, 3]).astype('float32')
index_np = np.array([1, 1]).astype("int32")
updates_np = np.random.random((2, 3)).astype("float32")
output_np = np.copy(ref_np)
output_np[index_np] = zeros_np
for i in range(0, len(index_np)):
output_np[index_np[i]] += updates_np[i]
self.attrs = {'overwrite': False}
self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
self.outputs = {'Out': output_np}
class TestScatterOp2(TestScatterOp):
def setUp(self):
self.set_xpu()
self.op_type = "scatter"
self.place = paddle.XPUPlace(0)
ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int32")
updates_np = np.random.random((2, 3)).astype("float32")
data_dict = {
'init_ref_np': ref_np,
'init_index_np': index_np,
'init_updates_np': updates_np,
'init_output_np': output_np,
'test_name': 'case1'
}
test_data_case.append(data_dict)
# case2
ref_np = np.ones((3, 3))
index_np = np.array([1, 2])
updates_np = np.random.random((2, 3))
output_np = np.copy(ref_np)
output_np[index_np] = updates_np
self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
self.outputs = {'Out': output_np}
class TestScatterOp3(TestScatterOp):
def setUp(self):
self.set_xpu()
self.op_type = "scatter"
self.place = paddle.XPUPlace(0)
ref_np = np.ones((3, 3)).astype("float32")
zeros_np = np.zeros([2, 3]).astype('float32')
data_dict = {
'init_ref_np': ref_np,
'init_index_np': index_np,
'init_updates_np': updates_np,
'init_output_np': output_np,
'test_name': 'case2'
}
test_data_case.append(data_dict)
# case3
ref_np = np.ones((3, 3))
zeros_np = np.zeros([2, 3])
index_np = np.array([1, 1]).astype("int32")
updates_np = np.random.random((2, 3)).astype("float32")
updates_np = np.random.randint(low=-1000, high=1000, size=(2, 3))
output_np = np.copy(ref_np)
output_np[index_np] = zeros_np
for i in range(0, len(index_np)):
output_np[index_np[i]] += updates_np[i]
self.attrs = {'overwrite': False}
self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
self.outputs = {'Out': output_np}
class TestScatterOp4(TestScatterOp):
def setUp(self):
self.set_xpu()
self.op_type = "scatter"
self.place = paddle.XPUPlace(0)
ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int64")
updates_np = np.random.random((2, 3)).astype("float32")
output_np = np.copy(ref_np)
output_np[index_np] = updates_np
self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
self.outputs = {'Out': output_np}
class TestScatterOp5(TestScatterOp):
def setUp(self):
self.set_xpu()
self.op_type = "scatter"
self.place = paddle.XPUPlace(0)
ref_np = np.ones((3, 3)).astype("float32")
index_np = np.array([1, 2]).astype("int64")
updates_np = np.random.random((2, 3)).astype("float32")
output_np = np.copy(ref_np)
output_np[index_np] = updates_np
self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
self.outputs = {'Out': output_np}
class TestScatterOp6(TestScatterOp):
def setUp(self):
self.set_xpu()
self.op_type = "scatter"
self.place = paddle.XPUPlace(0)
ref_np = np.ones((3, 3)).astype("int64")
index_np = np.array([1, 2]).astype("int64")
updates_np = np.random.random((2, 3)).astype("int64")
output_np = np.copy(ref_np)
output_np[index_np] = updates_np
self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
self.outputs = {'Out': output_np}
data_dict = {
'init_ref_np': ref_np,
'init_index_np': index_np,
'init_updates_np': updates_np,
'init_output_np': output_np,
'test_name': 'case3'
}
test_data_case.append(data_dict)
for data_dict in test_data_case:
for index_type in ['int32', 'int64']:
for overwrite in [True, False]:
class_name = 'XPUTestScatterOp_index_type_' + data_dict[
'test_name'] + '_' + str(index_type) + '_' + str(
overwrite)
attr_dict = data_dict
attr_dict['index_type'] = type_dict_str_to_numpy[index_type]
attr_dict['init_overwrite'] = overwrite
classes.append([class_name, attr_dict])
return base_class, classes
class TestScatterOp(XPUOpTest):
def setUp(self):
self.init_config()
self.index_type = np.int32 if not hasattr(
self, 'index_type') else self.index_type
self.overwrite = True if not hasattr(
self, 'init_overwrite') else self.init_overwrite
if not hasattr(self, 'init_ref_np'):
self.ref_np = np.ones((3, 50)).astype(self.dtype)
self.index_np = np.array([1, 2]).astype(self.index_type)
self.updates_np = np.random.random((2, 50)).astype(self.dtype)
self.output_np = np.copy(self.ref_np)
self.output_np[self.index_np] = self.updates_np
else:
self.ref_np = self.init_ref_np.astype(self.dtype)
self.index_np = self.init_index_np.astype(self.index_type)
self.updates_np = self.init_updates_np.astype(self.dtype)
self.output_np = self.init_output_np.astype(self.dtype)
self.inputs = {
'X': self.ref_np,
'Ids': self.index_np,
'Updates': self.updates_np
}
self.attrs = {'overwrite': self.overwrite}
self.outputs = {'Out': self.output_np}
def init_config(self):
self.op_type = "scatter"
self.place = paddle.XPUPlace(0)
self.dtype = self.in_type
self.__class__.no_need_check_grad = True
def test_check_output(self):
self.check_output_with_place(self.place)
support_types = get_xpu_op_support_types('scatter')
for stype in support_types:
create_test_class(globals(), XPUTestScatterOp, stype)
if __name__ == '__main__':
unittest.main()
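For context, the expected 'Out' tensors in these scatter cases follow plain NumPy fancy indexing: with `overwrite=True` the rows selected by `Ids` are replaced by `Updates`, while with `overwrite=False` the selected rows are first zeroed and then accumulated, which is why a duplicate index such as `[1, 1]` is an interesting case. A small standalone sketch of that reference computation, mirroring the loops above:

```python
import numpy as np

def scatter_reference(ref, index, updates, overwrite=True):
    # NumPy reference matching the expected 'Out' built in the tests above.
    out = np.copy(ref)
    if overwrite:
        out[index] = updates
    else:
        # Zero the touched rows first, then accumulate so duplicate indices add up.
        out[index] = np.zeros_like(updates)
        for i in range(len(index)):
            out[index[i]] += updates[i]
    return out

ref = np.ones((3, 3), dtype="float32")
ids = np.array([1, 1], dtype="int32")
upd = np.random.random((2, 3)).astype("float32")
print(scatter_reference(ref, ids, upd, overwrite=False))
```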
@@ -16,71 +16,77 @@ from __future__ import print_function
import numpy as np
import unittest
import paddle
import sys
sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
from paddle.fluid.op import Operator
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
paddle.enable_static()
class TestWhereIndexOp(XPUOpTest):
def setUp(self):
self.set_xpu()
self.op_type = "where_index"
self.place = paddle.XPUPlace(0)
self.init_config()
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
pass
def init_config(self):
self.inputs = {'Condition': np.array([True, False, True]), }
self.outputs = {'Out': np.array([[0], [2]], dtype='int64')}
def set_xpu(self):
self.__class__.use_xpu = True
class TestNotBool(TestWhereIndexOp):
def init_config(self):
self.inputs = {'Condition': np.array([1, 0, 8]), }
self.outputs = {'Out': np.array([[0], [2]], dtype='int64')}
class TestAllFalse(TestWhereIndexOp):
def init_config(self):
self.inputs = {'Condition': np.array([False, False, False]), }
self.outputs = {'Out': np.array([], dtype='int64')}
class TestRank2(TestWhereIndexOp):
def init_config(self):
self.inputs = {'Condition': np.array([[True, False], [False, True]]), }
self.outputs = {'Out': np.array([[0, 0], [1, 1]], dtype='int64')}
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
class TestRank3(TestWhereIndexOp):
def init_config(self):
self.inputs = {
'Condition': np.array([[[True, False], [False, True]],
[[False, True], [True, False]],
[[False, False], [False, True]]]),
}
self.outputs = {
'Out': np.array(
[[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]],
dtype='int64')
}
class XPUTestWhereIndexOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'where_index'
class TestWhereIndexOp(XPUOpTest):
def setUp(self):
self.init_config()
self.init_data()
def test_check_output(self):
self.check_output_with_place(self.place)
def init_data(self):
self.inputs = {
'Condition': np.array([True, False, True]).astype(self.dtype),
}
self.outputs = {'Out': np.array([[0], [2]], dtype='int64')}
def init_config(self):
self.op_type = "where_index"
self.place = paddle.XPUPlace(0)
self.dtype = self.in_type
self.__class__.no_need_check_grad = True
class TestAllFalse(TestWhereIndexOp):
def init_data(self):
self.inputs = {
'Condition': np.array([False, False, False]).astype(self.dtype),
}
self.outputs = {'Out': np.array([], dtype='int64')}
class TestRank2(TestWhereIndexOp):
def init_data(self):
self.inputs = {
'Condition':
np.array([[True, False], [False, True]]).astype(self.dtype),
}
self.outputs = {'Out': np.array([[0, 0], [1, 1]], dtype='int64')}
class TestRank3(TestWhereIndexOp):
def init_data(self):
self.inputs = {
'Condition':
np.array([[[True, False], [False, True]],
[[False, True], [True, False]],
[[False, False], [False, True]]]).astype(self.dtype),
}
self.outputs = {
'Out': np.array(
[[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]],
dtype='int64')
}
support_types = get_xpu_op_support_types('where_index')
for stype in support_types:
create_test_class(globals(), XPUTestWhereIndexOp, stype)
class TestWhereOpError(unittest.TestCase):
......
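The expected outputs in the where_index cases above are just the coordinates of the true entries of `Condition`; NumPy's `np.argwhere` yields the same index matrix, which is a convenient way to derive the expected 'Out' when adding a new case:

```python
import numpy as np

# Rank-1 case from the tests: indices of the True entries.
print(np.argwhere(np.array([True, False, True])).astype("int64"))    # [[0], [2]]

# Rank-2 case: one row of coordinates per True entry.
print(np.argwhere(np.array([[True, False],
                            [False, True]])).astype("int64"))        # [[0 0], [1 1]]
```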
@@ -18,52 +18,61 @@ import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program
from paddle.fluid.backward import append_backward
paddle.enable_static()
class TestXPUWhereOp(XPUOpTest):
def setUp(self):
self.op_type = "where"
self.set_xpu()
self.init_config()
self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y}
self.outputs = {'Out': np.where(self.cond, self.x, self.y)}
def init_config(self):
self.x = np.random.uniform(-3, 5, (100)).astype("float32")
self.y = np.random.uniform(-3, 5, (100)).astype("float32")
self.cond = np.zeros((100)).astype("bool")
def set_xpu(self):
self.__class__.use_xpu = True
self.place = paddle.XPUPlace(0)
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad_normal(self):
self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
class TestXPUWhereOp2(TestXPUWhereOp):
def init_config(self):
self.x = np.random.uniform(-5, 5, (60, 2)).astype("float32")
self.y = np.random.uniform(-5, 5, (60, 2)).astype("float32")
self.cond = np.ones((60, 2)).astype("bool")
paddle.enable_static()
class TestXPUWhereOp3(TestXPUWhereOp):
def init_config(self):
self.x = np.random.uniform(-3, 5, (20, 2, 4)).astype("float32")
self.y = np.random.uniform(-3, 5, (20, 2, 4)).astype("float32")
self.cond = np.array(np.random.randint(2, size=(20, 2, 4)), dtype=bool)
class XPUTestWhereOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'where'
class TestXPUWhereOp(XPUOpTest):
def setUp(self):
self.init_config()
self.init_data()
self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y}
self.outputs = {'Out': np.where(self.cond, self.x, self.y)}
def init_data(self):
self.x = np.random.uniform(-3, 5, (100)).astype(self.dtype)
self.y = np.random.uniform(-3, 5, (100)).astype(self.dtype)
self.cond = np.zeros((100)).astype("bool")
def init_config(self):
self.op_type = "where"
self.dtype = self.in_type
self.place = paddle.XPUPlace(0)
self.__class__.no_need_check_grad = True
def test_check_output(self):
self.check_output_with_place(self.place)
class TestXPUWhereOp2(TestXPUWhereOp):
def init_data(self):
self.x = np.random.uniform(-5, 5, (60, 2)).astype(self.dtype)
self.y = np.random.uniform(-5, 5, (60, 2)).astype(self.dtype)
self.cond = np.ones((60, 2)).astype("bool")
class TestXPUWhereOp3(TestXPUWhereOp):
def init_data(self):
self.x = np.random.uniform(-3, 5, (20, 2, 4)).astype(self.dtype)
self.y = np.random.uniform(-3, 5, (20, 2, 4)).astype(self.dtype)
self.cond = np.array(
np.random.randint(
2, size=(20, 2, 4)), dtype=bool)
support_types = get_xpu_op_support_types('where')
for stype in support_types:
create_test_class(globals(), XPUTestWhereOp, stype)
class TestXPUWhereAPI(unittest.TestCase):
......
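As a quick sanity check outside the static-graph test harness, the same `where` semantics can be exercised through the dynamic-graph API. The sketch below assumes a Paddle build with Kunlun/XPU support (it falls back to CPU otherwise) and that `paddle.is_compiled_with_xpu` is available in this Paddle version.

```python
import numpy as np
import paddle

# Prefer the XPU device when this Paddle build supports it; otherwise use CPU.
place = paddle.XPUPlace(0) if paddle.is_compiled_with_xpu() else paddle.CPUPlace()
paddle.disable_static(place)

cond = np.random.randint(2, size=(20, 2, 4)).astype(bool)
x = np.random.uniform(-3, 5, (20, 2, 4)).astype("float32")
y = np.random.uniform(-3, 5, (20, 2, 4)).astype("float32")

out = paddle.where(paddle.to_tensor(cond), paddle.to_tensor(x), paddle.to_tensor(y))
np.testing.assert_allclose(out.numpy(), np.where(cond, x, y))
```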