Unverified commit 267d3191, authored by enzodechine, committed by GitHub

Re-write the unit tests for compare xpu op (#43460)

* re-write the unit tests for compare xpu op

*test=kunlun

* re-write the unit tests for compare xpu op

*test=kunlun
Co-authored-by: runzhech <runzh_chen@sjtu.edu.cn>
Parent: 8571833f
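The rewritten file follows Paddle's XPU test-wrapper pattern: a shared TestCompareOpBase builds random inputs and the numpy reference result, each compare op gets an XPUOpTestWrapper subclass holding its concrete cases, and create_test_class registers one test class per dtype returned by get_xpu_op_support_types. The following is a minimal, standalone sketch of that per-dtype registration idea only; CompareOpCase and register_dtype_cases are illustrative names, not Paddle APIs, since the real helpers live in xpu.get_test_cover_info and require XPU hardware.

# Standalone sketch (not Paddle code): one unittest.TestCase subclass is
# generated per supported dtype, mirroring how create_test_class() expands
# the XPUOpTestWrapper classes in the diff below.
import unittest

import numpy as np


class CompareOpCase(unittest.TestCase):
    # Defaults; generated subclasses override `dtype`.
    dtype = np.float32
    compute = staticmethod(np.less)

    def test_against_numpy_reference(self):
        x = np.random.uniform(-100, 100, [11, 17]).astype(self.dtype)
        y = np.random.uniform(-100, 100, [11, 17]).astype(self.dtype)
        out = self.compute(x, y)
        # The real tests compare the XPU kernel output against this numpy
        # result; here we only sanity-check the reference computation.
        self.assertEqual(out.dtype, np.bool_)
        self.assertEqual(out.shape, x.shape)


def register_dtype_cases(scope, base, dtypes):
    # Hypothetical stand-in for create_test_class(): emit one concrete
    # class per dtype so unittest discovers each of them individually.
    for dt in dtypes:
        name = "{}_{}".format(base.__name__, np.dtype(dt).name)
        scope[name] = type(name, (base,), {"dtype": dt})


register_dtype_cases(globals(), CompareOpCase, [np.float32, np.int32, np.int64])

if __name__ == "__main__":
    unittest.main()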
@@ -12,282 +12,393 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

sys.path.append("..")
import unittest
import numpy as np
from op_test_xpu import XPUOpTest
import paddle
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types
from xpu.get_test_cover_info import XPUOpTestWrapper


class TestCompareOpBase(XPUOpTest):

    def setUp(self):
        self.place = paddle.XPUPlace(0)
        self.config()
        self.set_case()
        self.inputs = {'X': self.x, 'Y': self.y}
        self.outputs = {'Out': self.result}

    def set_case(self):
        self.x = np.random.uniform(self.lbound, self.hbound,
                                   self.x_shape).astype(self.dtype)
        self.y = np.random.uniform(self.lbound, self.hbound,
                                   self.y_shape).astype(self.dtype)
        self.result = self.compute(self.x, self.y)

    def config(self):
        self.dtype = np.float32
        self.op_type = 'less_than'
        self.compute = np.less
        self.lbound = -100
        self.hbound = 100
        self.x_shape = [11, 17]
        self.y_shape = [11, 17]

    def test_check_output(self):
        paddle.enable_static()
        self.check_output_with_place(self.place)


class XPUTestLessThanOP(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'less_than'
        self.use_dynamic_create_class = False

    class LessThanOpTestCase1(TestCompareOpBase):

        def config(self):
            self.dtype = self.in_type
            self.op_type = 'less_than'
            self.compute = np.less
            self.set_data()

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [11, 17]
            self.y_shape = [11, 17]

    class LessThanOpTestCase2(LessThanOpTestCase1):

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [11, 17]
            self.y_shape = [1]

    class LessThanOpTestCase3(LessThanOpTestCase1):

        def set_data(self):
            self.lbound = -300
            self.hbound = 300
            self.x_shape = [11, 17, 29]
            self.y_shape = [1]

    class LessThanOpTestCase4(LessThanOpTestCase1):

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [128, 128, 512]
            self.y_shape = [1]

    class LessThanOpTestCase5(LessThanOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [128, 128, 512]
            self.y_shape = [128, 128, 512]


support_types = get_xpu_op_support_types('less_than')
for stype in support_types:
    create_test_class(globals(), XPUTestLessThanOP, stype)


class XPUTestLessEqualOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'less_equal'
        self.use_dynamic_create_class = False

    class LessEqualOpTestCase1(TestCompareOpBase):

        def config(self):
            self.dtype = self.in_type
            self.op_type = 'less_equal'
            self.compute = np.less_equal
            self.set_data()

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [11, 17]
            self.y_shape = [11, 17]

    class LessEqualOpTestCase2(LessEqualOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [11, 17, 255]
            self.y_shape = [11, 17, 255]

    class LessEqualOpTestCase3(LessEqualOpTestCase1):

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [11, 17, 255]
            self.y_shape = [1]

    class LessEqualOpTestCase4(LessEqualOpTestCase1):

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [11, 17]
            self.y_shape = [1]

    class LessEqualOpTestCase5(LessEqualOpTestCase1):

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [128, 128, 512]
            self.y_shape = [128, 128, 512]


support_types = get_xpu_op_support_types('less_equal')
for stype in support_types:
    create_test_class(globals(), XPUTestLessEqualOp, stype)


class XPUTestGreaterThanOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'greater_than'
        self.use_dynamic_create_class = False

    class GreaterThanOpTestCase1(TestCompareOpBase):

        def config(self):
            self.dtype = self.in_type
            self.op_type = 'greater_than'
            self.compute = np.greater
            self.set_data()

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [128, 128, 512]
            self.y_shape = [128, 128, 512]

    class GreaterThanOpTestCase2(GreaterThanOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [128, 128, 512]
            self.y_shape = [1]

    class GreaterThanOpTestCase3(GreaterThanOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [11, 17]
            self.y_shape = [1]

    class GreaterThanOpTestCase4(GreaterThanOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [11, 17]
            self.y_shape = [11, 17]

    class GreaterThanOpTestCase5(GreaterThanOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [10, 10, 20, 20]
            self.y_shape = [10, 10, 20, 20]


support_types = get_xpu_op_support_types('greater_than')
for stype in support_types:
    create_test_class(globals(), XPUTestGreaterThanOp, stype)


class XPUTestGreaterEqualOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'greater_equal'
        self.use_dynamic_create_class = False

    class GreaterEqualOpTestCase1(TestCompareOpBase):

        def config(self):
            self.dtype = self.in_type
            self.op_type = 'greater_equal'
            self.compute = np.greater_equal
            self.set_data()

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [10, 10, 20, 20]
            self.y_shape = [10, 10, 20, 20]

    class GreaterEqualOpTestCase2(GreaterEqualOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [10, 10]
            self.y_shape = [10, 10]

    class GreaterEqualOpTestCase3(GreaterEqualOpTestCase1):

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [512, 512, 2]
            self.y_shape = [1]

    class GreaterEqualOpTestCase4(GreaterEqualOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [10, 10, 20, 20]
            self.y_shape = [1]

    class GreaterEqualOpTestCase5(GreaterEqualOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [10, 30, 15]
            self.y_shape = [10, 30, 15]


support_types = get_xpu_op_support_types('greater_equal')
for stype in support_types:
    create_test_class(globals(), XPUTestGreaterEqualOp, stype)


class XPUTestEqualOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'equal'
        self.use_dynamic_create_class = False

    class EqualOpTestCase1(TestCompareOpBase):

        def config(self):
            self.dtype = self.in_type
            self.op_type = 'equal'
            self.compute = np.equal
            self.set_data()

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [10, 30, 15]
            self.y_shape = [10, 30, 15]

    class EqualOpTestCase2(EqualOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [10, 30, 15]
            self.y_shape = [1]

    class EqualOpTestCase3(EqualOpTestCase1):

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [10, 30]
            self.y_shape = [10, 30]

    class EqualOpTestCase4(EqualOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [256, 256, 10]
            self.y_shape = [256, 256, 10]

    class EqualOpTestCase5(EqualOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [11, 17]
            self.y_shape = [1]


support_types = get_xpu_op_support_types('equal')
for stype in support_types:
    create_test_class(globals(), XPUTestEqualOp, stype)


class XPUTestNotEqualOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'not_equal'
        self.use_dynamic_create_class = False

    class NotEqualOpTestCase1(TestCompareOpBase):

        def config(self):
            self.dtype = self.in_type
            self.op_type = 'not_equal'
            self.compute = np.not_equal
            self.set_data()

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [11, 17]
            self.y_shape = [1]

    class NotEqualOpTestCase2(NotEqualOpTestCase1):

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [11, 17]
            self.y_shape = [11, 17]

    class NotEqualOpTestCase3(NotEqualOpTestCase1):

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [11, 17, 30]
            self.y_shape = [1]

    class NotEqualOpTestCase4(NotEqualOpTestCase1):

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [256, 256, 10]
            self.y_shape = [256, 256, 10]

    class NotEqualOpTestCase5(NotEqualOpTestCase1):

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [512, 128]
            self.y_shape = [512, 128]


support_types = get_xpu_op_support_types('not_equal')
for stype in support_types:
    create_test_class(globals(), XPUTestNotEqualOp, stype)

if __name__ == '__main__':
    unittest.main()