Unverified commit 267d3191, authored by enzodechine, committed by GitHub

Re-write the unit tests for compare xpu op (#43460)

* re-write the unit tests for compare xpu op

*test=kunlun

* re-write the unit tests for compare xpu op

*test=kunlun
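
The rewritten tests drop the hand-rolled class factory and go through the shared XPU test helpers instead: get_xpu_op_support_types('less_than') reports the dtypes the XPU kernel supports, and create_test_class(globals(), <wrapper>, stype) generates one concrete test class per dtype, whose dtype each case's config() then reads back as self.in_type. Below is a minimal, self-contained sketch of that registration idea; the demo helper and class names are illustrative stand-ins, not the real implementation in xpu.get_test_cover_info.

import unittest
import numpy as np


def create_test_class_demo(scope, wrapper_cls, dtype_str):
    # Illustrative stand-in for create_test_class: for every nested TestCase
    # inside the wrapper, derive a per-dtype copy that carries the dtype as
    # in_type, and publish it in the given scope so unittest can discover it.
    for name, attr in vars(wrapper_cls).items():
        if isinstance(attr, type) and issubclass(attr, unittest.TestCase):
            new_name = '{}_{}'.format(name, dtype_str)
            scope[new_name] = type(new_name, (attr, ),
                                   {'in_type': np.dtype(dtype_str)})


class DemoLessThanWrapper:
    # Plays the role of an XPUOpTestWrapper subclass holding the test cases.

    class DemoLessThanCase(unittest.TestCase):

        def test_compare(self):
            x = np.array([1, 2, 3], dtype=self.in_type)
            y = np.array([2, 2, 2], dtype=self.in_type)
            np.testing.assert_array_equal(np.less(x, y), x < y)


for stype in ['float32', 'int32']:  # stand-in for get_xpu_op_support_types('less_than')
    create_test_class_demo(globals(), DemoLessThanWrapper, stype)
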
Co-authored-by: runzhech <runzh_chen@sjtu.edu.cn>
Parent 8571833f
@@ -12,282 +12,393 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
sys.path.append("..")
import unittest
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test_xpu import XPUOpTest
import paddle
from paddle.fluid import Program, program_guard
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types
from xpu.get_test_cover_info import XPUOpTestWrapper
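
# Test layout: TestCompareOpBase carries the shared setUp/set_case/config and
# test_check_output logic; each XPUOpTestWrapper subclass below groups the
# concrete cases for one compare op (less_than, less_equal, greater_than,
# greater_equal, equal, not_equal); the create_test_class() loop after each
# group registers one test class per dtype returned by get_xpu_op_support_types().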

class TestCompareOpBase(XPUOpTest):

    def setUp(self):
        self.place = paddle.XPUPlace(0)
        self.config()
        self.set_case()
        self.inputs = {'X': self.x, 'Y': self.y}
        self.outputs = {'Out': self.result}

    def set_case(self):
        self.x = np.random.uniform(self.lbound, self.hbound,
                                   self.x_shape).astype(self.dtype)
        self.y = np.random.uniform(self.lbound, self.hbound,
                                   self.y_shape).astype(self.dtype)
        self.result = self.compute(self.x, self.y)

    def config(self):
        self.dtype = np.float32
        self.op_type = 'less_than'
        self.compute = np.less
        self.lbound = -100
        self.hbound = 100
        self.x_shape = [11, 17]
        self.y_shape = [11, 17]

    def test_check_output(self):
        paddle.enable_static()
        self.check_output_with_place(self.place)


class XPUTestLessThanOP(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'less_than'
        self.use_dynamic_create_class = False

    class LessThanOpTestCase1(TestCompareOpBase):

        def config(self):
            self.dtype = self.in_type
            self.op_type = 'less_than'
            self.compute = np.less
            self.set_data()

        def set_data(self):
            self.lbound = -100
            self.hbound = 100
            self.x_shape = [11, 17]
            self.y_shape = [11, 17]

    class LessThanOpTestCase2(LessThanOpTestCase1):

        def set_data(self):
            self.lbound = -200
            self.hbound = 200
            self.x_shape = [11, 17]
            self.y_shape = [1]

class LessThanOpTestCase3(LessThanOpTestCase1):
def set_data(self):
self.lbound = -300
self.hbound = 300
self.x_shape = [11, 17, 29]
self.y_shape = [1]
class LessThanOpTestCase4(LessThanOpTestCase1):
def set_data(self):
self.lbound = -200
self.hbound = 200
self.x_shape = [128, 128, 512]
self.y_shape = [1]
class LessThanOpTestCase5(LessThanOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [128, 128, 512]
self.y_shape = [128, 128, 512]
support_types = get_xpu_op_support_types('less_than')
for stype in support_types:
create_test_class(globals(), XPUTestLessThanOP, stype)
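# The loop above publishes one generated LessThanOpTestCase* class per
# supported dtype into globals(); the chosen dtype is what each case's
# config() reads back as self.in_type. The same registration pattern
# repeats for every compare op wrapper that follows.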
class XPUTestLessEqualOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'less_equal'
self.use_dynamic_create_class = False
class LessEqualOpTestCase1(TestCompareOpBase):
def config(self):
self.dtype = self.in_type
self.op_type = 'less_equal'
self.compute = np.less_equal
self.set_data()
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [11, 17]
self.y_shape = [11, 17]
class LessEqualOpTestCase2(LessEqualOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [11, 17, 255]
self.y_shape = [11, 17, 255]
class LessEqualOpTestCase3(LessEqualOpTestCase1):
def set_data(self):
self.lbound = -200
self.hbound = 200
self.x_shape = [11, 17, 255]
self.y_shape = [1]
class LessEqualOpTestCase4(LessEqualOpTestCase1):
def set_data(self):
self.lbound = -200
self.hbound = 200
self.x_shape = [11, 17]
self.y_shape = [1]
class LessEqualOpTestCase5(LessEqualOpTestCase1):
def set_data(self):
self.lbound = -200
self.hbound = 200
self.x_shape = [128, 128, 512]
self.y_shape = [128, 128, 512]
support_types = get_xpu_op_support_types('less_equal')
for stype in support_types:
create_test_class(globals(), XPUTestLessEqualOp, stype)
class XPUTestGreaterThanOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'greater_than'
self.use_dynamic_create_class = False
class GreaterThanOpTestCase1(TestCompareOpBase):
def config(self):
self.dtype = self.in_type
self.op_type = 'greater_than'
self.compute = np.greater
self.set_data()
def set_data(self):
self.lbound = -200
self.hbound = 200
self.x_shape = [128, 128, 512]
self.y_shape = [128, 128, 512]
class GreaterThanOpTestCase2(GreaterThanOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [128, 128, 512]
self.y_shape = [1]
class GreaterThanOpTestCase3(GreaterThanOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [11, 17]
self.y_shape = [1]
class GreaterThanOpTestCase4(GreaterThanOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [11, 17]
self.y_shape = [11, 17]
class GreaterThanOpTestCase5(GreaterThanOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [10, 10, 20, 20]
self.y_shape = [10, 10, 20, 20]
support_types = get_xpu_op_support_types('greater_than')
for stype in support_types:
create_test_class(globals(), XPUTestGreaterThanOp, stype)
class XPUTestGreaterEqualOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'greater_equal'
self.use_dynamic_create_class = False
class GreaterEqualOpTestCase1(TestCompareOpBase):
def config(self):
self.dtype = self.in_type
self.op_type = 'greater_equal'
self.compute = np.greater_equal
self.set_data()
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [10, 10, 20, 20]
self.y_shape = [10, 10, 20, 20]
class GreaterEqualOpTestCase2(GreaterEqualOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [10, 10]
self.y_shape = [10, 10]
class GreaterEqualOpTestCase3(GreaterEqualOpTestCase1):
def set_data(self):
self.lbound = -200
self.hbound = 200
self.x_shape = [512, 512, 2]
self.y_shape = [1]
class GreaterEqualOpTestCase4(GreaterEqualOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [10, 10, 20, 20]
self.y_shape = [1]
class GreaterEqualOpTestCase5(GreaterEqualOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [10, 30, 15]
self.y_shape = [10, 30, 15]
support_types = get_xpu_op_support_types('greater_equal')
for stype in support_types:
create_test_class(globals(), XPUTestGreaterEqualOp, stype)
class XPUTestEqualOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'equal'
self.use_dynamic_create_class = False
class EqualOpTestCase1(TestCompareOpBase):
def config(self):
self.dtype = self.in_type
self.op_type = 'equal'
self.compute = np.equal
self.set_data()
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [10, 30, 15]
self.y_shape = [10, 30, 15]
class EqualOpTestCase2(EqualOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [10, 30, 15]
self.y_shape = [1]
class EqualOpTestCase3(EqualOpTestCase1):
def set_data(self):
self.lbound = -200
self.hbound = 200
self.x_shape = [10, 30]
self.y_shape = [10, 30]
class EqualOpTestCase4(EqualOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [256, 256, 10]
self.y_shape = [256, 256, 10]
class EqualOpTestCase5(EqualOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [11, 17]
self.y_shape = [1]
support_types = get_xpu_op_support_types('equal')
for stype in support_types:
create_test_class(globals(), XPUTestEqualOp, stype)
class XPUTestNotEqualOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'not_equal'
self.use_dynamic_create_class = False
class NotEqualOpTestCase1(TestCompareOpBase):
def config(self):
self.dtype = self.in_type
self.op_type = 'not_equal'
self.compute = np.not_equal
self.set_data()
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [11, 17]
self.y_shape = [1]
class NotEqualOpTestCase2(NotEqualOpTestCase1):
def set_data(self):
self.lbound = -200
self.hbound = 200
self.x_shape = [11, 17]
self.y_shape = [11, 17]
class NotEqualOpTestCase3(NotEqualOpTestCase1):
def set_data(self):
self.lbound = -200
self.hbound = 200
self.x_shape = [11, 17, 30]
self.y_shape = [1]
class NotEqualOpTestCase4(NotEqualOpTestCase1):
def set_data(self):
self.lbound = -200
self.hbound = 200
self.x_shape = [256, 256, 10]
self.y_shape = [256, 256, 10]
class NotEqualOpTestCase5(NotEqualOpTestCase1):
def set_data(self):
self.lbound = -100
self.hbound = 100
self.x_shape = [512, 128]
self.y_shape = [512, 128]
support_types = get_xpu_op_support_types('not_equal')
for stype in support_types:
create_test_class(globals(), XPUTestNotEqualOp, stype)
if __name__ == '__main__':
unittest.main()