未验证 提交 4745234f 编写于 作者: z8hanghuan 提交者: GitHub

new way of test case, 2nd, *test=kunlun (#39478)

* new way of test case, 2nd, *test=kunlun

* new way of test case, 2nd, *test=kunlun

* new way of test case, 2nd, *test=kunlun
上级 7e7e9404
......@@ -36,7 +36,7 @@ ENDIF()
if(NOT DEFINED XPU_BASE_URL)
  SET(XPU_BASE_URL_WITHOUT_DATE "https://baidu-kunlun-product.cdn.bcebos.com/KL-SDK/klsdk-dev")
  # Pin the XPU SDK to the 20220119 snapshot.  (A stale duplicate SET pointing
  # at 20220116 was removed: it was dead, being overwritten immediately.)
  SET(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220119")
else()
  # Caller supplied XPU_BASE_URL explicitly; keep it as-is.
  SET(XPU_BASE_URL "${XPU_BASE_URL}")
endif()
......
......@@ -22,194 +22,107 @@ from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid as fluid
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
np.random.seed(10)
class TestExpandAsOpRank1(XPUOpTest):
    """expand_as_v2 on XPU: rank-1 input (100,) broadcast to (2, 100)."""

    def setUp(self):
        self.set_xpu()
        self.op_type = "expand_as_v2"
        self.place = paddle.XPUPlace(0)
        src = np.random.rand(100).astype("float32")
        target = np.random.rand(2, 100).astype("float32")
        self.inputs = {'X': src}
        self.attrs = {'target_shape': target.shape}
        # Expected result: tile the input up to the target shape.
        self.outputs = {'Out': np.tile(src, [2, 1])}

    def set_xpu(self):
        self.__class__.use_xpu = True

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient checking is intentionally skipped for this op.
        pass
class TestExpandAsOpRank2(XPUOpTest):
    """expand_as_v2 on XPU: rank-2 input whose target shape equals its own."""

    def setUp(self):
        self.set_xpu()
        self.op_type = "expand_as_v2"
        self.place = paddle.XPUPlace(0)
        src = np.random.rand(10, 12).astype("float32")
        target = np.random.rand(10, 12).astype("float32")
        self.inputs = {'X': src}
        self.attrs = {'target_shape': target.shape}
        # Identity broadcast: every repeat factor is 1.
        self.outputs = {'Out': np.tile(src, [1, 1])}

    def set_xpu(self):
        self.__class__.use_xpu = True

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient checking is intentionally skipped for this op.
        pass
class TestExpandAsOpRank3(XPUOpTest):
    """expand_as_v2 on XPU: rank-3 identity broadcast (2, 3, 20)."""

    def setUp(self):
        self.set_xpu()
        self.op_type = "expand_as_v2"
        self.place = paddle.XPUPlace(0)
        src = np.random.rand(2, 3, 20).astype("float32")
        target = np.random.rand(2, 3, 20).astype("float32")
        self.inputs = {'X': src}
        self.attrs = {'target_shape': target.shape}
        # Identity broadcast: every repeat factor is 1.
        self.outputs = {'Out': np.tile(src, [1, 1, 1])}

    def set_xpu(self):
        self.__class__.use_xpu = True

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient checking is intentionally skipped for this op.
        pass
class TestExpandAsOpRank4(XPUOpTest):
    """expand_as_v2 on XPU: (1, 1, 7, 16) expanded to (4, 6, 7, 16)."""

    def setUp(self):
        self.set_xpu()
        self.op_type = "expand_as_v2"
        self.place = paddle.XPUPlace(0)
        src = np.random.rand(1, 1, 7, 16).astype("float32")
        target = np.random.rand(4, 6, 7, 16).astype("float32")
        self.inputs = {'X': src}
        self.attrs = {'target_shape': target.shape}
        # The two leading singleton axes are repeated 4x and 6x.
        self.outputs = {'Out': np.tile(src, [4, 6, 1, 1])}

    def set_xpu(self):
        self.__class__.use_xpu = True

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient checking is intentionally skipped for this op.
        pass
class TestExpandAsOpRank5(XPUOpTest):
    """expand_as_v2 on XPU: int32 variant of the (4, 6, 7, 16) expansion."""

    def setUp(self):
        self.set_xpu()
        self.op_type = "expand_as_v2"
        self.place = paddle.XPUPlace(0)
        src = np.random.rand(1, 1, 7, 16).astype("int32")
        target = np.random.rand(4, 6, 7, 16).astype("int32")
        self.inputs = {'X': src}
        self.attrs = {'target_shape': target.shape}
        # The two leading singleton axes are repeated 4x and 6x.
        self.outputs = {'Out': np.tile(src, [4, 6, 1, 1])}

    def set_xpu(self):
        self.__class__.use_xpu = True

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient checking is intentionally skipped for this op.
        pass
class TestExpandAsOpRank6(XPUOpTest):
    """expand_as_v2 on XPU: int64 variant of the (4, 6, 7, 16) expansion."""

    def setUp(self):
        self.set_xpu()
        self.op_type = "expand_as_v2"
        self.place = paddle.XPUPlace(0)
        src = np.random.rand(1, 1, 7, 16).astype("int64")
        target = np.random.rand(4, 6, 7, 16).astype("int64")
        self.inputs = {'X': src}
        self.attrs = {'target_shape': target.shape}
        # The two leading singleton axes are repeated 4x and 6x.
        self.outputs = {'Out': np.tile(src, [4, 6, 1, 1])}

    def set_xpu(self):
        self.__class__.use_xpu = True

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient checking is intentionally skipped for this op.
        pass
class TestExpandAsOpRank6BOOL(XPUOpTest):
    """expand_as_v2 on XPU: bool variant of the (4, 6, 7, 16) expansion."""

    def setUp(self):
        self.set_xpu()
        self.op_type = "expand_as_v2"
        self.place = paddle.XPUPlace(0)
        src = np.random.rand(1, 1, 7, 16).astype("bool")
        target = np.random.rand(4, 6, 7, 16).astype("bool")
        self.inputs = {'X': src}
        self.attrs = {'target_shape': target.shape}
        # The two leading singleton axes are repeated 4x and 6x.
        self.outputs = {'Out': np.tile(src, [4, 6, 1, 1])}

    def set_xpu(self):
        self.__class__.use_xpu = True

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient checking is intentionally skipped for this op.
        pass
class TestExpandAsOpRank6FP16(XPUOpTest):
    """expand_as_v2 on XPU: float16 variant of the (4, 6, 7, 16) expansion."""

    def setUp(self):
        self.set_xpu()
        self.op_type = "expand_as_v2"
        self.place = paddle.XPUPlace(0)
        src = np.random.rand(1, 1, 7, 16).astype("float16")
        target = np.random.rand(4, 6, 7, 16).astype("float16")
        self.inputs = {'X': src}
        self.attrs = {'target_shape': target.shape}
        # The two leading singleton axes are repeated 4x and 6x.
        self.outputs = {'Out': np.tile(src, [4, 6, 1, 1])}

    def set_xpu(self):
        self.__class__.use_xpu = True
        # fp16 gradients are not checked by the framework for this case.
        self.__class__.no_need_check_grad = True

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient checking is intentionally skipped for this op.
        pass
class XPUTestExpandAsV2Op(XPUOpTestWrapper):
    """Per-dtype test wrapper for expand_as_v2 on XPU.

    create_test_class() instantiates one concrete class per dtype returned
    by get_xpu_op_support_types('expand_as_v2').
    """

    def __init__(self):
        self.op_name = 'expand_as_v2'
        self.use_dynamic_create_class = False

    class TestExpandAsV2XPUOp(XPUOpTest):
        """Base case: rank-1 input (100,) expanded to (2, 100)."""

        def setUp(self):
            self.init_dtype()
            self.set_xpu()
            self.op_type = "expand_as_v2"
            self.place = paddle.XPUPlace(0)
            self.set_inputs()
            self.set_output()

        def init_dtype(self):
            # in_type is injected by create_test_class for each dtype.
            self.dtype = self.in_type

        def set_inputs(self):
            x = np.random.rand(100).astype(self.dtype)
            self.inputs = {'X': x}
            target_tensor = np.random.rand(2, 100).astype(self.dtype)
            self.attrs = {'target_shape': target_tensor.shape}

        def set_output(self):
            # Expected result: tiling X by bcast_dims yields target_shape.
            bcast_dims = [2, 1]
            output = np.tile(self.inputs['X'], bcast_dims)
            self.outputs = {'Out': output}

        def set_xpu(self):
            self.__class__.use_xpu = True
            self.__class__.no_need_check_grad = True
            self.__class__.op_type = self.in_type

        def test_check_output(self):
            self.check_output_with_place(self.place)

    class TestExpandAsOpRank2(TestExpandAsV2XPUOp):
        """Rank-2 identity broadcast (10, 12) -> (10, 12)."""

        def set_inputs(self):
            x = np.random.rand(10, 12).astype(self.dtype)
            self.inputs = {'X': x}
            target_tensor = np.random.rand(10, 12).astype(self.dtype)
            self.attrs = {'target_shape': target_tensor.shape}

        def set_output(self):
            bcast_dims = [1, 1]
            output = np.tile(self.inputs['X'], bcast_dims)
            self.outputs = {'Out': output}

    class TestExpandAsOpRank3(TestExpandAsV2XPUOp):
        """Rank-3 identity broadcast (2, 3, 20) -> (2, 3, 20)."""

        def set_inputs(self):
            x = np.random.rand(2, 3, 20).astype(self.dtype)
            self.inputs = {'X': x}
            target_tensor = np.random.rand(2, 3, 20).astype(self.dtype)
            self.attrs = {'target_shape': target_tensor.shape}

        def set_output(self):
            bcast_dims = [1, 1, 1]
            output = np.tile(self.inputs['X'], bcast_dims)
            self.outputs = {'Out': output}

    class TestExpandAsOpRank4(TestExpandAsV2XPUOp):
        """Rank-4 broadcast (1, 1, 7, 16) -> (4, 6, 7, 16)."""

        def set_inputs(self):
            x = np.random.rand(1, 1, 7, 16).astype(self.dtype)
            self.inputs = {'X': x}
            # BUGFIX: target must be (4, 6, 7, 16) so that 'target_shape'
            # matches np.tile(x, [4, 6, 1, 1]) in set_output(); it was
            # (1, 1, 7, 16), contradicting the expected output shape.
            target_tensor = np.random.rand(4, 6, 7, 16).astype(self.dtype)
            self.attrs = {'target_shape': target_tensor.shape}

        def set_output(self):
            bcast_dims = [4, 6, 1, 1]
            output = np.tile(self.inputs['X'], bcast_dims)
            self.outputs = {'Out': output}

    class TestExpandAsOpRank5(TestExpandAsV2XPUOp):
        """Rank-5 broadcast (1, 1, 7, 16, 1) -> (4, 6, 7, 16, 1)."""

        def set_inputs(self):
            x = np.random.rand(1, 1, 7, 16, 1).astype(self.dtype)
            self.inputs = {'X': x}
            # BUGFIX: match np.tile(x, [4, 6, 1, 1, 1]) from set_output().
            target_tensor = np.random.rand(4, 6, 7, 16, 1).astype(self.dtype)
            self.attrs = {'target_shape': target_tensor.shape}

        def set_output(self):
            bcast_dims = [4, 6, 1, 1, 1]
            output = np.tile(self.inputs['X'], bcast_dims)
            self.outputs = {'Out': output}

    class TestExpandAsOpRank6(TestExpandAsV2XPUOp):
        """Rank-6 broadcast (1, 1, 7, 16, 1, 1) -> (4, 6, 7, 16, 1, 1)."""

        def set_inputs(self):
            x = np.random.rand(1, 1, 7, 16, 1, 1).astype(self.dtype)
            self.inputs = {'X': x}
            # BUGFIX: match np.tile(x, [4, 6, 1, 1, 1, 1]) from set_output().
            target_tensor = np.random.rand(4, 6, 7, 16, 1,
                                           1).astype(self.dtype)
            self.attrs = {'target_shape': target_tensor.shape}

        def set_output(self):
            bcast_dims = [4, 6, 1, 1, 1, 1]
            output = np.tile(self.inputs['X'], bcast_dims)
            self.outputs = {'Out': output}
# Test python API
......@@ -236,5 +149,9 @@ class TestExpandAsV2API(unittest.TestCase):
assert np.array_equal(res_1[0], np.tile(input1, (2, 1, 1)))
# Register one concrete test class per dtype the XPU expand_as_v2 kernel
# supports; create_test_class injects them into this module's globals().
support_types = get_xpu_op_support_types('expand_as_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestExpandAsV2Op, stype)

if __name__ == '__main__':
    unittest.main()
......@@ -22,6 +22,7 @@ from op_test_xpu import XPUOpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import paddle
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
np.random.seed(10)
......@@ -29,152 +30,132 @@ np.random.seed(10)
# CANN Op Support X: float32, int32, int64
# Situation 1: shape is a list(without tensor)
class TestExpandV2XPUOpRank1(XPUOpTest):
    """expand_v2 on XPU with a literal 'shape' attribute (no shape tensors)."""

    def setUp(self):
        self.set_xpu()
        self.op_type = "expand_v2"
        self.place = paddle.XPUPlace(0)
        self.dtype = np.float32
        self.init_data()
        src = np.random.random(self.ori_shape).astype(self.dtype)
        self.inputs = {'X': src}
        self.attrs = {'shape': self.shape}
        # Expected output: the input tiled by the per-axis repeat counts.
        self.outputs = {'Out': np.tile(src, self.expand_times)}

    def set_xpu(self):
        self.__class__.use_xpu = True

    def init_data(self):
        self.ori_shape = [100]
        self.shape = [100]
        self.expand_times = [1]

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient checking is intentionally skipped for this op.
        pass
class TestExpandV2OpRank2_DimExpanding(TestExpandV2XPUOpRank1):
    """Rank grows from 1 to 2: (120,) -> (2, 120)."""

    def init_data(self):
        self.ori_shape = [120]
        self.expand_times = [2, 1]
        self.shape = [2, 120]
class TestExpandV2OpRank2(TestExpandV2XPUOpRank1):
    """Leading singleton axis repeated 12x: (1, 140) -> (12, 140)."""

    def init_data(self):
        self.ori_shape = [1, 140]
        self.expand_times = [12, 1]
        self.shape = [12, 140]
class TestExpandV2OpRank3_Corner(TestExpandV2XPUOpRank1):
    """Corner case: target shape equals the input shape (identity expand)."""

    def init_data(self):
        self.ori_shape = (2, 10, 5)
        self.expand_times = (1, 1, 1)
        self.shape = (2, 10, 5)
class TestExpandV2OpRank4(TestExpandV2XPUOpRank1):
    """All target dims given as -1 (keep each input dimension unchanged)."""

    def init_data(self):
        self.ori_shape = (2, 4, 5, 7)
        self.expand_times = (1, 1, 1, 1)
        self.shape = (-1, -1, -1, -1)
class TestExpandV2OpRank5(TestExpandV2XPUOpRank1):
    """Mix of explicit dims and -1: (2, 4, 1, 15) -> (2, 4, 4, 15)."""

    def init_data(self):
        self.ori_shape = (2, 4, 1, 15)
        self.expand_times = (1, 1, 4, 1)
        self.shape = (2, -1, 4, -1)
class TestExpandV2OpRank6(TestExpandV2XPUOpRank1):
    """Rank grows and mixes -1 dims: (4, 1, 30) -> (2, 4, 4, 30)."""

    def init_data(self):
        self.ori_shape = (4, 1, 30)
        self.expand_times = (2, 1, 4, 1)
        self.shape = (2, -1, 4, 30)
# Situation 2: shape is a list(with tensor)
class TestExpandV2OpXPURank1_tensor_attr(XPUOpTest):
    """expand_v2 where entries of 'shape' are fed as 1-element int32 tensors."""

    def setUp(self):
        self.set_xpu()
        self.op_type = "expand_v2"
        self.place = paddle.XPUPlace(0)
        self.init_data()
        self.dtype = np.float32
        # One ("xN", tensor) pair per target dimension.
        shape_tensors = [("x" + str(i), np.ones((1)).astype('int32') * dim)
                         for i, dim in enumerate(self.expand_shape)]
        self.inputs = {
            'X': np.random.random(self.ori_shape).astype(self.dtype),
            'expand_shapes_tensor': shape_tensors,
        }
        self.attrs = {"shape": self.infer_expand_shape}
        self.outputs = {'Out': np.tile(self.inputs['X'], self.expand_times)}

    def set_xpu(self):
        self.__class__.use_xpu = True

    def init_data(self):
        self.ori_shape = [100]
        self.expand_times = [1]
        self.expand_shape = [100]
        self.infer_expand_shape = [-1]

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient checking is intentionally skipped for this op.
        pass
class TestExpandV2OpRank2_Corner_tensor_attr(
        TestExpandV2OpXPURank1_tensor_attr):
    """2-D identity expand; the second dim is inferred (-1) from a tensor."""

    def init_data(self):
        self.ori_shape = [12, 14]
        self.expand_shape = [12, 14]
        self.expand_times = [1, 1]
        self.infer_expand_shape = [12, -1]
# Situation 3: shape is a tensor
class TestExpandV2XPUOpRank1_tensor(XPUOpTest):
    """expand_v2 where the whole target shape comes via the 'Shape' input."""

    def setUp(self):
        self.set_xpu()
        self.op_type = "expand_v2"
        self.place = paddle.XPUPlace(0)
        self.init_data()
        self.dtype = np.float32
        self.inputs = {
            'X': np.random.random(self.ori_shape).astype(self.dtype),
            'Shape': np.array(self.expand_shape).astype("int32"),
        }
        # No 'shape' attribute: everything is carried by the Shape tensor.
        self.attrs = {}
        self.outputs = {'Out': np.tile(self.inputs['X'], self.expand_times)}

    def set_xpu(self):
        self.__class__.use_xpu = True

    def init_data(self):
        self.ori_shape = [100]
        self.expand_times = [2, 1]
        self.expand_shape = [2, 100]

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient checking is intentionally skipped for this op.
        pass
class XPUTestExpandV2Op(XPUOpTestWrapper):
    # Per-dtype wrapper for expand_v2 on XPU: create_test_class() generates
    # one concrete class per dtype from get_xpu_op_support_types('expand_v2').

    def __init__(self):
        self.op_name = 'expand_v2'
        self.use_dynamic_create_class = False

    # Situation 1: 'shape' is given purely as an attribute (list of ints).
    class TestExpandV2XPUOp(XPUOpTest):
        def setUp(self):
            self.init_dtype()
            self.set_xpu()
            self.op_type = "expand_v2"
            self.place = paddle.XPUPlace(0)
            self.init_data()
            self.inputs = {
                'X': np.random.random(self.ori_shape).astype(self.dtype)
            }
            self.attrs = {'shape': self.shape}
            # Reference result: tile X by the per-axis repeat factors.
            output = np.tile(self.inputs['X'], self.expand_times)
            self.outputs = {'Out': output}

        def init_dtype(self):
            # in_type is injected by create_test_class for each dtype.
            self.dtype = self.in_type

        def set_xpu(self):
            self.__class__.use_xpu = True
            self.__class__.no_need_check_grad = True

        def init_data(self):
            # Subclasses override to vary shapes/repeat factors.
            self.ori_shape = [100]
            self.shape = [100]
            self.expand_times = [1]

        def test_check_output(self):
            self.check_output_with_place(self.place)

    class TestExpandV2OpRank2_DimExpanding(TestExpandV2XPUOp):
        # Rank grows from 1 to 2: (120,) -> (2, 120).
        def init_data(self):
            self.ori_shape = [120]
            self.shape = [2, 120]
            self.expand_times = [2, 1]

    class TestExpandV2OpRank2(TestExpandV2XPUOp):
        # Leading singleton axis repeated 12x.
        def init_data(self):
            self.ori_shape = [1, 140]
            self.shape = [12, 140]
            self.expand_times = [12, 1]

    class TestExpandV2OpRank3_Corner(TestExpandV2XPUOp):
        # Identity expand: target shape equals input shape.
        def init_data(self):
            self.ori_shape = (2, 10, 5)
            self.shape = (2, 10, 5)
            self.expand_times = (1, 1, 1)

    class TestExpandV2OpRank4(TestExpandV2XPUOp):
        # All dims -1: keep each input dimension unchanged.
        def init_data(self):
            self.ori_shape = (2, 4, 5, 7)
            self.shape = (-1, -1, -1, -1)
            self.expand_times = (1, 1, 1, 1)

    class TestExpandV2OpRank5(TestExpandV2XPUOp):
        # Mix of explicit dims and -1.
        def init_data(self):
            self.ori_shape = (2, 4, 1, 15)
            self.shape = (2, -1, 4, -1)
            self.expand_times = (1, 1, 4, 1)

    class TestExpandV2OpRank6(TestExpandV2XPUOp):
        # Rank grows and mixes -1 dims.
        def init_data(self):
            self.ori_shape = (4, 1, 30)
            self.shape = (2, -1, 4, 30)
            self.expand_times = (2, 1, 4, 1)

    # Situation 2: entries of 'shape' supplied as 1-element int32 tensors.
    class TestExpandV2OpXPURank1_tensor_attr(TestExpandV2XPUOp):
        def setUp(self):
            self.set_xpu()
            self.place = paddle.XPUPlace(0)
            self.op_type = "expand_v2"
            self.init_data()
            # NOTE(review): dtype is hard-coded to float32 and init_dtype()
            # is not called, so this case ignores the wrapper's per-dtype
            # parameterization — confirm that is intended.
            self.dtype = np.float32
            expand_shapes_tensor = []
            for index, ele in enumerate(self.expand_shape):
                expand_shapes_tensor.append(("x" + str(index), np.ones(
                    (1)).astype('int32') * ele))
            self.inputs = {
                'X': np.random.random(self.ori_shape).astype(self.dtype),
                'expand_shapes_tensor': expand_shapes_tensor,
            }
            self.attrs = {"shape": self.infer_expand_shape}
            output = np.tile(self.inputs['X'], self.expand_times)
            self.outputs = {'Out': output}

        def init_data(self):
            self.ori_shape = [100]
            self.expand_times = [1]
            self.expand_shape = [100]
            self.infer_expand_shape = [-1]

    class TestExpandV2OpRank2_Corner_tensor_attr(
            TestExpandV2OpXPURank1_tensor_attr):
        # 2-D identity expand; second dim inferred (-1) from a shape tensor.
        def init_data(self):
            self.ori_shape = [12, 14]
            self.expand_times = [1, 1]
            self.expand_shape = [12, 14]
            self.infer_expand_shape = [12, -1]

    # Situation 3: the full target shape arrives through the 'Shape' input.
    class TestExpandV2XPUOp_tensor(TestExpandV2XPUOp):
        def setUp(self):
            self.set_xpu()
            self.place = paddle.XPUPlace(0)
            self.op_type = "expand_v2"
            self.init_data()
            # NOTE(review): hard-coded float32 here as well — see above.
            self.dtype = np.float32
            self.inputs = {
                'X': np.random.random(self.ori_shape).astype(self.dtype),
                'Shape': np.array(self.expand_shape).astype("int32"),
            }
            self.attrs = {}
            output = np.tile(self.inputs['X'], self.expand_times)
            self.outputs = {'Out': output}

        def init_data(self):
            self.ori_shape = [100]
            self.expand_times = [2, 1]
            self.expand_shape = [2, 100]
# Situation 5: input x is int32
......@@ -206,21 +187,6 @@ class TestExpandV2OpInteger(XPUOpTest):
pass
class TesstExpandV2OpInt64(TestExpandV2OpInteger):
    """int64 variant of the integer expand_v2 test.

    NOTE(review): the class name carries a 'Tesst' typo; kept as-is so
    test selection by name is unaffected.
    """

    def init_dtype(self):
        self.dtype = 'int64'
class TesstExpandV2OpBool(TestExpandV2OpInteger):
    """bool variant of the integer expand_v2 test.

    NOTE(review): the class name carries a 'Tesst' typo; kept as-is so
    test selection by name is unaffected.
    """

    def init_dtype(self):
        self.dtype = 'bool'
class TesstExpandV2OpFP16(TestExpandV2OpInteger):
    """float16 variant of the integer expand_v2 test.

    NOTE(review): the class name carries a 'Tesst' typo; kept as-is so
    test selection by name is unaffected.
    """

    def init_dtype(self):
        self.dtype = 'float16'
# Test python API
class TestExpandV2API(unittest.TestCase):
def test_static(self):
......@@ -259,5 +225,9 @@ class TestExpandV2API(unittest.TestCase):
assert np.array_equal(res_3, np.tile(input, (1, 1)))
# Register one concrete test class per dtype the XPU expand_v2 kernel
# supports; create_test_class injects them into this module's globals().
support_types = get_xpu_op_support_types('expand_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestExpandV2Op, stype)

if __name__ == "__main__":
    unittest.main()
......@@ -26,52 +26,66 @@ import unittest
import numpy as np
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
class TestFillAnyLikeOp(OpTest):
def setUp(self):
self.op_type = "fill_any_like"
self.dtype = np.float32
self.use_xpu = True
self.use_mkldnn = False
self.value = 0.0
self.init()
self.inputs = {'X': np.random.random((219, 232)).astype(self.dtype)}
self.attrs = {'value': self.value, 'use_xpu': True}
self.outputs = {'Out': self.value * np.ones_like(self.inputs["X"])}
class XPUTestFillAnyLikeOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'fill_any_like'
self.use_dynamic_create_class = False
def init(self):
pass
class TestFillAnyLikeOp(XPUOpTest):
def setUp(self):
self.init_dtype()
self.set_xpu()
self.op_type = "fill_any_like"
self.place = paddle.XPUPlace(0)
self.set_value()
self.set_input()
self.attrs = {'value': self.value, 'use_xpu': True}
self.outputs = {'Out': self.value * np.ones_like(self.inputs["X"])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def init_dtype(self):
self.dtype = self.in_type
def set_xpu(self):
self.__class__.use_xpu = True
self.__class__.no_need_check_grad = True
class TestFillAnyLikeOpFloat32(TestFillAnyLikeOp):
    """Explicit float32 fill with value 0.0."""

    def init(self):
        self.value = 0.0
        self.dtype = np.float32
def set_input(self):
self.inputs = {'X': np.random.random((219, 232)).astype(self.dtype)}
def set_value(self):
self.value = 0.0
class TestFillAnyLikeOpValue1(TestFillAnyLikeOp):
    """Fill value 1.0."""

    def init(self):
        self.value = 1.0
def test_check_output(self):
self.check_output_with_place(self.place)
class TestFillAnyLikeOp2(TestFillAnyLikeOp):
    """Fill with negative zero (-0.0)."""

    def set_value(self):
        self.value = -0.0
class TestFillAnyLikeOpValue2(TestFillAnyLikeOp):
    """Fill with a tiny positive value (1e-9)."""

    def init(self):
        self.value = 1e-9
class TestFillAnyLikeOp3(TestFillAnyLikeOp):
    """Fill value 1.0."""

    def set_value(self):
        self.value = 1.0
class TestFillAnyLikeOp4(TestFillAnyLikeOp):
    """Fill with a tiny positive value (1e-9)."""

    def set_value(self):
        self.value = 1e-9
class TestFillAnyLikeOpFloat16(TestFillAnyLikeOp):
    """Explicit float16 fill with value 0.05."""

    def init(self):
        self.value = 0.05
        self.dtype = np.float16
class TestFillAnyLikeOp5(TestFillAnyLikeOp):
    """Dtype-dependent fill value: small for float16, larger otherwise."""

    def set_value(self):
        # NOTE(review): assumes self.dtype is the dtype *string* injected
        # via in_type — confirm against the wrapper framework.
        self.value = 0.05 if self.dtype == "float16" else 5.0
# Register one concrete test class per dtype the XPU fill_any_like kernel
# supports; create_test_class injects them into this module's globals().
support_types = get_xpu_op_support_types('fill_any_like')
for stype in support_types:
    create_test_class(globals(), XPUTestFillAnyLikeOp, stype)

if __name__ == "__main__":
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册