Unverified commit c8d6c146, authored by zhangxiaoci, committed by GitHub

refactor reshape2/shape unittest for kunlun (#39665)

*test=kunlun
Parent 0efa64c8
The reshape2 unittest:

@@ -14,34 +14,52 @@
from __future__ import print_function

import unittest
import numpy as np
import sys
sys.path.append("..")

from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()

class XPUTestReshapeOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = "reshape2"
        self.use_dynamic_create_class = False
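
    # Note: these two attributes are consumed by the registration helpers at
    # the bottom of this file. As I read xpu.get_test_cover_info, `op_name`
    # selects which op's registered XPU dtypes get_xpu_op_support_types()
    # enumerates, and `use_dynamic_create_class = False` tells
    # create_test_class() to pick up the inner Test* classes below as-is,
    # generating one concrete TestCase per supported dtype.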

    # situation 1: have shape( list, no tensor), no actual shape(Tensor)
    class TestReshapeOp(XPUOpTest):
        def setUp(self):
            self.init_data()
            self.op_type = "reshape2"
            # dtype is injected as in_type by create_test_class, the same
            # pattern the shape unittest below uses
            self.dtype = self.in_type
            self.init_test_input()
            self.init_test_output()
            self.init_attrs()

        def init_data(self):
            self.ori_shape = (2, 60)
            self.new_shape = (12, 10)
            self.infered_shape = (12, 10)

        def init_test_input(self):
            self.inputs = {
                "X": np.random.random(self.ori_shape).astype(self.dtype)
            }

        def init_test_output(self):
            self.outputs = {
                "Out": self.inputs["X"].reshape(self.infered_shape),
                'XShape': np.random.random(self.ori_shape).astype(self.dtype)
            }
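
        # As I understand reshape2, 'XShape' only records the input's original
        # shape for the backward pass; its values are never compared, which is
        # why random data is fine here and why the output checks below pass
        # no_check_set=['XShape'].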

        def init_attrs(self):
            self.attrs = {"shape": self.new_shape, "use_xpu": True}

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_output_with_place(place, no_check_set=['XShape'])

        def test_check_grad(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_grad_with_place(place, ["X"], "Out")

    class TestReshapeOpDimInfer1(TestReshapeOp):
        def init_data(self):
            self.ori_shape = (5, 25)
            self.new_shape = (5, -1, 5)
            self.infered_shape = (5, -1, 5)

    class TestReshapeOpDimInfer2(TestReshapeOp):
        def init_data(self):
            self.ori_shape = (10, 2, 6)
            self.new_shape = (10, 0, 3, -1)
            self.infered_shape = (10, 2, 3, -1)
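
    # A worked example of the target-shape encoding exercised above: with an
    # input of shape (10, 2, 6), the requested shape (10, 0, 3, -1) resolves to
    # (10, 2, 3, -1), since 0 means "copy this dimension from the input" and
    # the single -1 is inferred from the remaining element count
    # (120 / (10 * 2 * 3) = 2).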

    # situation 2: have shape(list, no tensor), have actual shape(Tensor)
    class TestReshapeOpWithInputShape(TestReshapeOp):
        def init_data(self):
            self.ori_shape = (6, 20)
            self.new_shape = (0, -1, 20)
            self.actual_shape = (2, 3, 20)

        def init_test_input(self):
            self.inputs = {
                "X": np.random.random(self.ori_shape).astype(self.dtype),
                "Shape": np.array(
                    self.actual_shape, dtype="int32")
            }

        def init_test_output(self):
            self.outputs = {
                "Out": self.inputs["X"].reshape(self.actual_shape),
                'XShape': np.random.random(self.ori_shape).astype(self.dtype)
            }

    # Situation 3: have shape(list, have tensor), no actual shape(Tensor)
    class TestReshapeOp_attr_ShapeTensor(TestReshapeOp):
        def init_data(self):
            self.ori_shape = (4, 25)
            self.new_shape = (10, 10)
            self.infered_shape = (10, 10)
            self.shape = (-1, -1)

        def init_test_input(self):
            shape_tensor = []
            for index, ele in enumerate(self.new_shape):
                shape_tensor.append(("x" + str(index), np.ones(
                    (1)).astype('int32') * ele))
            self.inputs = {
                "X": np.random.random(self.ori_shape).astype(self.dtype),
                'ShapeTensor': shape_tensor
            }

        def init_attrs(self):
            self.attrs = {'shape': self.shape, "use_xpu": True}

    class TestReshapeOpDimInfer1_attr_ShapeTensor(
            TestReshapeOp_attr_ShapeTensor):
        def init_data(self):
            self.ori_shape = (5, 20)
            self.new_shape = (5, -1, 20)
            self.infered_shape = (5, -1, 20)
            self.shape = (5, -1, -1)

    class TestReshapeOpDimInfer2_attr_ShapeTensor(
            TestReshapeOp_attr_ShapeTensor):
        def init_data(self):
            self.ori_shape = (10, 2, 6)
            self.new_shape = (10, 0, 3, -1)
            self.infered_shape = (10, 2, 3, -1)
            self.shape = (10, 0, 3, -1)

    # Situation 4: have shape(Tensor), no actual shape(Tensor)
    class TestReshapeOp_attr_OnlyShape(TestReshapeOp):
        def init_data(self):
            self.ori_shape = (4, 25)
            self.new_shape = (10, 10)
            self.infered_shape = (10, 10)

        def init_test_input(self):
            self.inputs = {
                "X": np.random.random(self.ori_shape).astype(self.dtype),
                "Shape": np.array(
                    self.new_shape, dtype="int32")
            }

        def init_attrs(self):
            self.attrs = {"use_xpu": True}

    class TestReshapeOpDimInfer1_attr_OnlyShape(TestReshapeOp_attr_OnlyShape):
        def init_data(self):
            self.ori_shape = (5, 20)
            self.new_shape = (5, -1, 10)
            self.infered_shape = (5, -1, 10)
            self.shape = (5, -1, -1)

    class TestReshapeOpDimInfer2_attr_OnlyShape(TestReshapeOp_attr_OnlyShape):
        def init_data(self):
            self.ori_shape = (10, 2, 6)
            self.new_shape = (10, 0, 3, -1)
            self.infered_shape = (10, 2, 3, -1)
            self.shape = (10, 0, 3, -1)
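
# The loop below is the registration step of the XPUOpTestWrapper pattern. A
# rough sketch of the expected behaviour, assuming the usual semantics of
# xpu.get_test_cover_info: get_xpu_op_support_types("reshape2") returns the
# dtypes the Kunlun reshape2 kernel is registered for, and create_test_class
# then materialises one runnable unittest class per dtype from the inner
# classes of XPUTestReshapeOp, injecting the dtype (e.g. as `in_type`) so that
# `self.dtype` in the tests above matches the kernel under test.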

support_types = get_xpu_op_support_types("reshape2")
for stype in support_types:
    create_test_class(globals(), XPUTestReshapeOp, stype)

if __name__ == "__main__":
    unittest.main()

The shape unittest:

@@ -18,19 +18,27 @@ import unittest
import numpy as np
import sys
sys.path.append("..")

from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
import paddle
from paddle.fluid import core
from paddle.fluid.op import Operator

paddle.enable_static()


class XPUTestShapeOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = "shape"
        self.use_dynamic_create_class = False

    class TestShapeOp(XPUOpTest):
        def setUp(self):
            self.dtype = self.in_type
            self.op_type = "shape"
            self.config()
            input = np.zeros(self.shape)
            self.inputs = {'Input': input.astype(self.dtype)}
            self.outputs = {'Out': np.array(self.shape)}

        def config(self):
            self.shape = [2, 3]

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_output_with_place(place)

    class TestShapeOp1(TestShapeOp):
        def config(self):
            self.shape = [2]

    class TestShapeOp2(TestShapeOp):
        def config(self):
            self.shape = [1, 2, 3]

    class TestShapeOp3(TestShapeOp):
        def config(self):
            self.shape = [1, 2, 3, 4]

    class TestShapeOp4(TestShapeOp):
        def config(self):
            self.shape = [1, 2, 3, 4, 1024]

    class TestShapeOp5(TestShapeOp):
        def config(self):
            self.shape = [1, 2, 3, 4, 1, 201]
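
    # The next case feeds the shape op a SelectedRows input rather than a
    # dense LoDTensor. Roughly, a SelectedRows variable holds a set of row
    # indices plus an embedded tensor with the row data (set up below via
    # set_height / get_tensor), and the shape op is expected to report the
    # shape of that embedded value tensor; self.in_type is again injected by
    # create_test_class.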
    class TestShapeWithSelectedRows(unittest.TestCase):
        def setUp(self):
            self.dtype = self.in_type

        def get_places(self):
            places = [core.CPUPlace()]
            if core.is_compiled_with_cuda():
@@ -67,7 +87,7 @@ class TestShapeWithSelectedRows(unittest.TestCase):
            height = 20
            row_numel = 2

            np_array = np.ones((len(x_rows), row_numel)).astype(self.dtype)

            # initialize input variable X
            x = scope.var('X').get_selected_rows()
@@ -75,8 +95,6 @@ class TestShapeWithSelectedRows(unittest.TestCase):
            x.set_height(height)
            x_tensor = x.get_tensor()
            x_tensor.set(np_array, place)

            # initialize output variable Out
            out_shape = scope.var("Out").get_tensor()
            op = Operator("shape", Input="X", Out="Out")
@@ -90,5 +108,9 @@ class TestShapeWithSelectedRows(unittest.TestCase):
                self.check_with_place(place)

support_types = get_xpu_op_support_types("shape")
for stype in support_types:
    create_test_class(globals(), XPUTestShapeOp, stype)

if __name__ == '__main__':
    unittest.main()