From c8d6c146354e85864700b941fc288803f077b72b Mon Sep 17 00:00:00 2001
From: zhangxiaoci
Date: Tue, 22 Feb 2022 11:14:52 +0800
Subject: [PATCH] refactor reshape2/shape unittest for kunlun (#39665)

*test=kunlun
---
 .../unittests/xpu/test_reshape2_op_xpu.py    | 339 ++++++++----------
 .../tests/unittests/xpu/test_shape_op_xpu.py | 156 ++++----
 2 files changed, 245 insertions(+), 250 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/xpu/test_reshape2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_reshape2_op_xpu.py
index 1a21b0f1972..0b000fc924a 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_reshape2_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_reshape2_op_xpu.py
@@ -14,194 +14,167 @@ from __future__ import print_function
 
-import unittest
 import numpy as np
 import sys
-
+import unittest
 sys.path.append("..")
-from op_test import OpTest
+
 import paddle
-import paddle.fluid as fluid
-from paddle.fluid import compiler, Program, program_guard
-
-
-# situation 1: have shape( list, no tensor), no actual shape(Tensor)
-class TestReshapeOp(OpTest):
-    def setUp(self):
-        self.init_data()
-        self.op_type = "reshape2"
-        self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
-        self.attrs = {"shape": self.new_shape, "use_xpu": True}
-        self.outputs = {
-            "Out": self.inputs["X"].reshape(self.infered_shape),
-            'XShape': np.random.random(self.ori_shape).astype("float32")
-        }
-
-    def init_data(self):
-        self.ori_shape = (2, 60)
-        self.new_shape = (12, 10)
-        self.infered_shape = (12, 10)
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place, no_check_set=['XShape'])
-
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ["X"], "Out")
-
-
-class TestReshapeOpDimInfer1(TestReshapeOp):
-    def init_data(self):
-        self.ori_shape = (5, 25)
-        self.new_shape = (5, -1, 5)
-        self.infered_shape = (5, -1, 5)
-
-
-class TestReshapeOpDimInfer2(TestReshapeOp):
-    def init_data(self):
-        self.ori_shape = (10, 2, 6)
-        self.new_shape = (10, 0, 3, -1)
-        self.infered_shape = (10, 2, 3, -1)
-
-
-# situation 2: have shape(list, no tensor), have actual shape(Tensor)
-class TestReshapeOpWithInputShape(OpTest):
-    def setUp(self):
-        self.init_data()
-        self.op_type = "reshape2"
-
-        self.inputs = {
-            "X": np.random.random(self.ori_shape).astype("float32"),
-            "Shape": np.array(
-                self.actual_shape, dtype="int32")
-        }
-        self.attrs = {"shape": self.new_shape, "use_xpu": True}
-        self.outputs = {
-            "Out": self.inputs["X"].reshape(self.actual_shape),
-            'XShape': np.random.random(self.ori_shape).astype("float32")
-        }
-
-    def init_data(self):
-        self.ori_shape = (6, 20)
-        self.new_shape = (0, -1, 20)
-        self.actual_shape = (2, 3, 20)
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place, no_check_set=['XShape'])
-
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ["X"], "Out")
-
-
-# Situation 3: have shape(list, have tensor), no actual shape(Tensor)
-class TestReshapeOp_attr_ShapeTensor(OpTest):
-    def setUp(self):
-        self.init_data()
-        self.op_type = "reshape2"
-
-        shape_tensor = []
-        for index, ele in enumerate(self.new_shape):
-            shape_tensor.append(("x" + str(index), np.ones(
-                (1)).astype('int32') * ele))
-
-        self.inputs = {
-            "X": np.random.random(self.ori_shape).astype("float32"),
-            'ShapeTensor': shape_tensor
-        }
-        self.attrs = {'shape': self.shape, "use_xpu": True}
-        self.outputs = {
-            "Out": self.inputs["X"].reshape(self.infered_shape),
-            'XShape': np.random.random(self.ori_shape).astype("float32")
-        }
-
-    def init_data(self):
-        self.ori_shape = (4, 25)
-        self.new_shape = (10, 10)
-        self.infered_shape = (10, 10)
-        self.shape = (-1, -1)
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place, no_check_set=['XShape'])
-
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ["X"], "Out")
-
-
-class TestReshapeOpDimInfer1_attr_ShapeTensor(TestReshapeOp_attr_ShapeTensor):
-    def init_data(self):
-        self.ori_shape = (5, 20)
-        self.new_shape = (5, -1, 20)
-        self.infered_shape = (5, -1, 20)
-        self.shape = (5, -1, -1)
-
-
-class TestReshapeOpDimInfer2_attr_ShapeTensor(TestReshapeOp_attr_ShapeTensor):
-    def init_data(self):
-        self.ori_shape = (10, 2, 6)
-        self.new_shape = (10, 0, 3, -1)
-        self.infered_shape = (10, 2, 3, -1)
-        self.shape = (10, 0, 3, -1)
-
-
-# Situation 4: have shape(Tensor), no actual shape(Tensor)
-class TestReshapeOp_attr_OnlyShape(OpTest):
-    def setUp(self):
-        self.init_data()
-        self.op_type = "reshape2"
-
-        self.inputs = {
-            "X": np.random.random(self.ori_shape).astype("float32"),
-            "Shape": np.array(
-                self.new_shape, dtype="int32")
-        }
-        self.attrs = {"use_xpu": True}
-        self.outputs = {
-            "Out": self.inputs["X"].reshape(self.infered_shape),
-            'XShape': np.random.random(self.ori_shape).astype("float32")
-        }
-
-    def init_data(self):
-        self.ori_shape = (4, 25)
-        self.new_shape = (10, 10)
-        self.infered_shape = (10, 10)
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place, no_check_set=['XShape'])
-
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ["X"], "Out")
-
-
-class TestReshapeOpDimInfer1_attr_OnlyShape(TestReshapeOp_attr_OnlyShape):
-    def init_data(self):
-        self.ori_shape = (5, 20)
-        self.new_shape = (5, -1, 10)
-        self.infered_shape = (5, -1, 10)
-        self.shape = (5, -1, -1)
-
-
-class TestReshapeOpDimInfer2_attr_OnlyShape(TestReshapeOp_attr_OnlyShape):
-    def init_data(self):
-        self.ori_shape = (10, 2, 6)
-        self.new_shape = (10, 0, 3, -1)
-        self.infered_shape = (10, 2, 3, -1)
-        self.shape = (10, 0, 3, -1)
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
+
+paddle.enable_static()
+
+
+class XPUTestReshapeOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = "reshape2"
+        self.use_dynamic_create_class = False
+
+    # situation 1: have shape( list, no tensor), no actual shape(Tensor)
+    class TestReshapeOp(XPUOpTest):
+        def setUp(self):
+            self.init_data()
+            self.op_type = "reshape2"
+            self.init_test_input()
+            self.init_test_output()
+            self.init_attrs()
+
+        def init_data(self):
+            self.ori_shape = (2, 60)
+            self.new_shape = (12, 10)
+            self.infered_shape = (12, 10)
+
+        def init_test_input(self):
+            self.inputs = {
+                "X": np.random.random(self.ori_shape).astype(self.dtype)
+            }
+
+        def init_test_output(self):
+            self.outputs = {
+                "Out": self.inputs["X"].reshape(self.infered_shape),
+                'XShape': np.random.random(self.ori_shape).astype(self.dtype)
+            }
+
+        def init_attrs(self):
+            self.attrs = {"shape": self.new_shape, "use_xpu": True}
+
+        def test_check_output(self):
+            if paddle.is_compiled_with_xpu():
+                place = paddle.XPUPlace(0)
+                self.check_output_with_place(place, no_check_set=['XShape'])
+
+        def test_check_grad(self):
+            if paddle.is_compiled_with_xpu():
+                place = paddle.XPUPlace(0)
+                self.check_grad_with_place(place, ["X"], "Out")
+
+    class TestReshapeOpDimInfer1(TestReshapeOp):
+        def init_data(self):
+            self.ori_shape = (5, 25)
+            self.new_shape = (5, -1, 5)
+            self.infered_shape = (5, -1, 5)
+
+    class TestReshapeOpDimInfer2(TestReshapeOp):
+        def init_data(self):
+            self.ori_shape = (10, 2, 6)
+            self.new_shape = (10, 0, 3, -1)
+            self.infered_shape = (10, 2, 3, -1)
+
+    # situation 2: have shape(list, no tensor), have actual shape(Tensor)
+    class TestReshapeOpWithInputShape(TestReshapeOp):
+        def init_data(self):
+            self.ori_shape = (6, 20)
+            self.new_shape = (0, -1, 20)
+            self.actual_shape = (2, 3, 20)
+
+        def init_test_input(self):
+            self.inputs = {
+                "X": np.random.random(self.ori_shape).astype(self.dtype),
+                "Shape": np.array(
+                    self.actual_shape, dtype="int32")
+            }
+
+        def init_test_output(self):
+            self.outputs = {
+                "Out": self.inputs["X"].reshape(self.actual_shape),
+                'XShape': np.random.random(self.ori_shape).astype(self.dtype)
+            }
+
+    # Situation 3: have shape(list, have tensor), no actual shape(Tensor)
+    class TestReshapeOp_attr_ShapeTensor(TestReshapeOp):
+        def init_data(self):
+            self.ori_shape = (4, 25)
+            self.new_shape = (10, 10)
+            self.infered_shape = (10, 10)
+            self.shape = (-1, -1)
+
+        def init_test_input(self):
+            shape_tensor = []
+            for index, ele in enumerate(self.new_shape):
+                shape_tensor.append(("x" + str(index), np.ones(
+                    (1)).astype('int32') * ele))
+
+            self.inputs = {
+                "X": np.random.random(self.ori_shape).astype(self.dtype),
+                'ShapeTensor': shape_tensor
+            }
+
+        def init_attrs(self):
+            self.attrs = {'shape': self.shape, "use_xpu": True}
+
+    class TestReshapeOpDimInfer1_attr_ShapeTensor(
+            TestReshapeOp_attr_ShapeTensor):
+        def init_data(self):
+            self.ori_shape = (5, 20)
+            self.new_shape = (5, -1, 20)
+            self.infered_shape = (5, -1, 20)
+            self.shape = (5, -1, -1)
+
+    class TestReshapeOpDimInfer2_attr_ShapeTensor(
+            TestReshapeOp_attr_ShapeTensor):
+        def init_data(self):
+            self.ori_shape = (10, 2, 6)
+            self.new_shape = (10, 0, 3, -1)
+            self.infered_shape = (10, 2, 3, -1)
+            self.shape = (10, 0, 3, -1)
+
+    # Situation 4: have shape(Tensor), no actual shape(Tensor)
+    class TestReshapeOp_attr_OnlyShape(TestReshapeOp):
+        def init_data(self):
+            self.ori_shape = (4, 25)
+            self.new_shape = (10, 10)
+            self.infered_shape = (10, 10)
+
+        def init_test_input(self):
+            self.inputs = {
+                "X": np.random.random(self.ori_shape).astype(self.dtype),
+                "Shape": np.array(
+                    self.new_shape, dtype="int32")
+            }
+
+        def init_attrs(self):
+            self.attrs = {"use_xpu": True}
+
+    class TestReshapeOpDimInfer1_attr_OnlyShape(TestReshapeOp_attr_OnlyShape):
+        def init_data(self):
+            self.ori_shape = (5, 20)
+            self.new_shape = (5, -1, 10)
+            self.infered_shape = (5, -1, 10)
+            self.shape = (5, -1, -1)
+
+    class TestReshapeOpDimInfer2_attr_OnlyShape(TestReshapeOp_attr_OnlyShape):
+        def init_data(self):
+            self.ori_shape = (10, 2, 6)
+            self.new_shape = (10, 0, 3, -1)
+            self.infered_shape = (10, 2, 3, -1)
+            self.shape = (10, 0, 3, -1)
+
+
+support_types = get_xpu_op_support_types("reshape2")
+for stype in support_types:
+    create_test_class(globals(), XPUTestReshapeOp, stype)
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_shape_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_shape_op_xpu.py
index f194f3ca80c..c7fa72ca770 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_shape_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_shape_op_xpu.py
@@ -18,77 +18,99 @@ import unittest
 import numpy as np
 import sys
 sys.path.append("..")
-from op_test import OpTest
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 
 import paddle
 from paddle.fluid import core
 from paddle.fluid.op import Operator
 
-
-class TestShapeOp(OpTest):
-    def setUp(self):
-        self.op_type = "shape"
-        self.config()
-        self.shape = [2, 3]
-        input = np.zeros(self.shape)
-        self.inputs = {'Input': input}
-        self.outputs = {'Out': np.array(self.shape)}
-
-    def config(self):
-        self.shape = [2, 3]
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-
-class case1(TestShapeOp):
-    def config(self):
-        self.shape = [2]
-
-
-class case2(TestShapeOp):
-    def config(self):
-        self.shape = [1, 2, 3]
-
-
-class TestShapeWithSelectedRows(unittest.TestCase):
-    def get_places(self):
-        places = [core.CPUPlace()]
-        if core.is_compiled_with_cuda():
-            places.append(core.CUDAPlace(0))
-        if core.is_compiled_with_xpu():
-            places.append(core.XPUPlace(0))
-        return places
-
-    def check_with_place(self, place):
-        scope = core.Scope()
-        x_rows = [0, 1, 5, 4, 19]
-        height = 20
-        row_numel = 2
-
-        np_array = np.ones((len(x_rows), row_numel)).astype("float32")
-
-        # initialize input variable X
-        x = scope.var('X').get_selected_rows()
-        x.set_rows(x_rows)
-        x.set_height(height)
-        x_tensor = x.get_tensor()
-        x_tensor.set(np_array, place)
-
-        # initialize input variable Out
-        out_shape = scope.var("Out").get_tensor()
-        op = Operator("shape", Input="X", Out="Out")
-
-        op.run(scope, place)
-
-        out_shape = np.array(out_shape).tolist()
-        self.assertListEqual([5, 2], out_shape)
-
-    def test_check_output(self):
-        for place in self.get_places():
-            self.check_with_place(place)
-
+paddle.enable_static()
+
+
+class XPUTestShapeOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = "shape"
+        self.use_dynamic_create_class = False
+
+    class TestShapeOp(XPUOpTest):
+        def setUp(self):
+            self.dtype = self.in_type
+            self.op_type = "shape"
+            self.config()
+            input = np.zeros(self.shape)
+            self.inputs = {'Input': input.astype(self.dtype)}
+            self.outputs = {'Out': np.array(self.shape)}
+
+        def config(self):
+            self.shape = [2, 3]
+
+        def test_check_output(self):
+            if paddle.is_compiled_with_xpu():
+                place = paddle.XPUPlace(0)
+                self.check_output_with_place(place)
+
+    class TestShapeOp1(TestShapeOp):
+        def config(self):
+            self.shape = [2]
+
+    class TestShapeOp2(TestShapeOp):
+        def config(self):
+            self.shape = [1, 2, 3]
+
+    class TestShapeOp3(TestShapeOp):
+        def config(self):
+            self.shape = [1, 2, 3, 4]
+
+    class TestShapeOp4(TestShapeOp):
+        def config(self):
+            self.shape = [1, 2, 3, 4, 1024]
+
+    class TestShapeOp5(TestShapeOp):
+        def config(self):
+            self.shape = [1, 2, 3, 4, 1, 201]
+
+    class TestShapeWithSelectedRows(unittest.TestCase):
+        def setUp(self):
+            self.dtype = self.in_type
+
+        def get_places(self):
+            places = [core.CPUPlace()]
+            if core.is_compiled_with_cuda():
+                places.append(core.CUDAPlace(0))
+            if core.is_compiled_with_xpu():
+                places.append(core.XPUPlace(0))
+            return places
+
+        def check_with_place(self, place):
+            scope = core.Scope()
+            x_rows = [0, 1, 5, 4, 19]
+            height = 20
+            row_numel = 2
+
+            np_array = np.ones((len(x_rows), row_numel)).astype(self.dtype)
+
+            # initialize input variable X
+            x = scope.var('X').get_selected_rows()
+            x.set_rows(x_rows)
+            x.set_height(height)
+            x_tensor = x.get_tensor()
+            x_tensor.set(np_array, place)
+            out_shape = scope.var("Out").get_tensor()
+            op = Operator("shape", Input="X", Out="Out")
+
+            op.run(scope, place)
+
+            out_shape = np.array(out_shape).tolist()
+            self.assertListEqual([5, 2], out_shape)
+
+        def test_check_output(self):
+            for place in self.get_places():
+                self.check_with_place(place)
+
+
+support_types = get_xpu_op_support_types("shape")
+for stype in support_types:
+    create_test_class(globals(), XPUTestShapeOp, stype)
 
 if __name__ == '__main__':
     unittest.main()
--
GitLab