未验证 提交 54d98963 编写于 作者: Z zhangxiaoci 提交者: GitHub

refactor xpu tests for squeeze/unsqueeze, *test=kunlun (#44812)

上级 153f1138
......@@ -468,6 +468,22 @@ XPUOpMap& get_kl2_ops() {
pOpKernelType(vartype::INT8, XPUPlace()),
pOpKernelType(vartype::UINT8, XPUPlace()),
pOpKernelType(vartype::FP32, XPUPlace())})},
{"squeeze_grad",
XPUKernelSet({pOpKernelType(vartype::FP64, XPUPlace()),
pOpKernelType(vartype::INT64, XPUPlace()),
pOpKernelType(vartype::INT32, XPUPlace()),
pOpKernelType(vartype::BOOL, XPUPlace()),
pOpKernelType(vartype::INT8, XPUPlace()),
pOpKernelType(vartype::UINT8, XPUPlace()),
pOpKernelType(vartype::FP32, XPUPlace())})},
{"squeeze",
XPUKernelSet({pOpKernelType(vartype::FP64, XPUPlace()),
pOpKernelType(vartype::INT64, XPUPlace()),
pOpKernelType(vartype::INT32, XPUPlace()),
pOpKernelType(vartype::BOOL, XPUPlace()),
pOpKernelType(vartype::INT8, XPUPlace()),
pOpKernelType(vartype::UINT8, XPUPlace()),
pOpKernelType(vartype::FP32, XPUPlace())})},
{"stack",
XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
pOpKernelType(vartype::INT64, XPUPlace()),
......@@ -534,6 +550,22 @@ XPUOpMap& get_kl2_ops() {
pOpKernelType(vartype::UINT8, XPUPlace()),
pOpKernelType(vartype::FP32, XPUPlace()),
pOpKernelType(vartype::FP16, XPUPlace())})},
{"unsqueeze_grad",
XPUKernelSet({pOpKernelType(vartype::FP64, XPUPlace()),
pOpKernelType(vartype::INT64, XPUPlace()),
pOpKernelType(vartype::INT32, XPUPlace()),
pOpKernelType(vartype::BOOL, XPUPlace()),
pOpKernelType(vartype::INT8, XPUPlace()),
pOpKernelType(vartype::UINT8, XPUPlace()),
pOpKernelType(vartype::FP32, XPUPlace())})},
{"unsqueeze",
XPUKernelSet({pOpKernelType(vartype::FP64, XPUPlace()),
pOpKernelType(vartype::INT64, XPUPlace()),
pOpKernelType(vartype::INT32, XPUPlace()),
pOpKernelType(vartype::BOOL, XPUPlace()),
pOpKernelType(vartype::INT8, XPUPlace()),
pOpKernelType(vartype::UINT8, XPUPlace()),
pOpKernelType(vartype::FP32, XPUPlace())})},
{"where_index",
XPUKernelSet({pOpKernelType(vartype::INT32, XPUPlace()),
pOpKernelType(vartype::BOOL, XPUPlace()),
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -68,17 +68,10 @@ class XPUTestSqueeze2Op(XPUOpTestWrapper):
def test_check_grad(self):
place = paddle.XPUPlace(0)
if self.dtype in [np.float32, np.float64]:
self.check_grad_with_place(place, ['X'], 'Out')
elif self.dtype == np.bool_:
if self.dtype == np.bool_:
return
else:
user_defined_grad_outputs = np.random.random(
self.new_shape).astype(self.dtype)
self.check_grad_with_place(
place, ['X'],
'Out',
user_defined_grad_outputs=user_defined_grad_outputs)
self.check_grad_with_place(place, ['X'], 'Out')
# Correct: There is a minus (negative) axis.
class TestSqueeze2Op1(TestSqueeze2Op):
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -22,80 +22,90 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid import Program, program_guard
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
# Correct: General.
class TestSqueezeOp(XPUOpTest):
class XPUTestSqueezeOp(XPUOpTestWrapper):
def setUp(self):
self.op_type = "squeeze"
self.use_xpu = True
self.use_mkldnn = False
self.init_test_case()
self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
self.init_attrs()
self.outputs = {
"Out": self.inputs["X"].reshape(self.new_shape),
}
def __init__(self):
    """Configure the XPU test wrapper for the ``squeeze`` operator."""
    self.op_name = "squeeze"
    # NOTE(review): presumably tells the XPU test framework not to generate
    # test classes dynamically (they are declared statically below) — confirm
    # against xpu.get_test_cover_info.
    self.use_dynamic_create_class = False
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
# Correct: General.
class TestSqueezeOp(XPUOpTest):
def init_test_case(self):
self.ori_shape = (1, 3, 1, 40)
self.axes = (0, 2)
self.new_shape = (3, 40)
def setUp(self):
self.op_type = "squeeze"
self.__class__.op_type = "squeeze"
self.use_mkldnn = False
self.init_dtype()
self.init_test_case()
self.inputs = {
"X": np.random.random(self.ori_shape).astype(self.dtype)
}
self.init_attrs()
self.outputs = {
"Out": self.inputs["X"].reshape(self.new_shape),
}
def init_attrs(self):
    # Operator attributes: the axes to squeeze, chosen in init_test_case().
    self.attrs = {"axes": self.axes}
def init_dtype(self):
    # in_type is presumably injected per-dtype by create_test_class — TODO
    # confirm against xpu.get_test_cover_info.
    self.dtype = self.in_type
def test_check_output(self):
    """Check the forward output of squeeze on the XPU device."""
    place = paddle.XPUPlace(0)
    self.check_output_with_place(place)
# Correct: There is a minus (negative) axis.
class TestSqueezeOp1(TestSqueezeOp):
def test_check_grad(self):
    """Check the gradient of squeeze on the XPU device.

    Boolean tensors carry no gradient, so the check is skipped for them.
    """
    xpu_place = paddle.XPUPlace(0)
    if self.dtype == np.bool_:
        return
    self.check_grad_with_place(xpu_place, ['X'], 'Out')
def init_test_case(self):
    # Negative axis: -2 addresses the size-1 dimension counted from the end.
    self.ori_shape = (1, 3, 1, 40)
    self.axes = (0, -2)
    self.new_shape = (3, 40)
def init_test_case(self):
    # Squeeze the two size-1 dimensions at positions 0 and 2.
    self.ori_shape = (1, 3, 1, 40)
    self.axes = (0, 2)
    self.new_shape = (3, 40)
def init_attrs(self):
self.attrs = {"axes": self.axes}
# Correct: No axes input.
class TestSqueezeOp2(TestSqueezeOp):
# Correct: There is a minus (negative) axis.
class TestSqueezeOp1(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (1, 20, 1, 5)
self.axes = ()
self.new_shape = (20, 5)
def init_test_case(self):
self.ori_shape = (1, 3, 1, 40)
self.axes = (0, -2)
self.new_shape = (3, 40)
# Correct: No axes input.
class TestSqueezeOp2(TestSqueezeOp):
# Correct: Just part of axes be squeezed.
class TestSqueezeOp3(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (1, 20, 1, 5)
self.axes = ()
self.new_shape = (20, 5)
def init_test_case(self):
self.ori_shape = (6, 1, 5, 1, 4, 1)
self.axes = (1, -1)
self.new_shape = (6, 5, 1, 4)
# Correct: Just part of axes be squeezed.
class TestSqueezeOp3(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (6, 1, 5, 1, 4, 1)
self.axes = (1, -1)
self.new_shape = (6, 5, 1, 4)
# Correct: The dimension of an axis that is not of size 1 remains unchanged.
class TestSqueezeOp4(TestSqueezeOp):
# Correct: The dimension of an axis that is not of size 1 remains unchanged.
class TestSqueezeOp4(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (6, 1, 5, 1, 4, 1)
self.axes = (1, 2)
self.new_shape = (6, 5, 1, 4, 1)
def init_test_case(self):
self.ori_shape = (6, 1, 5, 1, 4, 1)
self.axes = (1, 2)
self.new_shape = (6, 5, 1, 4, 1)
class TestSqueezeOpError(unittest.TestCase):
......@@ -115,5 +125,9 @@ class TestSqueezeOpError(unittest.TestCase):
self.assertRaises(TypeError, paddle.squeeze, x3, axes=0)
# Generate one concrete test class per dtype the XPU squeeze kernel supports.
support_types = get_xpu_op_support_types("squeeze")
for stype in support_types:
    create_test_class(globals(), XPUTestSqueezeOp, stype)

if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -69,17 +69,10 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper):
def test_check_grad(self):
place = paddle.XPUPlace(0)
if self.dtype in [np.float32, np.float64, np.float16]:
self.check_grad_with_place(place, ['X'], 'Out')
elif self.dtype == np.bool_:
if self.dtype == np.bool_:
return
else:
user_defined_grad_outputs = np.random.random(
self.new_shape).astype(self.dtype)
self.check_grad_with_place(
place, ['X'],
'Out',
user_defined_grad_outputs=user_defined_grad_outputs)
self.check_grad_with_place(place, ['X'], 'Out')
# Correct: Single input index.
class TestUnsqueeze2Op1(TestUnsqueeze2Op):
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -24,76 +24,89 @@ import paddle
import paddle.fluid as fluid
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
# Correct: General.
class TestUnsqueezeOp(XPUOpTest):
def setUp(self):
self.init_test_case()
self.op_type = "unsqueeze"
self.use_xpu = True
self.use_mkldnn = False
self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
self.init_attrs()
self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
class XPUTestUnsqueezeOp(XPUOpTestWrapper):
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
def __init__(self):
    """Configure the XPU test wrapper for the ``unsqueeze`` operator."""
    self.op_name = "unsqueeze"
    # NOTE(review): presumably disables dynamic test-class generation in the
    # XPU test framework (classes are declared statically below) — confirm
    # against xpu.get_test_cover_info.
    self.use_dynamic_create_class = False
class TestUnsqueezeOp(XPUOpTest):
def init_test_case(self):
self.ori_shape = (3, 40)
self.axes = (1, 2)
self.new_shape = (3, 1, 1, 40)
def setUp(self):
self.op_type = "unsqueeze"
self.__class__.op_type = "unsqueeze"
self.use_mkldnn = False
self.init_test_case()
self.inputs = {
"X": np.random.random(self.ori_shape).astype(self.dtype)
}
self.init_attrs()
self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}
def init_attrs(self):
    # Operator attributes: the axes to insert, chosen in init_test_case().
    self.attrs = {"axes": self.axes}
def init_dtype(self):
    # in_type is presumably injected per-dtype by create_test_class — TODO
    # confirm against xpu.get_test_cover_info.
    self.dtype = self.in_type
def test_check_output(self):
    """Check the forward output of unsqueeze on the XPU device."""
    place = paddle.XPUPlace(0)
    self.check_output_with_place(place)
def test_check_grad(self):
    """Check the gradient of unsqueeze on the XPU device.

    Boolean tensors carry no gradient, so the check is skipped for them.
    """
    xpu_place = paddle.XPUPlace(0)
    if self.dtype == np.bool_:
        return
    self.check_grad_with_place(xpu_place, ['X'], 'Out')
# Correct: Single input index.
class TestUnsqueezeOp1(TestUnsqueezeOp):
def init_test_case(self):
    # Insert two new size-1 dimensions at positions 1 and 2.
    self.ori_shape = (3, 40)
    self.axes = (1, 2)
    self.new_shape = (3, 1, 1, 40)
def init_test_case(self):
self.ori_shape = (20, 5)
self.axes = (-1, )
self.new_shape = (20, 5, 1)
def init_attrs(self):
self.attrs = {"axes": self.axes}
# Correct: Single input index.
class TestUnsqueezeOp1(TestUnsqueezeOp):
# Correct: Mixed input axis.
class TestUnsqueezeOp2(TestUnsqueezeOp):
def init_test_case(self):
self.ori_shape = (20, 5)
self.axes = (-1, )
self.new_shape = (20, 5, 1)
def init_test_case(self):
self.ori_shape = (20, 5)
self.axes = (0, -1)
self.new_shape = (1, 20, 5, 1)
# Correct: Mixed input axis.
class TestUnsqueezeOp2(TestUnsqueezeOp):
def init_test_case(self):
self.ori_shape = (20, 5)
self.axes = (0, -1)
self.new_shape = (1, 20, 5, 1)
# Correct: There is duplicated axis.
class TestUnsqueezeOp3(TestUnsqueezeOp):
# Correct: There is duplicated axis.
class TestUnsqueezeOp3(TestUnsqueezeOp):
def init_test_case(self):
self.ori_shape = (10, 2, 5)
self.axes = (0, 3, 3)
self.new_shape = (1, 10, 2, 1, 1, 5)
def init_test_case(self):
self.ori_shape = (10, 2, 5)
self.axes = (0, 3, 3)
self.new_shape = (1, 10, 2, 1, 1, 5)
# Correct: Reversed axes.
class TestUnsqueezeOp4(TestUnsqueezeOp):
# Correct: Reversed axes.
class TestUnsqueezeOp4(TestUnsqueezeOp):
def init_test_case(self):
self.ori_shape = (10, 2, 5)
self.axes = (3, 1, 1)
self.new_shape = (10, 1, 1, 2, 5, 1)
def init_test_case(self):
self.ori_shape = (10, 2, 5)
self.axes = (3, 1, 1)
self.new_shape = (10, 1, 1, 2, 5, 1)
# Generate one concrete test class per dtype the XPU unsqueeze kernel supports.
support_types = get_xpu_op_support_types("unsqueeze")
for stype in support_types:
    create_test_class(globals(), XPUTestUnsqueezeOp, stype)

if __name__ == "__main__":
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册