Unverified commit 54d98963, authored by zhangxiaoci, committed by GitHub

refactor xpu tests for squeeze/unsqueeze, *test=kunlun (#44812)

Parent 153f1138
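The test files in this diff are moved onto the XPUOpTestWrapper pattern, where one concrete unittest class is generated per dtype that the XPU kernel registers. Below is a condensed sketch of that pattern, assembled only from code visible in the diff that follows (class bodies trimmed to the "squeeze" forward check); it assumes Paddle's XPU unit-test environment, where op_test_xpu and xpu.get_test_cover_info are importable and in_type is set on each generated class by the harness.

import unittest

import numpy as np
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestSqueezeOp(XPUOpTestWrapper):
    # The wrapper only records which op it covers; create_test_class pairs it
    # with each supported dtype and emits one concrete test class per dtype.
    def __init__(self):
        self.op_name = "squeeze"
        self.use_dynamic_create_class = False

    class TestSqueezeOp(XPUOpTest):
        def setUp(self):
            self.op_type = "squeeze"
            self.__class__.op_type = "squeeze"
            self.dtype = self.in_type  # dtype injected by the XPU test harness
            x = np.random.random((1, 3, 1, 40)).astype(self.dtype)
            self.inputs = {"X": x}
            self.attrs = {"axes": (0, 2)}  # squeeze the two size-1 dims
            self.outputs = {"Out": x.reshape((3, 40))}

        def test_check_output(self):
            self.check_output_with_place(paddle.XPUPlace(0))


support_types = get_xpu_op_support_types("squeeze")
for stype in support_types:
    create_test_class(globals(), XPUTestSqueezeOp, stype)

if __name__ == "__main__":
    unittest.main()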
@@ -468,6 +468,22 @@ XPUOpMap& get_kl2_ops() {
                     pOpKernelType(vartype::INT8, XPUPlace()),
                     pOpKernelType(vartype::UINT8, XPUPlace()),
                     pOpKernelType(vartype::FP32, XPUPlace())})},
+     {"squeeze_grad",
+      XPUKernelSet({pOpKernelType(vartype::FP64, XPUPlace()),
+                    pOpKernelType(vartype::INT64, XPUPlace()),
+                    pOpKernelType(vartype::INT32, XPUPlace()),
+                    pOpKernelType(vartype::BOOL, XPUPlace()),
+                    pOpKernelType(vartype::INT8, XPUPlace()),
+                    pOpKernelType(vartype::UINT8, XPUPlace()),
+                    pOpKernelType(vartype::FP32, XPUPlace())})},
+     {"squeeze",
+      XPUKernelSet({pOpKernelType(vartype::FP64, XPUPlace()),
+                    pOpKernelType(vartype::INT64, XPUPlace()),
+                    pOpKernelType(vartype::INT32, XPUPlace()),
+                    pOpKernelType(vartype::BOOL, XPUPlace()),
+                    pOpKernelType(vartype::INT8, XPUPlace()),
+                    pOpKernelType(vartype::UINT8, XPUPlace()),
+                    pOpKernelType(vartype::FP32, XPUPlace())})},
     {"stack",
      XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
                    pOpKernelType(vartype::INT64, XPUPlace()),
@@ -534,6 +550,22 @@ XPUOpMap& get_kl2_ops() {
                    pOpKernelType(vartype::UINT8, XPUPlace()),
                    pOpKernelType(vartype::FP32, XPUPlace()),
                    pOpKernelType(vartype::FP16, XPUPlace())})},
+     {"unsqueeze_grad",
+      XPUKernelSet({pOpKernelType(vartype::FP64, XPUPlace()),
+                    pOpKernelType(vartype::INT64, XPUPlace()),
+                    pOpKernelType(vartype::INT32, XPUPlace()),
+                    pOpKernelType(vartype::BOOL, XPUPlace()),
+                    pOpKernelType(vartype::INT8, XPUPlace()),
+                    pOpKernelType(vartype::UINT8, XPUPlace()),
+                    pOpKernelType(vartype::FP32, XPUPlace())})},
+     {"unsqueeze",
+      XPUKernelSet({pOpKernelType(vartype::FP64, XPUPlace()),
+                    pOpKernelType(vartype::INT64, XPUPlace()),
+                    pOpKernelType(vartype::INT32, XPUPlace()),
+                    pOpKernelType(vartype::BOOL, XPUPlace()),
+                    pOpKernelType(vartype::INT8, XPUPlace()),
+                    pOpKernelType(vartype::UINT8, XPUPlace()),
+                    pOpKernelType(vartype::FP32, XPUPlace())})},
     {"where_index",
      XPUKernelSet({pOpKernelType(vartype::INT32, XPUPlace()),
                    pOpKernelType(vartype::BOOL, XPUPlace()),
...
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -68,17 +68,10 @@ class XPUTestSqueeze2Op(XPUOpTestWrapper):
         def test_check_grad(self):
             place = paddle.XPUPlace(0)
-            if self.dtype in [np.float32, np.float64]:
-                self.check_grad_with_place(place, ['X'], 'Out')
-            elif self.dtype == np.bool_:
+            if self.dtype == np.bool_:
                 return
             else:
-                user_defined_grad_outputs = np.random.random(
-                    self.new_shape).astype(self.dtype)
-                self.check_grad_with_place(
-                    place, ['X'],
-                    'Out',
-                    user_defined_grad_outputs=user_defined_grad_outputs)
+                self.check_grad_with_place(place, ['X'], 'Out')

     # Correct: There is mins axis.
     class TestSqueeze2Op1(TestSqueeze2Op):
...
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,80 +22,90 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid import Program, program_guard
 from op_test import OpTest
 from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

 paddle.enable_static()


-# Correct: General.
-class TestSqueezeOp(XPUOpTest):
-
-    def setUp(self):
-        self.op_type = "squeeze"
-        self.use_xpu = True
-        self.use_mkldnn = False
-        self.init_test_case()
-        self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
-        self.init_attrs()
-        self.outputs = {
-            "Out": self.inputs["X"].reshape(self.new_shape),
-        }
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
-
-    def init_test_case(self):
-        self.ori_shape = (1, 3, 1, 40)
-        self.axes = (0, 2)
-        self.new_shape = (3, 40)
-
-    def init_attrs(self):
-        self.attrs = {"axes": self.axes}
-
-
-# Correct: There is mins axis.
-class TestSqueezeOp1(TestSqueezeOp):
-
-    def init_test_case(self):
-        self.ori_shape = (1, 3, 1, 40)
-        self.axes = (0, -2)
-        self.new_shape = (3, 40)
-
-
-# Correct: No axes input.
-class TestSqueezeOp2(TestSqueezeOp):
-
-    def init_test_case(self):
-        self.ori_shape = (1, 20, 1, 5)
-        self.axes = ()
-        self.new_shape = (20, 5)
-
-
-# Correct: Just part of axes be squeezed.
-class TestSqueezeOp3(TestSqueezeOp):
-
-    def init_test_case(self):
-        self.ori_shape = (6, 1, 5, 1, 4, 1)
-        self.axes = (1, -1)
-        self.new_shape = (6, 5, 1, 4)
-
-
-# Correct: The demension of axis is not of size 1 remains unchanged.
-class TestSqueezeOp4(TestSqueezeOp):
-
-    def init_test_case(self):
-        self.ori_shape = (6, 1, 5, 1, 4, 1)
-        self.axes = (1, 2)
-        self.new_shape = (6, 5, 1, 4, 1)
+class XPUTestSqueezeOp(XPUOpTestWrapper):
+
+    def __init__(self):
+        self.op_name = "squeeze"
+        self.use_dynamic_create_class = False
+
+    # Correct: General.
+    class TestSqueezeOp(XPUOpTest):
+
+        def setUp(self):
+            self.op_type = "squeeze"
+            self.__class__.op_type = "squeeze"
+            self.use_mkldnn = False
+            self.init_dtype()
+            self.init_test_case()
+            self.inputs = {
+                "X": np.random.random(self.ori_shape).astype(self.dtype)
+            }
+            self.init_attrs()
+            self.outputs = {
+                "Out": self.inputs["X"].reshape(self.new_shape),
+            }
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def test_check_output(self):
+            place = paddle.XPUPlace(0)
+            self.check_output_with_place(place)
+
+        def test_check_grad(self):
+            place = paddle.XPUPlace(0)
+            if self.dtype == np.bool_:
+                return
+            else:
+                self.check_grad_with_place(place, ['X'], 'Out')
+
+        def init_test_case(self):
+            self.ori_shape = (1, 3, 1, 40)
+            self.axes = (0, 2)
+            self.new_shape = (3, 40)
+
+        def init_attrs(self):
+            self.attrs = {"axes": self.axes}
+
+    # Correct: There is mins axis.
+    class TestSqueezeOp1(TestSqueezeOp):
+
+        def init_test_case(self):
+            self.ori_shape = (1, 3, 1, 40)
+            self.axes = (0, -2)
+            self.new_shape = (3, 40)
+
+    # Correct: No axes input.
+    class TestSqueezeOp2(TestSqueezeOp):
+
+        def init_test_case(self):
+            self.ori_shape = (1, 20, 1, 5)
+            self.axes = ()
+            self.new_shape = (20, 5)
+
+    # Correct: Just part of axes be squeezed.
+    class TestSqueezeOp3(TestSqueezeOp):
+
+        def init_test_case(self):
+            self.ori_shape = (6, 1, 5, 1, 4, 1)
+            self.axes = (1, -1)
+            self.new_shape = (6, 5, 1, 4)
+
+    # Correct: The demension of axis is not of size 1 remains unchanged.
+    class TestSqueezeOp4(TestSqueezeOp):
+
+        def init_test_case(self):
+            self.ori_shape = (6, 1, 5, 1, 4, 1)
+            self.axes = (1, 2)
+            self.new_shape = (6, 5, 1, 4, 1)


 class TestSqueezeOpError(unittest.TestCase):
@@ -115,5 +125,9 @@ class TestSqueezeOpError(unittest.TestCase):
         self.assertRaises(TypeError, paddle.squeeze, x3, axes=0)


+support_types = get_xpu_op_support_types("squeeze")
+for stype in support_types:
+    create_test_class(globals(), XPUTestSqueezeOp, stype)
+
 if __name__ == "__main__":
     unittest.main()
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -69,17 +69,10 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper):
         def test_check_grad(self):
             place = paddle.XPUPlace(0)
-            if self.dtype in [np.float32, np.float64, np.float16]:
-                self.check_grad_with_place(place, ['X'], 'Out')
-            elif self.dtype == np.bool_:
+            if self.dtype == np.bool_:
                 return
             else:
-                user_defined_grad_outputs = np.random.random(
-                    self.new_shape).astype(self.dtype)
-                self.check_grad_with_place(
-                    place, ['X'],
-                    'Out',
-                    user_defined_grad_outputs=user_defined_grad_outputs)
+                self.check_grad_with_place(place, ['X'], 'Out')

     # Correct: Single input index.
     class TestUnsqueeze2Op1(TestUnsqueeze2Op):
...
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,76 +24,89 @@ import paddle
 import paddle.fluid as fluid
 from op_test import OpTest
 from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

 paddle.enable_static()


 # Correct: General.
-class TestUnsqueezeOp(XPUOpTest):
-
-    def setUp(self):
-        self.init_test_case()
-        self.op_type = "unsqueeze"
-        self.use_xpu = True
-        self.use_mkldnn = False
-        self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
-        self.init_attrs()
-        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
-
-    def init_test_case(self):
-        self.ori_shape = (3, 40)
-        self.axes = (1, 2)
-        self.new_shape = (3, 1, 1, 40)
-
-    def init_attrs(self):
-        self.attrs = {"axes": self.axes}
-
-
-# Correct: Single input index.
-class TestUnsqueezeOp1(TestUnsqueezeOp):
-
-    def init_test_case(self):
-        self.ori_shape = (20, 5)
-        self.axes = (-1, )
-        self.new_shape = (20, 5, 1)
-
-
-# Correct: Mixed input axis.
-class TestUnsqueezeOp2(TestUnsqueezeOp):
-
-    def init_test_case(self):
-        self.ori_shape = (20, 5)
-        self.axes = (0, -1)
-        self.new_shape = (1, 20, 5, 1)
-
-
-# Correct: There is duplicated axis.
-class TestUnsqueezeOp3(TestUnsqueezeOp):
-
-    def init_test_case(self):
-        self.ori_shape = (10, 2, 5)
-        self.axes = (0, 3, 3)
-        self.new_shape = (1, 10, 2, 1, 1, 5)
-
-
-# Correct: Reversed axes.
-class TestUnsqueezeOp4(TestUnsqueezeOp):
-
-    def init_test_case(self):
-        self.ori_shape = (10, 2, 5)
-        self.axes = (3, 1, 1)
-        self.new_shape = (10, 1, 1, 2, 5, 1)
+class XPUTestUnsqueezeOp(XPUOpTestWrapper):
+
+    def __init__(self):
+        self.op_name = "unsqueeze"
+        self.use_dynamic_create_class = False
+
+    class TestUnsqueezeOp(XPUOpTest):
+
+        def setUp(self):
+            self.op_type = "unsqueeze"
+            self.__class__.op_type = "unsqueeze"
+            self.use_mkldnn = False
+            self.init_dtype()
+            self.init_test_case()
+            self.inputs = {
+                "X": np.random.random(self.ori_shape).astype(self.dtype)
+            }
+            self.init_attrs()
+            self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def test_check_output(self):
+            place = paddle.XPUPlace(0)
+            self.check_output_with_place(place)
+
+        def test_check_grad(self):
+            place = paddle.XPUPlace(0)
+            if self.dtype == np.bool_:
+                return
+            else:
+                self.check_grad_with_place(place, ['X'], 'Out')

+        def init_test_case(self):
+            self.ori_shape = (3, 40)
+            self.axes = (1, 2)
+            self.new_shape = (3, 1, 1, 40)
+
+        def init_attrs(self):
+            self.attrs = {"axes": self.axes}
+
+    # Correct: Single input index.
+    class TestUnsqueezeOp1(TestUnsqueezeOp):
+
+        def init_test_case(self):
+            self.ori_shape = (20, 5)
+            self.axes = (-1, )
+            self.new_shape = (20, 5, 1)
+
+    # Correct: Mixed input axis.
+    class TestUnsqueezeOp2(TestUnsqueezeOp):
+
+        def init_test_case(self):
+            self.ori_shape = (20, 5)
+            self.axes = (0, -1)
+            self.new_shape = (1, 20, 5, 1)
+
+    # Correct: There is duplicated axis.
+    class TestUnsqueezeOp3(TestUnsqueezeOp):
+
+        def init_test_case(self):
+            self.ori_shape = (10, 2, 5)
+            self.axes = (0, 3, 3)
+            self.new_shape = (1, 10, 2, 1, 1, 5)
+
+    # Correct: Reversed axes.
+    class TestUnsqueezeOp4(TestUnsqueezeOp):
+
+        def init_test_case(self):
+            self.ori_shape = (10, 2, 5)
+            self.axes = (3, 1, 1)
+            self.new_shape = (10, 1, 1, 2, 5, 1)
+
+
+support_types = get_xpu_op_support_types("unsqueeze")
+for stype in support_types:
+    create_test_class(globals(), XPUTestUnsqueezeOp, stype)

 if __name__ == "__main__":
     unittest.main()