Unverified commit 6262efb5, authored by z8hanghuan, committed by GitHub

[cherry-pick] to Release/2.3, modify scale op xpu unittest (#43657)

* modify xpu.cmake,*test=kunlun (#41832)

* modify xpu.cmake,*test=kunlun

* modify xpu.cmake,*test=kunlun

* modify xpu.cmake,*test=kunlun

* modify xpu.cmake,*test=kunlun

* support bilstm,*test=kunlun

* [cherry-pick]support multi_layer of bilstm,*test=kunlun

* [cherry-pick]refactor sum unit test,*test=kunlun (#43561)
Parent: 638b69dc
@@ -43,9 +43,7 @@ class XPUTestScaleOp(XPUOpTestWrapper):
             self.place = paddle.XPUPlace(0)
             self.set_inputs()
             self.set_attrs()
-            self.outputs = {
-                'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
-            }
+            self.set_output()

         def set_xpu(self):
             self.__class__.use_xpu = True
@@ -55,6 +53,16 @@ class XPUTestScaleOp(XPUOpTestWrapper):
         def set_inputs(self):
             self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}

+        def set_output(self):
+            if "float16" == self.in_type:
+                output = self.inputs['X'] * np.float16(self.attrs['scale'])
+            elif "int64" == self.in_type:
+                output = self.inputs['X'] * np.int64(self.attrs['scale'])
+            else:
+                output = self.inputs['X'] * np.float32(self.attrs['scale'])
+            self.outputs = {'Out': output}
+
         def init_dtype(self):
             if "float16" == self.in_type:
                 self.dtype = np.float16
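For context, the scale kernel exercised here computes Out = X * scale (plus an optional bias that defaults to zero), and the refactored set_output builds the expected value by first casting the scale attribute to the input dtype. A quick dynamic-graph sanity check of that semantics with the public paddle.scale API is sketched below; the scale value -2.3 and the float32 dtype are illustrative only, since the test's set_attrs is not shown in this diff.

import numpy as np
import paddle

# Illustrative check (not part of the diff): Out = X * scale, bias defaults to 0.0.
# The unit test runs in static mode on an XPUPlace; this sketch uses dynamic mode on CPU.
paddle.disable_static()
x_np = np.random.random((10, 10)).astype('float32')
out = paddle.scale(paddle.to_tensor(x_np), scale=-2.3)
expected = x_np * np.float32(-2.3)  # same cast-the-scalar rule as set_output()
np.testing.assert_allclose(out.numpy(), expected, rtol=1e-6)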
@@ -19,56 +19,70 @@ import unittest
 import numpy as np
 from op_test_xpu import XPUOpTest
 import paddle
-from paddle import enable_static
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 from paddle.fluid.tests.unittests.op_test import (
     OpTest, convert_float_to_uint16, convert_uint16_to_float)
 from paddle import _C_ops
+import op_test
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

 paddle.enable_static()


-class TestSumOp(XPUOpTest):
-    def setUp(self):
-        self.op_type = "sum"
-        self.init_kernel_type()
-        self.init_kernel_type()
-        x0 = np.random.random((3, 40)).astype(self.dtype)
-        x1 = np.random.random((3, 40)).astype(self.dtype)
-        x2 = np.random.random((3, 40)).astype(self.dtype)
-        self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
-        y = x0 + x1 + x2
-        self.outputs = {'Out': y}
-
-    def init_kernel_type(self):
-        self.dtype = np.float32
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-
-
-#----------- test fp16 -----------
-class TestFP16SumOp(TestSumOp):
-    def init_kernel_type(self):
-        self.dtype = np.float16
-
-    def test_check_output(self):
-        place = core.XPUPlace(0)
-        # if core.is_float16_supported(place):
-        self.check_output_with_place(place, atol=2e-2)
-
-    # FIXME: Because of the precision fp16, max_relative_error
-    # should be 0.15 here.
-    def test_check_grad(self):
-        place = core.XPUPlace(0)
-        # if core.is_float16_supported(place):
-        self.check_grad_with_place(
-            place, ['x0'], 'Out', max_relative_error=0.15)
+class XPUTestSumOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'sum'
+        self.use_dynamic_create_class = False
+
+    class TestSumOp(XPUOpTest):
+        def setUp(self):
+            self.init_dtype()
+            self.set_xpu()
+            self.op_type = "sum"
+            self.place = paddle.XPUPlace(0)
+            self.set_shape()
+            x0 = np.random.random(self.shape).astype(self.dtype)
+            x1 = np.random.random(self.shape).astype(self.dtype)
+            x2 = np.random.random(self.shape).astype(self.dtype)
+            self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
+            y = x0 + x1 + x2
+            self.outputs = {'Out': y}
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+            self.__class__.no_need_check_grad = True
+            self.__class__.op_type = self.dtype
+
+        def set_shape(self):
+            self.shape = (3, 10)
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+        def test_check_grad(self):
+            self.check_grad_with_place(self.place, ['x0'], 'Out')
+
+    class TestSumOp1(TestSumOp):
+        def set_shape(self):
+            self.shape = (5)
+
+    class TestSumOp2(TestSumOp):
+        def set_shape(self):
+            self.shape = (1, 1, 1, 1, 1)
+
+    class TestSumOp3(TestSumOp):
+        def set_shape(self):
+            self.shape = (10, 5, 7)
+
+    class TestSumOp4(TestSumOp):
+        def set_shape(self):
+            self.shape = (2, 2, 3, 3)


 def create_test_sum_fp16_class(parent):
@@ -182,6 +196,9 @@ class TestSumOpError(unittest.TestCase):
         self.assertRaises(Exception, test_list_of_none_input)


+support_types = get_xpu_op_support_types('sum')
+for stype in support_types:
+    create_test_class(globals(), XPUTestSumOp, stype)
+
 if __name__ == "__main__":
-    enable_static()
     unittest.main()
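The block added at the bottom of the file is what turns the wrapper into runnable tests: get_xpu_op_support_types('sum') reports the dtypes the XPU sum kernel is registered for, and create_test_class stamps out one concrete TestCase per dtype from the classes nested in XPUTestSumOp (the injected in_type is what init_dtype reads). Below is a rough, self-contained sketch of that registration pattern; it is a simplification, not the real helpers from xpu.get_test_cover_info, and the class name and dtype list are made up for illustration.

import unittest
import numpy as np

class DummySumTest(unittest.TestCase):
    """Stand-in for the nested TestSumOp; in_type is injected per supported dtype."""
    in_type = 'float32'

    def test_sum(self):
        # sum op semantics: elementwise sum of all inputs.
        xs = [np.ones((3, 10), dtype=self.in_type) for _ in range(3)]
        expected = np.full((3, 10), 3, dtype=self.in_type)
        np.testing.assert_array_equal(xs[0] + xs[1] + xs[2], expected)

def create_test_class(scope, base, stype):
    # Derive and register a per-dtype TestCase so unittest discovery picks up
    # e.g. DummySumTest_float16 alongside the float32 variant.
    name = '%s_%s' % (base.__name__, stype)
    scope[name] = type(name, (base,), {'in_type': stype})

# Stand-in for get_xpu_op_support_types('sum'); the real list comes from the
# kernels actually registered in the XPU build.
for stype in ['float32', 'float16', 'int64']:
    create_test_class(globals(), DummySumTest, stype)

if __name__ == '__main__':
    unittest.main()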