Unverified commit f8a8dd5e, authored by wanghuancoder, committed by GitHub

delete old dygraph xpu op test (#51955)

* delete old dygraph xpu op test
Parent: cc9bbd5b
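Note: the diff below converges every XPU op test on one pattern: import the test base from eager_op_test instead of op_test, and drop the separate check_eager switch in favor of check_dygraph. A minimal sketch of the resulting test shape, assuming the eager_op_test module shown in the hunks below is importable; the op, shapes, and assertions here are illustrative, not taken from this PR:

    import unittest

    import numpy as np
    import paddle
    from eager_op_test import OpTest


    class TestSignOp(OpTest):
        def setUp(self):
            self.op_type = "sign"
            # eager_op_test resolves the dygraph call through python_api
            self.python_api = paddle.sign
            x = np.random.uniform(-1, 1, [4, 5]).astype(np.float32)
            self.inputs = {'X': x}
            self.outputs = {'Out': np.sign(x)}

        def test_check_output(self):
            # check_eager is gone; eager-mode checking folds into check_dygraph
            self.check_output(check_dygraph=True)

        def test_check_grad(self):
            self.check_grad(['X'], 'Out', check_dygraph=True)


    if __name__ == '__main__':
        unittest.main()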
@@ -930,7 +930,14 @@ class OpTest(unittest.TestCase):
args, len(inputs_sig)
)
ret_tuple = python_api(*args)
return construct_output_dict_by_kernel_sig(ret_tuple, outputs_sig)
result = construct_output_dict_by_kernel_sig(ret_tuple, outputs_sig)
if hasattr(self, "python_out_sig_sub_name"):
for key in self.python_out_sig_sub_name.keys():
for i in range(len(self.python_out_sig_sub_name[key])):
result[key][0][i].name = self.python_out_sig_sub_name[
key
][i]
return result
with fluid.dygraph.base.guard(place=place):
block = fluid.default_main_program().global_block()
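Note: the added block renames the sub-tensors of a list-valued output so that later name-based lookups match. A self-contained illustration of the data shapes it assumes ({output_name: [[tensor, ...]]} for result, {output_name: [sub_name, ...]} for python_out_sig_sub_name); the stand-in class and names are hypothetical:

    class _T:
        # stand-in for a tensor with a settable .name attribute
        def __init__(self, name):
            self.name = name


    result = {"Out": [[_T("tmp_0"), _T("tmp_1")]]}                  # hypothetical
    python_out_sig_sub_name = {"Out": ["out_sub_0", "out_sub_1"]}   # hypothetical

    for key in python_out_sig_sub_name.keys():
        for i in range(len(python_out_sig_sub_name[key])):
            result[key][0][i].name = python_out_sig_sub_name[key][i]

    assert [t.name for t in result["Out"][0]] == ["out_sub_0", "out_sub_1"]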
@@ -965,7 +972,11 @@ class OpTest(unittest.TestCase):
dygraph_tensor_outputs,
attrs_outputs,
)
if not kernel_sig:
if not kernel_sig or (
len(kernel_sig[0]) == 0
and len(kernel_sig[1]) == 0
and len(kernel_sig[2]) == 0
):
return None
if not hasattr(self, "python_api"):
print(kernel_sig)
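Note: the widened guard treats a kernel signature whose input, attribute, and output lists are all empty the same as a missing one, so the caller falls back to the legacy path. A minimal restatement of the condition, assuming kernel_sig is an (inputs, attrs, outputs) triple of name lists:

    def signature_is_usable(kernel_sig):
        # None or an empty container means no signature at all
        if not kernel_sig:
            return False
        # a triple whose three parts are all empty is equally unusable
        return any(len(part) != 0 for part in kernel_sig)


    assert not signature_is_usable(None)
    assert not signature_is_usable(([], [], []))
    assert signature_is_usable((["X"], [], ["Out"]))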
@@ -1514,13 +1525,23 @@ class OpTest(unittest.TestCase):
core._set_prim_all_enabled(False)
core.set_prim_eager_enabled(False)
if hasattr(self, "use_custom_device") and self.use_custom_device():
check_dygraph = False
def find_imperative_actual(target_name, dygraph_outs, place):
for name in dygraph_outs:
if name == target_name:
return dygraph_outs[name][0]
var_list = dygraph_outs[name]
for i, var in enumerate(var_list):
if var.name == target_name:
if isinstance(var, list):
for tensor in var:
if tensor.name == target_name:
return tensor
elif (
isinstance(var, paddle.Tensor)
and var.name == target_name
):
return dygraph_outs[name][i]
self.assertTrue(
False,
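Note: find_imperative_actual now has to handle dygraph outputs whose entries are themselves lists of tensors, walking into each list and matching by name. A runnable sketch of the same lookup with a stand-in tensor class (names are hypothetical):

    class _T:
        def __init__(self, name):
            self.name = name


    def find_actual(target_name, dygraph_outs):
        for name, var_list in dygraph_outs.items():
            if name == target_name:
                return var_list[0]
            for i, var in enumerate(var_list):
                if isinstance(var, list):
                    # list-valued entry: match an inner tensor by name
                    for tensor in var:
                        if tensor.name == target_name:
                            return tensor
                elif getattr(var, "name", None) == target_name:
                    return var_list[i]
        raise AssertionError(f"{target_name} not in outputs: {list(dygraph_outs)}")


    outs = {"Out": [[_T("sub_a"), _T("sub_b")], _T("plain")]}
    assert find_actual("sub_b", outs).name == "sub_b"
    assert find_actual("plain", outs).name == "plain"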
@@ -1653,6 +1674,8 @@ class OpTest(unittest.TestCase):
actual_np, expect_np = self.convert_uint16_to_float_ifneed(
actual_np, expect_np
)
# modify here for fp32 check
# NOTE(zhiqiu): np.allclose([], [1.]) returns True
# see details: https://stackoverflow.com/questions/38331703/why-does-numpys-broadcasting-sometimes-allow-comparing-arrays-of-different-leng
if expect_np.size == 0:
@@ -1768,19 +1791,18 @@ class OpTest(unittest.TestCase):
place, no_check_set=no_check_set
)
self.outputs = dygraph_outs
if self.op_test.is_fp16_compared_with_fp32():
self.op_test.enable_cal_ref_output()
self.is_python_api_test = True
ref_dygraph_outs = self.op_test._calc_python_api_output(
self.ref_outputs = self.op_test._calc_python_api_output(
place
)
if ref_dygraph_outs is None:
if self.ref_outputs is None:
self.is_python_api_test = False
ref_dygraph_outs = self.op_test._calc_dygraph_output(
# missing KernelSignature: fall back to the eager intermediate output.
self.ref_outputs = self.op_test._calc_dygraph_output(
place, no_check_set=no_check_set
)
self.ref_outputs = ref_dygraph_outs
self.op_test.disable_cal_ref_output()
def _compare_numpy(self, name, actual_np, expect_np):
@@ -1911,7 +1933,7 @@ class OpTest(unittest.TestCase):
else:
atol = 2 if atol < 2 else atol
else:
atol = 1e-1 if atol < 1e-1 else atol
atol = 1e-2 if atol < 1e-2 else atol
if self.is_float16_op():
atol = 1e-3 if atol < 1e-3 else atol
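Note: these lines clamp atol from below, so lowering the bound from 1e-1 to 1e-2 tightens the default tolerance on this branch, with fp16 ops floored at 1e-3. A one-liner showing the clamping direction:

    atol = 1e-5
    atol = 1e-2 if atol < 1e-2 else atol  # new, tighter floor (was 1e-1)
    assert atol == 1e-2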
@@ -2050,6 +2072,9 @@ class OpTest(unittest.TestCase):
if self.is_xpu_op():
self.__class__.use_xpu = True
if hasattr(self, "use_custom_device") and self.use_custom_device():
check_dygraph = False
places = self._get_places()
for place in places:
res = self.check_output_with_place(
@@ -2072,6 +2097,7 @@ class OpTest(unittest.TestCase):
self.check_compile_vs_runtime(fetch_list, outs)
def check_output_customized(self, checker, custom_place=None):
self.__class__.op_type = self.op_type
places = self._get_places()
if custom_place:
places.append(custom_place)
@@ -2160,6 +2186,9 @@ class OpTest(unittest.TestCase):
else:
abs_a = 1 if abs_a < 1e-3 else abs_a
if self.dtype == np.bool_:
diff_mat = np.abs(a ^ b) / abs_a
else:
diff_mat = np.abs(a - b) / abs_a
max_diff = np.max(diff_mat)
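Note: for boolean outputs the element-wise difference is computed with XOR, since subtracting NumPy bool arrays raises a TypeError. A quick check of the added branch:

    import numpy as np

    a = np.array([True, False, True])
    b = np.array([True, True, True])
    abs_a = 1  # the caller clamps abs_a to at least 1
    diff_mat = np.abs(a ^ b) / abs_a  # XOR marks the positions that differ
    assert diff_mat.max() == 1.0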
@@ -2205,6 +2234,9 @@ class OpTest(unittest.TestCase):
only_check_prim=False,
atol=1e-5,
):
if hasattr(self, "use_custom_device") and self.use_custom_device():
check_dygraph = False
self._check_grad_helper()
places = self._get_places()
for place in places:
@@ -2241,6 +2273,9 @@ class OpTest(unittest.TestCase):
numeric_place=None,
atol=1e-5,
):
if hasattr(self, "use_custom_device") and self.use_custom_device():
check_dygraph = False
core._set_prim_all_enabled(False)
core.set_prim_eager_enabled(False)
if check_prim:
@@ -2337,6 +2372,17 @@ class OpTest(unittest.TestCase):
if numeric_place is None:
numeric_place = place
if user_defined_grads is None and self.is_fp16_compared_with_fp32():
self.enable_cal_ref_output()
numeric_grads = self._get_gradient(
inputs_to_check,
place,
output_names,
no_grad_set,
user_defined_grad_outputs,
)
self.disable_cal_ref_output()
else:
numeric_grads = user_defined_grads or [
get_numeric_gradient(
numeric_place,
@@ -2350,6 +2396,7 @@ class OpTest(unittest.TestCase):
)
for input_to_check in inputs_to_check
]
analytic_grads = self._get_gradient(
inputs_to_check,
place,
@@ -2429,8 +2476,14 @@ class OpTest(unittest.TestCase):
else:
for output_vars_index in output_vars:
for output_vars_selected in output_vars[output_vars_index]:
if isinstance(output_vars_selected, list):
for tensor in output_vars_selected:
if tensor.name == name:
return [tensor]
elif isinstance(output_vars_selected, paddle.Tensor):
if output_vars_selected.name == name:
return output_vars_selected
return [output_vars_selected]
raise AssertionError(name, " not in outputs:", output_vars.keys())
def _get_dygraph_grad(
self,
@@ -2441,6 +2494,9 @@ class OpTest(unittest.TestCase):
no_grad_set=None,
check_dygraph=True,
):
if hasattr(self, "use_custom_device") and self.use_custom_device():
check_dygraph = False
with fluid.dygraph.base.guard(place=place):
block = fluid.default_main_program().global_block()
......
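Note: taken together, the eager_op_test.py hunks above make the fp16-vs-fp32 comparison recompute its reference between enable_cal_ref_output()/disable_cal_ref_output() toggles, preferring the python-API path and falling back to the legacy dygraph trace when no kernel signature is available. A runnable stand-in for that control flow (the class below only mimics the toggling; it is not the Paddle implementation):

    class _RefFlow:
        """Mimics the reference-output toggling in the hunks above."""

        def __init__(self):
            self.cal_ref = False

        def enable_cal_ref_output(self):
            self.cal_ref = True

        def disable_cal_ref_output(self):
            self.cal_ref = False

        def _calc_python_api_output(self):
            return None  # pretend the KernelSignature is missing

        def _calc_dygraph_output(self):
            return {"Out": [0.0]}

        def compute_reference(self):
            self.enable_cal_ref_output()
            try:
                ref = self._calc_python_api_output()
                if ref is None:
                    # missing KernelSignature: fall back to the dygraph trace
                    ref = self._calc_dygraph_output()
            finally:
                self.disable_cal_ref_output()
            return ref


    assert _RefFlow().compute_reference() == {"Out": [0.0]}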
@@ -13,7 +13,7 @@
# limitations under the License.
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from testsuite import append_loss_ops, create_op, set_input
from white_list import no_grad_set_white_list, op_threshold_white_list
from xpu.get_test_cover_info import (
@@ -71,7 +71,6 @@ class XPUOpTest(OpTest):
equal_nan=False,
check_dygraph=True,
inplace_atol=None,
check_eager=False,
):
place = paddle.XPUPlace(0)
self.check_output_with_place(
@@ -81,7 +80,6 @@ class XPUOpTest(OpTest):
equal_nan,
check_dygraph,
inplace_atol,
check_eager,
)
def check_output_with_place(
@@ -92,7 +90,6 @@ class XPUOpTest(OpTest):
equal_nan=False,
check_dygraph=True,
inplace_atol=None,
check_eager=False,
):
self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
if self.dtype == np.float64:
@@ -120,7 +117,6 @@ class XPUOpTest(OpTest):
user_defined_grad_outputs=None,
check_dygraph=True,
numeric_place=None,
check_eager=False,
):
place = paddle.XPUPlace(0)
self.check_grad_with_place(
@@ -135,7 +131,6 @@ class XPUOpTest(OpTest):
user_defined_grad_outputs,
check_dygraph,
numeric_place,
check_eager,
)
def check_grad_with_place(
@@ -151,7 +146,6 @@ class XPUOpTest(OpTest):
user_defined_grad_outputs=None,
check_dygraph=True,
numeric_place=None,
check_eager=False,
):
if hasattr(self, 'op_type_need_check_grad'):
xpu_version = core.get_xpu_device_version(0)
......
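Note: with check_eager removed from every XPUOpTest entry point above, call sites shrink by one argument and check_dygraph becomes the single switch. A hedged schematic of the change (the signature below is simplified for illustration; the real methods take many more parameters):

    def check_grad_with_place(
        place,
        inputs_to_check,
        output_names,
        check_dygraph=True,   # the remaining switch
        # check_eager=False,  # removed by this PR
    ):
        return {"place": place, "check_dygraph": check_dygraph}


    # old call sites like
    #     check_grad_with_place(place, ['X'], 'Out', check_eager=True)
    # now read:
    out = check_grad_with_place("XPUPlace(0)", ['X'], 'Out', check_dygraph=True)
    assert out["check_dygraph"] is True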
@@ -19,7 +19,7 @@ import numpy as np
sys.path.append("..")
from op_test import OpTest
from eager_op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -40,7 +40,6 @@ class XPUTestAtanOp(XPUOpTestWrapper):
def setUp(self):
self.set_xpu()
self.op_type = "atan"
self.eager_mode = True
# override
self.init_input_shape()
@@ -62,9 +61,7 @@ class XPUTestAtanOp(XPUOpTestWrapper):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['X'], 'Out', check_eager=self.eager_mode
)
self.check_grad_with_place(self.place, ['X'], 'Out')
class Test1x1(TestAtanOp):
def init_input_shape(self):
......
@@ -19,7 +19,7 @@ import numpy as np
sys.path.append("..")
from op_test import OpTest
from eager_op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -97,7 +97,7 @@ class XPUTestClipOp(XPUOpTestWrapper):
if core.is_compiled_with_xpu():
paddle.enable_static()
self.check_grad_with_place(
self.place, ['X'], 'Out', check_eager=True
self.place, ['X'], 'Out', check_dygraph=True
)
paddle.disable_static()
......
@@ -18,7 +18,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import skip_check_grad_ci
from eager_op_test import skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -58,11 +58,11 @@ class XPUTestDiagV2Op(XPUOpTestWrapper):
def test_check_output(self):
paddle.enable_static()
self.check_output(check_eager=False)
self.check_output(check_dygraph=False)
def test_check_grad(self):
paddle.enable_static()
self.check_grad(['X'], 'Out', check_eager=False)
self.check_grad(['X'], 'Out', check_dygraph=False)
def init_config(self):
pass
......
@@ -20,7 +20,7 @@ import numpy as np
import paddle
sys.path.append("..")
from op_test import skip_check_grad_ci
from eager_op_test import skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -19,7 +19,7 @@ import numpy as np
sys.path.append("..")
import unittest
from op_test import OpTest, skip_check_grad_ci
from eager_op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -19,7 +19,7 @@ import numpy as np
sys.path.append("..")
import unittest
from op_test import OpTest, skip_check_grad_ci
from eager_op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
import paddle
......
@@ -17,7 +17,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import skip_check_grad_ci
from eager_op_test import skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -17,7 +17,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -17,7 +17,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import skip_check_grad_ci
from eager_op_test import skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -17,7 +17,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import skip_check_grad_ci
from eager_op_test import skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -17,7 +17,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -17,7 +17,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
from eager_op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -17,7 +17,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
from eager_op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -19,7 +19,7 @@ import numpy as np
sys.path.append("..")
import unittest
from op_test import skip_check_grad_ci
from eager_op_test import skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -18,7 +18,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import convert_float_to_uint16
from eager_op_test import convert_float_to_uint16
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -20,7 +20,7 @@ import numpy as np
import paddle
sys.path.append("..")
from op_test import skip_check_grad_ci
from eager_op_test import skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -18,7 +18,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
create_test_class,
......
@@ -52,7 +52,7 @@ class XPUTestGatherNd(XPUOpTestWrapper):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False)
self.check_grad(['X'], 'Out', check_dygraph=False)
def init_data(self):
self.xnp = np.random.random((5, 20)).astype(self.in_type)
......
@@ -19,7 +19,7 @@ import numpy as np
sys.path.append("..")
from op_test import OpTest
from eager_op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -19,7 +19,7 @@ import numpy as np
sys.path.append("..")
from op_test import OpTest
from eager_op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -72,7 +72,7 @@ class XPUTestKLDivLossOp(XPUOpTestWrapper):
self.outputs = {'Loss': loss.astype('float32')}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output(check_dygraph=True)
def test_check_grad(self):
self.check_grad_with_place(
@@ -80,7 +80,7 @@ class XPUTestKLDivLossOp(XPUOpTestWrapper):
['X'],
'Loss',
no_grad_set=set(["Target"]),
check_eager=True,
check_dygraph=True,
)
def initTestCase(self):
......
@@ -18,7 +18,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......
@@ -91,14 +91,14 @@ class XPUTestLogSoftmaxOp(XPUOpTestWrapper):
pass
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output(check_dygraph=True)
def test_check_grad(self):
self.check_grad(
['X'],
['Out'],
user_defined_grads=[self.x_grad],
check_eager=True,
check_dygraph=True,
)
......
@@ -19,7 +19,7 @@ import numpy as np
sys.path.append("..")
from op_test import OpTest
from eager_op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -94,10 +94,10 @@ class XPUTestPad3dOp(XPUOpTestWrapper):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output(check_dygraph=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out', check_dygraph=True)
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
......
@@ -78,7 +78,6 @@ class XPUTestPixelShuffleOp(XPUOpTestWrapper):
self.set_xpu()
self.op_type = "pixel_shuffle"
self.init_dtype()
self.eager_mode = True
# override
self.init_input_shape()
@@ -109,9 +108,7 @@ class XPUTestPixelShuffleOp(XPUOpTestWrapper):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['X'], 'Out', check_eager=self.eager_mode
)
self.check_grad_with_place(self.place, ['X'], 'Out')
class TestNHWC(TestPixelShuffleOp):
def init_input_shape(self):
......
@@ -43,7 +43,6 @@ class XPUTestPReluOp(XPUOpTestWrapper):
self.set_xpu()
self.op_type = "prelu"
self.init_dtype()
self.eager_mode = True
# override
self.init_input_shape()
@@ -70,8 +69,6 @@ class XPUTestPReluOp(XPUOpTestWrapper):
)
else:
self.alpha = np.random.uniform(-1, -0.5, [1] + self.x_shape[1:])
# eager check doesn't support mode = 'all'
self.eager_mode = False
self.alpha = self.alpha.astype(self.dtype)
self.inputs = {'X': self.x, 'Alpha': self.alpha}
@@ -115,7 +112,7 @@ class XPUTestPReluOp(XPUOpTestWrapper):
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['X', 'Alpha'], 'Out', check_eager=self.eager_mode
self.place, ['X', 'Alpha'], 'Out', check_dygraph=False
)
class TestModeChannelNHWC(TestPReluOp):
......
@@ -19,7 +19,7 @@ import numpy as np
sys.path.append("..")
from op_test import OpTest
from eager_op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -18,7 +18,7 @@ sys.path.append("..")
import unittest
import numpy as np
from op_test import skip_check_grad_ci
from eager_op_test import skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
......
@@ -83,10 +83,10 @@ class XPUTestTemporalShiftOp(XPUOpTestWrapper):
self.python_out_sig = ["Out"]
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output(check_dygraph=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out', check_dygraph=True)
def initTestCase(self):
self.x_shape = (6, 4, 4, 4)
......
@@ -311,7 +311,7 @@ class XPUTestWarpCTCOp(XPUOpTestWrapper):
}
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output(check_dygraph=False)
def test_check_grad(self):
self.outputs['WarpCTCGrad'] = self.gradient
......