Unverified commit e2af9d56, authored by Chen Zhiyang, committed by GitHub

【New IR】New ir op test v1.1(sum passed) (#56756)

* add reference of lbfgs

* add reference of lbfgs

* new ir op test v1.0

* fix new ir optest bug1.0

* fix two test-case bugs

* add new ir white list & pass test_mean_op.py

* rename white list

* add new_ir_guard

* new ir sum op test all pass

* rename backward.grad to ir_backward.grad

* check place for new ir

* fix test_build_model env bug

* fix test_prim_program backward bug

* change backward to ir_backward in check_appr

* add check_new_ir flag for mkldnn

* clean

---------
Co-authored-by: wangruting <wangruting@baidu.com>
Parent d74bfefe
@@ -1510,6 +1510,9 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
     dtype_flag = False
     if dtype is not None:
-        dtype_flag = True
-        dtype = convert_np_dtype_to_dtype_(dtype)
+        if paddle.ir.core._use_new_ir_api():
+            dtype = paddle.ir.core.convert_np_dtype_to_dtype_(dtype)
+        else:
+            dtype_flag = True
+            dtype = convert_np_dtype_to_dtype_(dtype)
...
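For context, the hunk above splits the dtype handling in paddle.sum into two paths: under the new IR API the dtype is converted once and passed directly to the op, while the legacy path keeps the dtype_flag cast bookkeeping. A minimal sketch of the resulting control flow (the helper name `_resolve_sum_dtype` is hypothetical, and the `convert_np_dtype_to_dtype_` import path is an assumption about the module's imports in this Paddle era):

```python
import paddle
from paddle.fluid.framework import convert_np_dtype_to_dtype_  # assumed import path

def _resolve_sum_dtype(dtype):
    # Hypothetical helper condensing the branch added to paddle.sum.
    dtype_flag = False
    if dtype is not None:
        if paddle.ir.core._use_new_ir_api():
            # New IR: convert the numpy dtype once; the new IR sum op
            # receives it directly, so no cast flag is needed.
            dtype = paddle.ir.core.convert_np_dtype_to_dtype_(dtype)
        else:
            # Legacy path: record that an explicit cast happens later.
            dtype_flag = True
            dtype = convert_np_dtype_to_dtype_(dtype)
    return dtype, dtype_flag
```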
@@ -1927,6 +1927,7 @@ class OpTest(unittest.TestCase):
         only_check_prim=False,
         inplace_atol=None,
         check_cinn=False,
+        check_new_ir=True,
     ):
         core._set_prim_all_enabled(False)
         core.set_prim_eager_enabled(False)
@@ -2455,6 +2456,7 @@ class OpTest(unittest.TestCase):
         if (
             self.op_type
             in new_ir_python_api_grad_white_list.new_ir_python_api_grad_white_list
+            and check_new_ir
         ):
             if (
                 type(place) is paddle.fluid.libpaddle.CPUPlace
@@ -2576,6 +2578,7 @@ class OpTest(unittest.TestCase):
         inplace_atol=None,
         check_cinn=False,
         only_check_prim=False,
+        check_new_ir=True,
     ):
         self.__class__.op_type = self.op_type
         if self.is_mkldnn_op():
@@ -2600,6 +2603,7 @@ class OpTest(unittest.TestCase):
                 only_check_prim=only_check_prim,
                 inplace_atol=inplace_atol,
                 check_cinn=check_cinn,
+                check_new_ir=check_new_ir,
             )
             if not res and only_check_prim:
                 continue
@@ -2766,6 +2770,7 @@ class OpTest(unittest.TestCase):
         only_check_prim=False,
         atol=1e-5,
         check_cinn=False,
+        check_new_ir=True,
     ):
         if hasattr(self, "use_custom_device") and self.use_custom_device:
             check_dygraph = False
@@ -2788,6 +2793,7 @@ class OpTest(unittest.TestCase):
             only_check_prim=only_check_prim,
             atol=atol,
             check_cinn=check_cinn,
+            check_new_ir=check_new_ir,
         )

     def check_grad_with_place(
@@ -2807,6 +2813,7 @@ class OpTest(unittest.TestCase):
         numeric_place=None,
         atol=1e-5,
         check_cinn=False,
+        check_new_ir=True,
     ):
         if hasattr(self, "use_custom_device") and self.use_custom_device:
             check_dygraph = False
@@ -3007,6 +3014,7 @@ class OpTest(unittest.TestCase):
         if (
             self.op_type
             in new_ir_python_api_grad_white_list.new_ir_python_api_grad_white_list
+            and check_new_ir
         ):
             if (
                 type(place) is paddle.fluid.libpaddle.CPUPlace
...
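Taken together, these OpTest hunks thread a new check_new_ir keyword (defaulting to True) from check_output and check_grad down into check_output_with_place and check_grad_with_place, where it gates the extra new IR run. A condensed, hypothetical sketch of the gate (the place check is truncated in the hunk, so only the visible CPUPlace condition is shown; `white_list` stands in for new_ir_python_api_grad_white_list.new_ir_python_api_grad_white_list):

```python
import paddle

def _should_check_new_ir(op_type, place, check_new_ir, white_list):
    # The new IR comparison runs only for white-listed ops, only when
    # the test has not opted out, and (per the visible condition in the
    # diff) only on CPUPlace.
    return (
        op_type in white_list
        and check_new_ir
        and type(place) is paddle.fluid.libpaddle.CPUPlace
    )
```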
@@ -1180,7 +1180,11 @@ class Test3DReduce3(Test1DReduce):
 def reduce_sum_wrapper2(x, axis=[0], dtype=None, keepdim=False):
-    return paddle._C_ops.sum(x, axis, dtype, keepdim)
+    if paddle.in_dynamic_mode():
+        return paddle._C_ops.sum(x, axis, dtype, keepdim)
+    else:
+        if paddle.ir.core._use_new_ir_api():
+            return paddle._ir_ops.sum(x, axis, dtype, keepdim)


 class Test8DReduce0(Test1DReduce):
...
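The wrapper above now dispatches on execution mode: dynamic mode routes to paddle._C_ops.sum, while a static-graph program with the new IR active routes to paddle._ir_ops.sum. A hedged usage sketch, assuming the reduce_sum_wrapper2 definition from the hunk above is in scope:

```python
import paddle

# Dynamic mode (the default): dispatches to paddle._C_ops.sum.
x = paddle.ones([2, 3], dtype="float32")
out = reduce_sum_wrapper2(x, axis=[0])  # reduces axis 0 -> shape [3]

# In static mode, the wrapper returns a value only when
# paddle.ir.core._use_new_ir_api() is True (new IR active); otherwise it
# falls through and returns None, a path these tests do not exercise.
```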
@@ -40,7 +40,7 @@ class TestReduceSumDefaultBF16OneDNNOp(OpTest):
         self.attrs = {'use_mkldnn': self.use_mkldnn}

     def test_check_output(self):
-        self.check_output(check_dygraph=False)
+        self.check_output(check_dygraph=False, check_new_ir=False)

     def calculate_grads(self):
         tmp_tensor = np.zeros(self.x_fp32.shape).astype("float32")
@@ -84,6 +84,7 @@ class TestReduceDefaultWithGradBF16OneDNNOp(TestReduceSumDefaultBF16OneDNNOp):
             check_dygraph=False,
             user_defined_grads=[self.grad_X],
             user_defined_grad_outputs=[convert_float_to_uint16(self.grad_Out)],
+            check_new_ir=False,
         )
...
@@ -29,12 +29,12 @@ class TestReduceSumDefaultOneDNNOp(OpTest):
         self.attrs = {'use_mkldnn': self.use_mkldnn}

     def test_check_output(self):
-        self.check_output(check_dygraph=False)
+        self.check_output(check_dygraph=False, check_new_ir=False)


 class TestReduceDefaultWithGradOneDNNOp(TestReduceSumDefaultOneDNNOp):
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_dygraph=False)
+        self.check_grad(['X'], 'Out', check_dygraph=False, check_new_ir=False)


 class TestReduceSum4DOneDNNOp(TestReduceDefaultWithGradOneDNNOp):
...
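Because these OneDNN tests run through the old static executor with check_dygraph=False, they opt out of the new IR comparison explicitly. Any OpTest subclass can do the same; a minimal hypothetical example (the eager_op_test import path is an assumption about the test-suite layout):

```python
from eager_op_test import OpTest  # assumed test-harness import path

class TestSomeOneDNNOp(OpTest):
    # Hypothetical test: check_new_ir defaults to True, so tests that
    # cannot yet run under the new IR pass False explicitly.
    def test_check_output(self):
        self.check_output(check_dygraph=False, check_new_ir=False)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_dygraph=False, check_new_ir=False)
```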
@@ -14,4 +14,5 @@
 new_ir_python_api_grad_white_list = [
     "mean",
+    "reduce_sum",
 ]
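Adding "reduce_sum" here is what activates the new IR comparison for the sum op tests. Enrolling a further op appears to follow the same recipe: append its op_type to this list and, where an individual test cannot yet run under the new IR, pass check_new_ir=False there. For illustration (the commented entry is hypothetical):

```python
new_ir_python_api_grad_white_list = [
    "mean",
    "reduce_sum",
    # "softmax",  # hypothetical: a future op enrolled the same way
]
```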