Unverified commit a127d7c8, authored by kangguangli, committed by GitHub

[NewIR] minor fix about new ir test (#56075)

* fix bugs about new ir test

* enable dy2st newir test in all cases

* fix
Parent 1bf2ab48
@@ -3962,7 +3962,6 @@ function main() {
         check_coverage_build
         ;;
     gpu_cicheck_coverage)
-        export FLAGS_NEW_IR_DY2ST_TEST=True
         parallel_test
         check_coverage
         ;;
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import wraps

import numpy as np

from paddle import set_flags, static
from paddle.fluid import core


def test_with_new_ir(func):
    @wraps(func)
    def impl(*args, **kwargs):
        ir_outs = None
        with static.scope_guard(static.Scope()):
            with static.program_guard(static.Program()):
                try:
                    # Enable the new-IR executor only for this call.
                    new_ir_flag = 'FLAGS_enable_new_ir_in_executor'
                    os.environ[new_ir_flag] = 'True'
                    set_flags({new_ir_flag: True})
                    ir_outs = func(*args, **kwargs)
                finally:
                    # Always undo the environment and flag changes.
                    del os.environ[new_ir_flag]
                    set_flags({new_ir_flag: False})
        return ir_outs

    return impl


def test_and_compare_with_new_ir(need_check_output: bool = True):
    def decorator(func):
        @wraps(func)
        def impl(*args, **kwargs):
            outs = func(*args, **kwargs)
            # Skip the cross-check when prim (operator decomposition) is enabled.
            if core._is_bwd_prim_enabled() or core._is_fwd_prim_enabled():
                return outs
            # only run in CI-Coverage
            if os.environ.get('FLAGS_NEW_IR_DY2ST_TEST', None) is None:
                return outs
            ir_outs = test_with_new_ir(func)(*args, **kwargs)
            if not need_check_output:
                return outs
            # Compare each output against its new-IR counterpart element-wise.
            for i in range(len(outs)):
                np.testing.assert_array_equal(
                    outs[i],
                    ir_outs[i],
                    err_msg='Dy2St Unittest Check ('
                    + func.__name__
                    + ') has diff '
                    + '\nExpect '
                    + str(outs[i])
                    + '\n'
                    + 'But Got '
                    + str(ir_outs[i]),
                )
            return outs

        return impl

    return decorator
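For context, a minimal sketch of how a dy2st unit test might opt in to this cross-check, assuming the decorator above is importable. The test class `TestAddDy2St` and the toy function `fn` are hypothetical names, not part of this commit; the comparison only fires when `FLAGS_NEW_IR_DY2ST_TEST` is set in the environment and prim modes are off:

```python
import unittest

import paddle


class TestAddDy2St(unittest.TestCase):  # hypothetical example, not in this commit
    @test_and_compare_with_new_ir(need_check_output=True)
    def run_add(self):
        @paddle.jit.to_static
        def fn(x, y):
            return x + y

        x = paddle.ones([2, 2], dtype='float32')
        y = paddle.full([2, 2], 2.0, dtype='float32')
        # Return a list of numpy arrays; the decorator re-runs this whole
        # function under FLAGS_enable_new_ir_in_executor and compares the
        # results element-wise with np.testing.assert_array_equal.
        return [fn(x, y).numpy()]

    def test_run(self):
        self.run_add()
```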
@@ -145,9 +145,6 @@ def test_and_compare_with_new_ir(need_check_output: bool = True):
             outs = func(*args, **kwargs)
             if core._is_bwd_prim_enabled() or core._is_fwd_prim_enabled():
                 return outs
-            # only run in CI-Coverage
-            if os.environ.get('FLAGS_NEW_IR_DY2ST_TEST', None) is None:
-                return outs
             ir_outs = test_with_new_ir(func)(*args, **kwargs)
             if not need_check_output:
                 return outs
@@ -47,6 +47,7 @@ from paddle.fluid.framework import (
     Program,
     _current_expected_place,
     canonicalize_attrs,
+    get_flags,
     set_flags,
 )
 from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
@@ -1209,45 +1210,46 @@ class OpTest(unittest.TestCase):
             return
         if self._check_cinn:
             return
-        set_flags({"FLAGS_enable_new_ir_in_executor": True})
-        new_scope = paddle.static.Scope()
-        executor = Executor(place)
-        new_program = None
-        if isinstance(program, paddle.static.CompiledProgram):
-            new_program = fluid.CompiledProgram(
-                program._program, build_strategy=program._build_strategy
-            )
-        else:
-            new_program = program.clone()
-        ir_outs = executor.run(
-            new_program,
-            feed=feed_map,
-            fetch_list=fetch_list,
-            return_numpy=False,
-            scope=new_scope,
-        )
-        set_flags({"FLAGS_enable_new_ir_in_executor": False})
-        assert len(outs) == len(
-            ir_outs
-        ), "Fetch result should have same length when executed in new ir"
-        for i in range(len(outs)):
-            np.testing.assert_array_equal(
-                outs[i],
-                ir_outs[i],
-                err_msg='Operator Check ('
-                + self.op_type
-                + ') has diff at '
-                + str(place)
-                + '\nExpect '
-                + str(outs[i])
-                + '\n'
-                + 'But Got '
-                + str(ir_outs[i])
-                + ' in class '
-                + self.__class__.__name__,
-            )
+        stored_flag = get_flags('FLAGS_enable_new_ir_in_executor')
+        try:
+            set_flags({"FLAGS_enable_new_ir_in_executor": True})
+            new_scope = paddle.static.Scope()
+            executor = Executor(place)
+            new_program = None
+            if isinstance(program, paddle.static.CompiledProgram):
+                new_program = fluid.CompiledProgram(
+                    program._program, build_strategy=program._build_strategy
+                )
+            else:
+                new_program = program.clone()
+            ir_outs = executor.run(
+                new_program,
+                feed=feed_map,
+                fetch_list=fetch_list,
+                return_numpy=False,
+                scope=new_scope,
+            )
+            set_flags({"FLAGS_enable_new_ir_in_executor": False})
+            assert len(outs) == len(
+                ir_outs
+            ), "Fetch result should have same length when executed in new ir"
+            for i in range(len(outs)):
+                np.testing.assert_array_equal(
+                    outs[i],
+                    ir_outs[i],
+                    err_msg='Operator Check ('
+                    + self.op_type
+                    + ') has diff at '
+                    + str(place)
+                    + '\nExpect '
+                    + str(outs[i])
+                    + '\n'
+                    + 'But Got '
+                    + str(ir_outs[i])
+                    + ' in class '
+                    + self.__class__.__name__,
+                )
+        finally:
+            set_flags(stored_flag)

     def _calc_output(
         self,
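Both op-test hunks repeat the same store/set/restore choreography. The key detail is that `paddle.get_flags` returns a `{flag_name: value}` dict that `set_flags` accepts directly, so the saved snapshot can be replayed verbatim in `finally`. The pattern could also be factored into a small context manager; the `temporary_flags` helper below is a sketch of that idea, not code from this commit:

```python
from contextlib import contextmanager

from paddle import get_flags, set_flags


@contextmanager
def temporary_flags(flags):
    """Apply the given global flags, restoring the old values on exit."""
    stored = get_flags(list(flags))  # snapshot: {flag_name: current_value}
    try:
        set_flags(flags)
        yield
    finally:
        set_flags(stored)  # runs even if the body raised


# Roughly equivalent to the try/finally blocks in this diff:
# with temporary_flags({"FLAGS_enable_new_ir_in_executor": True}):
#     ir_outs = executor.run(new_program, feed=feed_map, fetch_list=fetch_list)
```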
@@ -2896,40 +2898,41 @@ class OpTest(unittest.TestCase):
         if self._check_cinn:
             return
-        set_flags({"FLAGS_enable_new_ir_in_executor": True})
-        executor = Executor(place)
-        new_gradients = list(
-            map(
-                np.array,
-                executor.run(
-                    program,
-                    feed_dict,
-                    fetch_list,
-                    scope=scope,
-                    return_numpy=False,
-                ),
-            )
-        )
-        set_flags({"FLAGS_enable_new_ir_in_executor": False})
-        for i in range(len(new_gradients)):
-            np.testing.assert_array_equal(
-                gradients[i],
-                new_gradients[i],
-                err_msg='Operator GradCheck ('
-                + self.op_type
-                + ') has diff at '
-                + str(place)
-                + '\nExpect '
-                + str(gradients[i])
-                + '\n'
-                + 'But Got '
-                + str(new_gradients[i])
-                + ' in class '
-                + self.__class__.__name__,
-            )
+        stored_flag = get_flags('FLAGS_enable_new_ir_in_executor')
+        try:
+            set_flags({"FLAGS_enable_new_ir_in_executor": True})
+            executor = Executor(place)
+            new_gradients = list(
+                map(
+                    np.array,
+                    executor.run(
+                        program,
+                        feed_dict,
+                        fetch_list,
+                        scope=scope,
+                        return_numpy=False,
+                    ),
+                )
+            )
+            set_flags({"FLAGS_enable_new_ir_in_executor": False})
+            for i in range(len(new_gradients)):
+                np.testing.assert_array_equal(
+                    gradients[i],
+                    new_gradients[i],
+                    err_msg='Operator GradCheck ('
+                    + self.op_type
+                    + ') has diff at '
+                    + str(place)
+                    + '\nExpect '
+                    + str(gradients[i])
+                    + '\n'
+                    + 'But Got '
+                    + str(new_gradients[i])
+                    + ' in class '
+                    + self.__class__.__name__,
+                )
+        finally:
+            set_flags(stored_flag)

     def _get_gradient(
         self,