未验证 提交 a127d7c8 编写于 作者: K kangguangli 提交者: GitHub

[NewIR] minor fix about new ir test (#56075)

* fix bugs about new ir test

* enable dy2st newir test in all cases

* fix
上级 1bf2ab48
...@@ -3962,7 +3962,6 @@ function main() { ...@@ -3962,7 +3962,6 @@ function main() {
check_coverage_build check_coverage_build
;; ;;
gpu_cicheck_coverage) gpu_cicheck_coverage)
export FLAGS_NEW_IR_DY2ST_TEST=True
parallel_test parallel_test
check_coverage check_coverage
;; ;;
......
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import wraps
import numpy as np
from paddle import set_flags, static
from paddle.fluid import core
def test_with_new_ir(func):
    """Decorator: run ``func`` once with the new-IR executor enabled.

    Wraps the call in a fresh ``Scope``/``Program`` so the new-IR run does
    not pollute the caller's static-graph state, enables
    ``FLAGS_enable_new_ir_in_executor`` (both the process env var and the
    in-process flag) for the duration of the call, and returns whatever
    ``func`` returns.

    Fix over the previous version: the old ``finally`` block did
    ``del os.environ[new_ir_flag]`` unconditionally, which clobbered any
    value the environment had set *before* this decorator ran. We now save
    the prior env value and restore it on exit. The in-process flag is
    still reset to ``False`` afterwards, matching the original behavior.
    """

    @wraps(func)
    def impl(*args, **kwargs):
        ir_outs = None
        new_ir_flag = 'FLAGS_enable_new_ir_in_executor'
        # Remember the pre-existing env value (None means "not set") so we
        # can restore instead of blindly deleting it in ``finally``.
        prev_env = os.environ.get(new_ir_flag)
        with static.scope_guard(static.Scope()):
            with static.program_guard(static.Program()):
                try:
                    os.environ[new_ir_flag] = 'True'
                    set_flags({new_ir_flag: True})
                    ir_outs = func(*args, **kwargs)
                finally:
                    # Restore the environment exactly as we found it.
                    if prev_env is None:
                        os.environ.pop(new_ir_flag, None)
                    else:
                        os.environ[new_ir_flag] = prev_env
                    set_flags({new_ir_flag: False})
        return ir_outs

    return impl
def test_and_compare_with_new_ir(need_check_output: bool = True):
    """Decorator factory: run a dy2st unittest under both executors.

    The decorated test runs normally first; then, when the
    ``FLAGS_NEW_IR_DY2ST_TEST`` env var is present (i.e. in CI-Coverage),
    it is re-run via :func:`test_with_new_ir` and, if ``need_check_output``
    is True, the two result lists are compared element-wise.
    The legacy-executor outputs are always what gets returned.
    """

    def decorator(func):
        @wraps(func)
        def impl(*args, **kwargs):
            legacy_outs = func(*args, **kwargs)
            # Prim-op modes are not comparable under new IR; bail out early.
            if core._is_bwd_prim_enabled() or core._is_fwd_prim_enabled():
                return legacy_outs
            # The comparison is only exercised in CI-Coverage runs, which
            # export this env var.
            if os.environ.get('FLAGS_NEW_IR_DY2ST_TEST', None) is None:
                return legacy_outs
            new_ir_outs = test_with_new_ir(func)(*args, **kwargs)
            if need_check_output:
                for idx, expect in enumerate(legacy_outs):
                    actual = new_ir_outs[idx]
                    np.testing.assert_array_equal(
                        expect,
                        actual,
                        err_msg=(
                            f'Dy2St Unittest Check ({func.__name__})'
                            f' has diff \nExpect {expect}\nBut Got{actual}'
                        ),
                    )
            return legacy_outs

        return impl

    return decorator
...@@ -145,9 +145,6 @@ def test_and_compare_with_new_ir(need_check_output: bool = True): ...@@ -145,9 +145,6 @@ def test_and_compare_with_new_ir(need_check_output: bool = True):
outs = func(*args, **kwargs) outs = func(*args, **kwargs)
if core._is_bwd_prim_enabled() or core._is_fwd_prim_enabled(): if core._is_bwd_prim_enabled() or core._is_fwd_prim_enabled():
return outs return outs
# only run in CI-Coverage
if os.environ.get('FLAGS_NEW_IR_DY2ST_TEST', None) is None:
return outs
ir_outs = test_with_new_ir(func)(*args, **kwargs) ir_outs = test_with_new_ir(func)(*args, **kwargs)
if not need_check_output: if not need_check_output:
return outs return outs
......
...@@ -47,6 +47,7 @@ from paddle.fluid.framework import ( ...@@ -47,6 +47,7 @@ from paddle.fluid.framework import (
Program, Program,
_current_expected_place, _current_expected_place,
canonicalize_attrs, canonicalize_attrs,
get_flags,
set_flags, set_flags,
) )
from paddle.fluid.wrapped_decorator import signature_safe_contextmanager from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
...@@ -1209,45 +1210,46 @@ class OpTest(unittest.TestCase): ...@@ -1209,45 +1210,46 @@ class OpTest(unittest.TestCase):
return return
if self._check_cinn: if self._check_cinn:
return return
stored_flag = get_flags('FLAGS_enable_new_ir_in_executor')
set_flags({"FLAGS_enable_new_ir_in_executor": True}) try:
new_scope = paddle.static.Scope() set_flags({"FLAGS_enable_new_ir_in_executor": True})
executor = Executor(place) new_scope = paddle.static.Scope()
new_program = None executor = Executor(place)
if isinstance(program, paddle.static.CompiledProgram): new_program = None
new_program = fluid.CompiledProgram( if isinstance(program, paddle.static.CompiledProgram):
program._program, build_strategy=program._build_strategy new_program = fluid.CompiledProgram(
) program._program, build_strategy=program._build_strategy
else: )
new_program = program.clone() else:
ir_outs = executor.run( new_program = program.clone()
new_program, ir_outs = executor.run(
feed=feed_map, new_program,
fetch_list=fetch_list, feed=feed_map,
return_numpy=False, fetch_list=fetch_list,
scope=new_scope, return_numpy=False,
) scope=new_scope,
assert len(outs) == len(
ir_outs
), "Fetch result should have same length when executed in new ir"
for i in range(len(outs)):
np.testing.assert_array_equal(
outs[i],
ir_outs[i],
err_msg='Operator Check ('
+ self.op_type
+ ') has diff at '
+ str(place)
+ '\nExpect '
+ str(outs[i])
+ '\n'
+ 'But Got'
+ str(ir_outs[i])
+ ' in class '
+ self.__class__.__name__,
) )
assert len(outs) == len(
set_flags({"FLAGS_enable_new_ir_in_executor": False}) ir_outs
), "Fetch result should have same length when executed in new ir"
for i in range(len(outs)):
np.testing.assert_array_equal(
outs[i],
ir_outs[i],
err_msg='Operator Check ('
+ self.op_type
+ ') has diff at '
+ str(place)
+ '\nExpect '
+ str(outs[i])
+ '\n'
+ 'But Got'
+ str(ir_outs[i])
+ ' in class '
+ self.__class__.__name__,
)
finally:
set_flags(stored_flag)
def _calc_output( def _calc_output(
self, self,
...@@ -2896,40 +2898,41 @@ class OpTest(unittest.TestCase): ...@@ -2896,40 +2898,41 @@ class OpTest(unittest.TestCase):
if self._check_cinn: if self._check_cinn:
return return
set_flags({"FLAGS_enable_new_ir_in_executor": True}) stored_flag = get_flags('FLAGS_enable_new_ir_in_executor')
try:
executor = Executor(place) set_flags({"FLAGS_enable_new_ir_in_executor": True})
new_gradients = list( executor = Executor(place)
map( new_gradients = list(
np.array, map(
executor.run( np.array,
program, executor.run(
feed_dict, program,
fetch_list, feed_dict,
scope=scope, fetch_list,
return_numpy=False, scope=scope,
), return_numpy=False,
) ),
) )
for i in range(len(new_gradients)):
np.testing.assert_array_equal(
gradients[i],
new_gradients[i],
err_msg='Operator GradCheck ('
+ self.op_type
+ ') has diff at '
+ str(place)
+ '\nExpect '
+ str(gradients[i])
+ '\n'
+ 'But Got'
+ str(new_gradients[i])
+ ' in class '
+ self.__class__.__name__,
) )
set_flags({"FLAGS_enable_new_ir_in_executor": False}) for i in range(len(new_gradients)):
np.testing.assert_array_equal(
gradients[i],
new_gradients[i],
err_msg='Operator GradCheck ('
+ self.op_type
+ ') has diff at '
+ str(place)
+ '\nExpect '
+ str(gradients[i])
+ '\n'
+ 'But Got'
+ str(new_gradients[i])
+ ' in class '
+ self.__class__.__name__,
)
finally:
set_flags(stored_flag)
def _get_gradient( def _get_gradient(
self, self,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册