Unverified commit 89d3a46d, authored by K kangguangli, committed by GitHub

[NewIR] enable new ir for unittests in white list (#55117)

* add ir output check in OpTest

* add ir grad check in op test

* add white list for ir op test

* fix

* open only in py3 and mac

(cherry picked from commit 6daa44da495afb0287c6b69ecefbe35bbc47cb50)
Parent 41172d78
......@@ -626,6 +626,8 @@ EOF
function run_mac_test() {
export FLAGS_NEW_IR_OPTEST=True
export FLAGS_CI_PIPELINE=mac
mkdir -p ${PADDLE_ROOT}/build
cd ${PADDLE_ROOT}/build
if [ ${WITH_TESTING:-ON} == "ON" ] ; then
......@@ -771,6 +773,8 @@ EOF
}
function run_linux_cpu_test() {
export FLAGS_NEW_IR_OPTEST=True
export FLAGS_CI_PIPELINE=py3
mkdir -p ${PADDLE_ROOT}/build
cd ${PADDLE_ROOT}/build
pip install hypothesis
......
......@@ -1345,3 +1345,14 @@ set_tests_properties(test_sync_batch_norm_op_static_build
PROPERTIES LABELS "RUN_TYPE=DIST")
set_tests_properties(test_sync_batch_norm_op_static_build PROPERTIES TIMEOUT
250)
# Read the white list of op tests that should also be validated against the
# new IR executor, and tag each existing test with the environment variable
# that OpTest checks before doing the new-IR output comparison.
file(STRINGS "${CMAKE_SOURCE_DIR}/test/new_ir_op_test_white_list"
     NEW_IR_OP_TESTS)
foreach(ir_op_test IN LISTS NEW_IR_OP_TESTS)
  # Only annotate tests that were actually registered with CTest; warn
  # about stale white-list entries instead of failing the configure step.
  if(TEST ${ir_op_test})
    set_tests_properties(
      ${ir_op_test} PROPERTIES ENVIRONMENT
      "FLAGS_NEW_IR_OPTEST_WHITE_LIST=True")
  else()
    message(WARNING "not found ${ir_op_test}")
  endif()
endforeach()
......@@ -47,6 +47,7 @@ from paddle.fluid.framework import (
Program,
_current_expected_place,
canonicalize_attrs,
set_flags,
)
from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
......@@ -386,6 +387,7 @@ class OpTest(unittest.TestCase):
cls.input_shape_is_large = True
cls.is_calc_ref = False
cls.check_prim = False
cls._check_cinn = False
np.random.seed(123)
random.seed(124)
......@@ -1125,6 +1127,43 @@ class OpTest(unittest.TestCase):
)
return outputs
def _check_ir_output(self, place, program, feed_map, fetch_list, outs):
    """Re-run ``program`` under the new-IR executor and compare against
    the legacy-executor outputs ``outs``.

    Active only when both ``FLAGS_NEW_IR_OPTEST`` and
    ``FLAGS_NEW_IR_OPTEST_WHITE_LIST`` are present in the environment
    (set by the CI scripts and the CMake white list). Skipped for prim
    and CINN test modes, which transform the program and so would not
    produce comparable outputs.

    Args:
        place: device to run on (e.g. CPUPlace/CUDAPlace).
        program: static ``Program`` to execute.
        feed_map: feed dict for the run.
        fetch_list: variables to fetch.
        outs: reference outputs from the legacy executor.

    Raises:
        AssertionError: if the new-IR outputs differ from ``outs``.
    """
    # Gate on the environment switches that enable new-IR checking.
    if os.getenv("FLAGS_NEW_IR_OPTEST") is None:
        return
    if os.getenv("FLAGS_NEW_IR_OPTEST_WHITE_LIST") is None:
        return
    # prim / CINN modes rewrite the program; comparison is meaningless.
    if self.check_prim:
        return
    if self._check_cinn:
        return
    set_flags({"FLAGS_enable_new_ir_in_executor": True})
    try:
        executor = Executor(place)
        ir_outs = executor.run(
            program,
            feed=feed_map,
            fetch_list=fetch_list,
            return_numpy=False,
        )
        np.testing.assert_array_equal(
            outs,
            ir_outs,
            err_msg='Operator ('
            + self.op_type
            + ') has diff at '
            + str(place)
            + '\nExpect '
            + str(outs)
            + '\n'
            + 'But Got'
            + str(ir_outs)
            + ' in class '
            + self.__class__.__name__,
        )
    finally:
        # BUG FIX: the original reset this flag only on the success
        # path, so a failed comparison left the new-IR executor enabled
        # for every subsequent test in the process. Always restore it.
        set_flags({"FLAGS_enable_new_ir_in_executor": False})
def _calc_output(
self,
place,
......@@ -1192,6 +1231,7 @@ class OpTest(unittest.TestCase):
build_strategy.enable_inplace = enable_inplace
if enable_cinn_test:
build_strategy.build_cinn_pass = check_cinn
self._check_cinn = enable_cinn_test
compiled_prog = fluid.CompiledProgram(
program, build_strategy=build_strategy
......@@ -1205,6 +1245,9 @@ class OpTest(unittest.TestCase):
fetch_list=fetch_list,
return_numpy=False,
)
self._check_ir_output(place, program, feed_map, fetch_list, outs)
self.op = op
self.program = original_program
if for_inplace_test:
......@@ -2753,6 +2796,53 @@ class OpTest(unittest.TestCase):
output_names.append(cast_output.name)
return output_names
def _check_ir_grad_output(
    self, place, program, scope, feed_dict, fetch_list, gradients
):
    """Re-run the gradient ``program`` under the new-IR executor and
    compare each fetched gradient against ``gradients``.

    Active only when both ``FLAGS_NEW_IR_OPTEST`` and
    ``FLAGS_NEW_IR_OPTEST_WHITE_LIST`` are present in the environment.
    Skipped for prim and CINN test modes, which transform the program.

    Args:
        place: device to run on (e.g. CPUPlace/CUDAPlace).
        program: static gradient ``Program`` to execute.
        scope: dedicated scope holding the grad-output tensors
            (a separate ``ir_scope`` is prepared by the caller).
        feed_dict: feed dict for the run.
        fetch_list: gradient variable names to fetch.
        gradients: reference gradients from the legacy executor.

    Raises:
        AssertionError: if any new-IR gradient differs from the
            corresponding entry in ``gradients``.
    """
    # Gate on the environment switches that enable new-IR checking.
    if os.getenv("FLAGS_NEW_IR_OPTEST") is None:
        return
    if os.getenv("FLAGS_NEW_IR_OPTEST_WHITE_LIST") is None:
        return
    # prim / CINN modes rewrite the program; comparison is meaningless.
    if self.check_prim:
        return
    if self._check_cinn:
        return
    set_flags({"FLAGS_enable_new_ir_in_executor": True})
    try:
        executor = Executor(place)
        new_gradients = list(
            map(
                np.array,
                executor.run(
                    program,
                    feed_dict,
                    fetch_list,
                    scope=scope,
                    return_numpy=False,
                ),
            )
        )
        # Compare element-wise against the legacy-executor gradients.
        for expected, actual in zip(gradients, new_gradients):
            np.testing.assert_array_equal(
                expected,
                actual,
                err_msg='Operator GradCheck ('
                + self.op_type
                + ') has diff at '
                + str(place)
                + '\nExpect '
                + str(expected)
                + '\n'
                + 'But Got'
                + str(actual)
                + ' in class '
                + self.__class__.__name__,
            )
    finally:
        # BUG FIX: the original reset this flag only on the success
        # path, so a failed comparison left the new-IR executor enabled
        # for every subsequent test in the process. Always restore it.
        set_flags({"FLAGS_enable_new_ir_in_executor": False})
def _get_gradient(
self,
input_to_check,
......@@ -2766,6 +2856,7 @@ class OpTest(unittest.TestCase):
with paddle.fluid.framework._static_guard():
prog = Program()
scope = core.Scope()
ir_scope = core.Scope()
block = prog.global_block()
self._append_ops(block)
......@@ -2820,6 +2911,11 @@ class OpTest(unittest.TestCase):
tensor = true_var.get_tensor()
tensor.set(grad_out_value, place)
grad_outputs.append(var)
if os.getenv("FLAGS_NEW_IR_OPTEST") is not None:
ir_true_var = ir_scope.var(var.name)
ir_tensor = ir_true_var.get_tensor()
ir_tensor.set(grad_out_value, place)
targets = [
outputs[name] for name in outputs if name in output_names
]
......@@ -2829,7 +2925,7 @@ class OpTest(unittest.TestCase):
grad_inputs = paddle.static.gradients(
targets, inputs, grad_outputs, no_grad_set
)
fetch_list = grad_inputs
fetch_list = [grad.name for grad in grad_inputs]
enable_cinn_test = check_cinn and self._enable_check_cinn_test(
place, feed_dict, outputs
......@@ -2849,6 +2945,7 @@ class OpTest(unittest.TestCase):
if enable_cinn_test:
build_strategy = fluid.BuildStrategy()
build_strategy.build_cinn_pass = check_cinn
self._check_cinn = True
compiled_prog = fluid.CompiledProgram(prog, build_strategy)
prog = compiled_prog
......@@ -2865,6 +2962,11 @@ class OpTest(unittest.TestCase):
),
)
)
self._check_ir_grad_output(
place, prog, ir_scope, feed_dict, fetch_list, res
)
return res
......
test_atan2_op
test_elementwise_div_op
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register to comment