diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index a01b327730db687fb4094f516aa3e239a270b683..86438d88438d07becb6539aec305021ce07298de 100644
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -626,6 +626,8 @@ EOF
 }
 
 function run_mac_test() {
+    export FLAGS_NEW_IR_OPTEST=True
+    export FLAGS_CI_PIPELINE=mac
     mkdir -p ${PADDLE_ROOT}/build
     cd ${PADDLE_ROOT}/build
     if [ ${WITH_TESTING:-ON} == "ON" ] ; then
@@ -771,6 +773,8 @@ EOF
 }
 
 function run_linux_cpu_test() {
+    export FLAGS_NEW_IR_OPTEST=True
+    export FLAGS_CI_PIPELINE=py3
     mkdir -p ${PADDLE_ROOT}/build
     cd ${PADDLE_ROOT}/build
     pip install hypothesis
diff --git a/test/legacy_test/CMakeLists.txt b/test/legacy_test/CMakeLists.txt
index 064108b286f844071978dceea9ee9efbebe6344b..688cbe2a2575aefa8b409909068c9360269b1783 100644
--- a/test/legacy_test/CMakeLists.txt
+++ b/test/legacy_test/CMakeLists.txt
@@ -1345,3 +1345,14 @@ set_tests_properties(test_sync_batch_norm_op_static_build
                      PROPERTIES LABELS "RUN_TYPE=DIST")
 set_tests_properties(test_sync_batch_norm_op_static_build PROPERTIES TIMEOUT
                                                                      250)
+file(STRINGS "${CMAKE_SOURCE_DIR}/test/new_ir_op_test_white_list"
+     NEW_IR_OP_TESTS)
+foreach(IR_OP_TEST ${NEW_IR_OP_TESTS})
+  if(TEST ${IR_OP_TEST})
+    set_tests_properties(
+      ${IR_OP_TEST} PROPERTIES ENVIRONMENT
+                               "FLAGS_NEW_IR_OPTEST_WHITE_LIST=True")
+  else()
+    message(WARNING "not found ${IR_OP_TEST}")
+  endif()
+endforeach()
diff --git a/test/legacy_test/eager_op_test.py b/test/legacy_test/eager_op_test.py
index b4ba945b1f5726e649184d6a80a2e766ddf173ae..b15f88fea03f1bfa43a4451449b7bd0992c09067 100644
--- a/test/legacy_test/eager_op_test.py
+++ b/test/legacy_test/eager_op_test.py
@@ -47,6 +47,7 @@ from paddle.fluid.framework import (
     Program,
     _current_expected_place,
     canonicalize_attrs,
+    set_flags,
 )
 
 from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
@@ -386,6 +387,7 @@ class OpTest(unittest.TestCase):
         cls.input_shape_is_large = True
         cls.is_calc_ref = False
         cls.check_prim = False
+        cls._check_cinn = False
 
         np.random.seed(123)
         random.seed(124)
@@ -1125,6 +1127,43 @@ class OpTest(unittest.TestCase):
             )
         return outputs
 
+    def _check_ir_output(self, place, program, feed_map, fetch_list, outs):
+        if os.getenv("FLAGS_NEW_IR_OPTEST") is None:
+            return
+        if os.getenv("FLAGS_NEW_IR_OPTEST_WHITE_LIST") is None:
+            return
+        if self.check_prim:
+            return
+        if self._check_cinn:
+            return
+
+        set_flags({"FLAGS_enable_new_ir_in_executor": True})
+
+        executor = Executor(place)
+        ir_outs = executor.run(
+            program,
+            feed=feed_map,
+            fetch_list=fetch_list,
+            return_numpy=False,
+        )
+        np.testing.assert_array_equal(
+            outs,
+            ir_outs,
+            err_msg='Operator ('
+            + self.op_type
+            + ') has diff at '
+            + str(place)
+            + '\nExpect '
+            + str(outs)
+            + '\n'
+            + 'But Got'
+            + str(ir_outs)
+            + ' in class '
+            + self.__class__.__name__,
+        )
+
+        set_flags({"FLAGS_enable_new_ir_in_executor": False})
+
     def _calc_output(
         self,
         place,
@@ -1192,6 +1231,7 @@ class OpTest(unittest.TestCase):
                 build_strategy.enable_inplace = enable_inplace
                 if enable_cinn_test:
                     build_strategy.build_cinn_pass = check_cinn
+                    self._check_cinn = enable_cinn_test
                 compiled_prog = fluid.CompiledProgram(
                     program, build_strategy=build_strategy
                 )
@@ -1205,6 +1245,9 @@ class OpTest(unittest.TestCase):
                 fetch_list=fetch_list,
                 return_numpy=False,
             )
+
+            self._check_ir_output(place, program, feed_map, fetch_list, outs)
+
             self.op = op
             self.program = original_program
         if for_inplace_test:
@@ -2753,6 +2796,53 @@ class OpTest(unittest.TestCase):
                     output_names.append(cast_output.name)
         return output_names
 
+    def _check_ir_grad_output(
+        self, place, program, scope, feed_dict, fetch_list, gradients
+    ):
+        if os.getenv("FLAGS_NEW_IR_OPTEST") is None:
+            return
+        if os.getenv("FLAGS_NEW_IR_OPTEST_WHITE_LIST") is None:
+            return
+        if self.check_prim:
+            return
+        if self._check_cinn:
+            return
+
+        set_flags({"FLAGS_enable_new_ir_in_executor": True})
+
+        executor = Executor(place)
+        new_gradients = list(
+            map(
+                np.array,
+                executor.run(
+                    program,
+                    feed_dict,
+                    fetch_list,
+                    scope=scope,
+                    return_numpy=False,
+                ),
+            )
+        )
+
+        for i in range(len(new_gradients)):
+            np.testing.assert_array_equal(
+                gradients[i],
+                new_gradients[i],
+                err_msg='Operator GradCheck ('
+                + self.op_type
+                + ') has diff at '
+                + str(place)
+                + '\nExpect '
+                + str(gradients[i])
+                + '\n'
+                + 'But Got'
+                + str(new_gradients[i])
+                + ' in class '
+                + self.__class__.__name__,
+            )
+
+        set_flags({"FLAGS_enable_new_ir_in_executor": False})
+
     def _get_gradient(
         self,
         input_to_check,
@@ -2766,6 +2856,7 @@ class OpTest(unittest.TestCase):
         with paddle.fluid.framework._static_guard():
             prog = Program()
             scope = core.Scope()
+            ir_scope = core.Scope()
             block = prog.global_block()
 
             self._append_ops(block)
@@ -2820,6 +2911,11 @@ class OpTest(unittest.TestCase):
                     tensor = true_var.get_tensor()
                     tensor.set(grad_out_value, place)
                     grad_outputs.append(var)
+                    if os.getenv("FLAGS_NEW_IR_OPTEST") is not None:
+                        ir_true_var = ir_scope.var(var.name)
+                        ir_tensor = ir_true_var.get_tensor()
+                        ir_tensor.set(grad_out_value, place)
+
                 targets = [
                     outputs[name] for name in outputs if name in output_names
                 ]
@@ -2829,7 +2925,7 @@ class OpTest(unittest.TestCase):
                 grad_inputs = paddle.static.gradients(
                     targets, inputs, grad_outputs, no_grad_set
                 )
-            fetch_list = grad_inputs
+            fetch_list = [grad.name for grad in grad_inputs]
 
             enable_cinn_test = check_cinn and self._enable_check_cinn_test(
                 place, feed_dict, outputs
@@ -2849,6 +2945,7 @@ class OpTest(unittest.TestCase):
             if enable_cinn_test:
                 build_strategy = fluid.BuildStrategy()
                 build_strategy.build_cinn_pass = check_cinn
+                self._check_cinn = True
                 compiled_prog = fluid.CompiledProgram(prog, build_strategy)
                 prog = compiled_prog
 
@@ -2865,6 +2962,11 @@ class OpTest(unittest.TestCase):
                     ),
                 )
             )
+
+            self._check_ir_grad_output(
+                place, prog, ir_scope, feed_dict, fetch_list, res
+            )
+
             return res
 
diff --git a/test/new_ir_op_test_white_list b/test/new_ir_op_test_white_list
new file mode 100644
index 0000000000000000000000000000000000000000..220a565a478d9469e4da1e7dd713601e82b69bcf
--- /dev/null
+++ b/test/new_ir_op_test_white_list
@@ -0,0 +1,2 @@
+test_atan2_op
+test_elementwise_div_op
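
Note: to exercise the new-IR cross-check locally for one whitelisted test, something like the following should work. This is a sketch, assuming a finished build in build/ and a test name that appears in test/new_ir_op_test_white_list (e.g. test_atan2_op above). In CI, FLAGS_NEW_IR_OPTEST is exported by run_mac_test/run_linux_cpu_test and FLAGS_NEW_IR_OPTEST_WHITE_LIST is injected per test by the CMake loop, so both are set by hand here:

    # Run a whitelisted op test with the new-IR executor comparison enabled.
    # Both flags must be set, and the test must not exercise prim or CINN,
    # otherwise _check_ir_output/_check_ir_grad_output return early and skip
    # the comparison against the legacy executor results.
    cd build
    FLAGS_NEW_IR_OPTEST=True FLAGS_NEW_IR_OPTEST_WHITE_LIST=True \
        ctest -R test_atan2_op --output-on-failure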