diff --git a/python/paddle/distributed/auto_parallel/static/engine.py b/python/paddle/distributed/auto_parallel/static/engine.py
index 3d7096441dac5c7dd169e21ec3e8c19b33ef5467..0ef5102a5499099e8ed3014955a019d46b415f27 100644
--- a/python/paddle/distributed/auto_parallel/static/engine.py
+++ b/python/paddle/distributed/auto_parallel/static/engine.py
@@ -833,14 +833,6 @@ class Engine:
                 dist_main_program, self._place, dist_context
             )
 
-            # NOTE(zhaoyinglia): Skip startup program when use new ir temporarily.
-            use_new_ir = False
-            if auto_utils.use_new_ir():
-                use_new_ir = True
-                paddle.framework.set_flags(
-                    {"FLAGS_enable_new_ir_in_executor": False}
-                )
-
         if self._executor is None:
             self._executor = paddle.static.Executor(self._place)
             uninitialized = []
@@ -868,11 +860,6 @@ class Engine:
             ]
             self._executor.run(dist_startup_prog)
 
-        if use_new_ir:
-            paddle.framework.set_flags(
-                {"FLAGS_enable_new_ir_in_executor": True}
-            )
-
     def fit(
         self,
         train_data,
diff --git a/python/paddle/distributed/auto_parallel/static/utils.py b/python/paddle/distributed/auto_parallel/static/utils.py
index 79597fd693218acaf201e9b2db0367d80d040799..fa12cfd68e3b2e0d80d8900a3ba72feb9fe88f99 100644
--- a/python/paddle/distributed/auto_parallel/static/utils.py
+++ b/python/paddle/distributed/auto_parallel/static/utils.py
@@ -2423,19 +2423,6 @@ def use_new_executor():
     ]
 
 
-def use_new_ir():
-    enable_new_ir_in_executor = os.environ.get(
-        'FLAGS_enable_new_ir_in_executor', None
-    )
-    return enable_new_ir_in_executor in [
-        1,
-        '1',
-        True,
-        'True',
-        'true',
-    ]
-
-
 def get_pp_stage(dist_context, rank):
     pp_idx = None
     for idx, process_mesh in enumerate(dist_context.process_meshes):
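
For context, the removed code implemented a temporary "toggle the new-IR executor flag around startup" workaround: use_new_ir() read FLAGS_enable_new_ir_in_executor from the environment, the engine disabled the flag before running the startup program, and restored it afterwards. The sketch below is a minimal standalone reconstruction of that pattern, not part of this diff; run_startup is a hypothetical placeholder standing in for self._executor.run(dist_startup_prog).

    import os

    import paddle


    def use_new_ir():
        # Same check as the removed helper: accept common truthy spellings of the flag.
        enable_new_ir_in_executor = os.environ.get(
            'FLAGS_enable_new_ir_in_executor', None
        )
        return enable_new_ir_in_executor in [1, '1', True, 'True', 'true']


    def run_startup_with_new_ir_disabled(run_startup):
        # Hypothetical wrapper mirroring the removed Engine logic: skip the new-IR
        # executor while the startup program runs, then restore the flag.
        use_new_ir_flag = False
        if use_new_ir():
            use_new_ir_flag = True
            paddle.framework.set_flags(
                {"FLAGS_enable_new_ir_in_executor": False}
            )

        run_startup()  # stands in for self._executor.run(dist_startup_prog)

        if use_new_ir_flag:
            paddle.framework.set_flags(
                {"FLAGS_enable_new_ir_in_executor": True}
            )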