diff --git a/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py
index 8919ded2e245c951016f28851b2ab4b16ce2b48f..a1ab4747235273e99a80da4a21aebdb5c1dbd293 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py
@@ -15,7 +15,6 @@
 import os
 
 from paddle import static
 from paddle.fluid import core
-from paddle.framework import _global_flags
 from paddle.framework.ir import apply_build_strategy
 from paddle.utils import unique_name
@@ -31,6 +30,14 @@ from .common import (
 from .meta_optimizer_base import MetaOptimizerBase
 
 
+def evaluate_flag_apply_pass_to_program(val: str) -> bool:
+    val = val.lower()
+    if val in ('false', 'off', '0'):
+        return False
+    else:
+        return True
+
+
 class RawProgramOptimizer(MetaOptimizerBase):
     def __init__(self, optimizer):
         super().__init__(optimizer)
@@ -153,7 +160,11 @@ class RawProgramOptimizer(MetaOptimizerBase):
         optimize_ops, params_grads = self.inner_opt.minimize(
             loss, startup_program, parameter_list, no_grad_set
         )
-        if _global_flags()['FLAGS_apply_pass_to_program']:
+        # Skip applying passes only when FLAGS_apply_pass_to_program is explicitly set to False
+        is_apply_pass_to_program = os.environ.get(
+            'FLAGS_apply_pass_to_program', '1'
+        )
+        if evaluate_flag_apply_pass_to_program(is_apply_pass_to_program):
             pass_attrs = {"use_cuda": True}
             build_strategy = self.user_defined_strategy.build_strategy._copy()
             build_strategy.fuse_all_optimizer_ops = False
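For review purposes, a minimal self-contained sketch (not part of the patch) of the flag semantics these changes introduce: the helper body is copied from the diff above, and the demo calls are hypothetical. Any value other than an explicit falsy string ('false', 'off', '0', case-insensitive) keeps the passes enabled, and the `os.environ.get(..., '1')` fallback at the call site means passes stay on when the flag is unset, preserving the previous default behavior.

```python
# Sketch only, not part of the patch: shows how the helper added in the diff
# interprets FLAGS_apply_pass_to_program.
import os


def evaluate_flag_apply_pass_to_program(val: str) -> bool:
    # Copied from the diff: only an explicit falsy string disables passes.
    val = val.lower()
    if val in ('false', 'off', '0'):
        return False
    else:
        return True


if __name__ == '__main__':
    for raw in ('1', '0', 'True', 'OFF', 'false', 'unexpected'):
        print(f"{raw!r} -> {evaluate_flag_apply_pass_to_program(raw)}")
    # '1' -> True, '0' -> False, 'True' -> True,
    # 'OFF' -> False, 'false' -> False, 'unexpected' -> True

    # Mirrors the new call site: an unset env var falls back to '1',
    # so passes are applied by default.
    flag = os.environ.get('FLAGS_apply_pass_to_program', '1')
    print(evaluate_flag_apply_pass_to_program(flag))
```

Note the design choice implied by the diff: reading the flag from `os.environ` instead of `_global_flags()` means the switch works even when the C++ global flag registry has not picked it up, at the cost of accepting any unrecognized string as "enabled".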