diff --git a/python/paddle/distributed/fleet/base/distributed_strategy.py b/python/paddle/distributed/fleet/base/distributed_strategy.py
index 2dbf9dd1d71fabdf6a37d81293b69a56330e440f..508d298686941a532ad866f3cca91c41125e1a2c 100644
--- a/python/paddle/distributed/fleet/base/distributed_strategy.py
+++ b/python/paddle/distributed/fleet/base/distributed_strategy.py
@@ -14,7 +14,7 @@
 
 import paddle
 from paddle.distributed.fleet.proto import distributed_strategy_pb2
-from paddle.fluid.framework import Variable, set_flags, core
+from paddle.fluid.framework import Variable, set_flags, core, _global_flags
 from paddle.fluid.wrapped_decorator import wrap_decorator
 import google.protobuf.text_format
 import google.protobuf
@@ -121,18 +121,18 @@ class DistributedStrategy(object):
         # Set the default values of the following flags to the ones set by users
         key = 'FLAGS_cudnn_batchnorm_spatial_persistent'
-        if core.globals().is_public(key):
+        if _global_flags().is_public(key):
             self.strategy.cudnn_batchnorm_spatial_persistent = bool(
-                core.globals()[key])
+                _global_flags()[key])
         key = 'FLAGS_conv_workspace_size_limit'
-        if core.globals().is_public(key):
-            self.strategy.conv_workspace_size_limit = int(core.globals()[key])
+        if _global_flags().is_public(key):
+            self.strategy.conv_workspace_size_limit = int(_global_flags()[key])
         key = 'FLAGS_cudnn_exhaustive_search'
-        if core.globals().is_public(key):
-            self.strategy.cudnn_exhaustive_search = bool(core.globals()[key])
+        if _global_flags().is_public(key):
+            self.strategy.cudnn_exhaustive_search = bool(_global_flags()[key])
         key = 'FLAGS_sync_nccl_allreduce'
-        if core.globals().is_public(key):
-            self.strategy.sync_nccl_allreduce = bool(core.globals()[key])
+        if _global_flags().is_public(key):
+            self.strategy.sync_nccl_allreduce = bool(_global_flags()[key])
 
         self.__lock_attr = True
@@ -1561,8 +1561,8 @@ class DistributedStrategy(object):
         ]
         for i, key in enumerate(keys):
-            if core.globals().is_public(key):
-                core.globals()[key] = values[i]
+            if _global_flags().is_public(key):
+                _global_flags()[key] = values[i]
 
     def _is_strict_auto(self):
         global non_auto_func_called
diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index 25412a86a8b940b9cba7210fbd17271955295bd1..708167a0273996fbb67eddec711ccff2aca5e759 100755
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -456,7 +456,7 @@ def _addup_repetitive_outputs_(op_descs, block_idx):
     In these cases, the variable should be the accumulation of all the outputs.
     `sum_op`s are added to implement the accumulate.
     """
-    _MAX_ADD_NUM_ = core.globals()['FLAGS_max_inplace_grad_add']
+    _MAX_ADD_NUM_ = framework._global_flags()['FLAGS_max_inplace_grad_add']
     #pending_sum_ops = []
     pending_sum_ops = collections.OrderedDict()
     var_rename_count = collections.defaultdict(int)
diff --git a/python/paddle/fluid/dygraph/layer_object_helper.py b/python/paddle/fluid/dygraph/layer_object_helper.py
index a904f80639752a7538289a1ce7c2abf378ccc634..5bf5eda19a5d0c2e3aab1515ddb8855ba2db5017 100644
--- a/python/paddle/fluid/dygraph/layer_object_helper.py
+++ b/python/paddle/fluid/dygraph/layer_object_helper.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import copy
 import six
-from ..framework import Parameter, in_dygraph_mode
+from ..framework import Parameter, in_dygraph_mode, _global_flags
 from ..param_attr import ParamAttr
 from .. import core
 from six.moves import zip
@@ -158,7 +158,7 @@ class LayerObjectHelper(LayerHelperBase):
         if (use_cudnn is not None) and use_cudnn:
             act['use_cudnn'] = use_cudnn
-        use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+        use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
         if (use_mkldnn is not None) and use_mkldnn:
             act['use_mkldnn'] = use_mkldnn
         act_type = act.pop('type')
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index ce728f1121dfdbc04dc123c3976539ec143fc9d6..9d6e637342a7b6626c3f3b958c91fdee1a9c4eac 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -21,7 +21,7 @@ from ..layers import utils
 from ..layers import nn as F
 from .. import dygraph_utils
 from . import layers
-from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter, _dygraph_tracer, _varbase_creator, default_main_program
+from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter, _dygraph_tracer, _varbase_creator, default_main_program, _global_flags
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from ..param_attr import ParamAttr
 from ..initializer import Normal, Constant, NumpyArrayInitializer
@@ -188,7 +188,7 @@ class Conv2D(layers.Layer):
         if not isinstance(use_cudnn, bool):
             raise ValueError("use_cudnn should be True or False")
         self._use_cudnn = use_cudnn
-        self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+        self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
         self._filter_size = filter_size
         self._num_filters = num_filters
         self._param_attr = param_attr
@@ -837,7 +837,7 @@ class Pool2D(layers.Layer):
 
         if not isinstance(use_cudnn, bool):
             raise ValueError("use_cudnn should be True or False")
-        self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+        self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
 
         if data_format not in ["NCHW", "NHWC"]:
             raise ValueError(
@@ -966,7 +966,7 @@ class Linear(layers.Layer):
         self.bias = self.create_parameter(
             shape=[output_dim], attr=bias_attr, dtype=dtype, is_bias=True)
-        self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+        self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
 
     def forward(self, input):
         if in_dygraph_mode():
@@ -1268,7 +1268,7 @@ class BatchNorm(layers.Layer):
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._act = act
-        self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+        self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
 
         assert bias_attr is not False, "bias_attr should not be False in batch_norm."
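Note: Conv2D, Pool2D, Linear and BatchNorm above all snapshot FLAGS_use_mkldnn once, in __init__, so the flag has to be set before a layer is constructed. A minimal sketch of that interaction, assuming FLAGS_use_mkldnn is registered as a public flag (the MKL-DNN tests further down read it through fluid.get_flags, which requires that); the Linear layer here is purely illustrative:

    import paddle.fluid as fluid
    from paddle.fluid.framework import _global_flags

    # set_flags/get_flags and _global_flags() all operate on the same
    # core.globals() registry, so a change made here is visible everywhere.
    fluid.set_flags({'FLAGS_use_mkldnn': True})
    assert _global_flags()['FLAGS_use_mkldnn'] == \
        fluid.get_flags(['FLAGS_use_mkldnn'])['FLAGS_use_mkldnn']

    # A layer built afterwards caches the value in its constructor
    # (self._use_mkldnn in the hunks above) and forwards it as an op attribute.
    with fluid.dygraph.guard():
        fc = fluid.dygraph.Linear(4, 2)
        print(fc._use_mkldnn)  # True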
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 54e4e6f139191d4e6d2d34721eb21f0776a9907d..695c91fea819f57a12ec760d3eeb4965da6c23de 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -72,6 +72,7 @@ _dygraph_tracer_ = None
 _global_expected_place_ = None
 _current_device = None
 global_prog_seed = 0
+_global_flags_ = core.globals()
 
 
 def require_version(min_version, max_version=None):
@@ -286,6 +287,10 @@ def _dygraph_tracer():
     return _dygraph_tracer_
 
 
+def _global_flags():
+    return _global_flags_
+
+
 def _current_expected_place():
     global _global_expected_place_
     if _global_expected_place_ is None:
@@ -5833,8 +5838,8 @@ def set_flags(flags):
     if not isinstance(flags, dict):
         raise TypeError('flags in set_flags should be a dict')
     for key, value in flags.items():
-        if core.globals().is_public(key):
-            core.globals()[key] = value
+        if _global_flags().is_public(key):
+            _global_flags()[key] = value
         else:
             raise ValueError(
                 "Flag %s cannot set its value through this function." % (key))
@@ -5863,8 +5868,8 @@ def get_flags(flags):
     flags_value = {}
     if isinstance(flags, (list, tuple)):
         for key in flags:
-            if (core.globals().is_public(key)):
-                value = core.globals()[key]
+            if (_global_flags().is_public(key)):
+                value = _global_flags()[key]
                 temp = {key: value}
                 flags_value.update(temp)
             else:
@@ -5872,8 +5877,8 @@ def get_flags(flags):
                 'Flag %s cannot get its value through this function.' %
                 (key))
     elif isinstance(flags, str):
-        if (core.globals().is_public(flags)):
-            value = core.globals()[flags]
+        if (_global_flags().is_public(flags)):
+            value = _global_flags()[flags]
             temp = {flags: value}
             flags_value.update(temp)
         else:
diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py
index db556913384785e1f11ba05dcc524ef1f1de92ab..2b677c11e9d96b7f412f3bdbb0322d4bcf98c472 100644
--- a/python/paddle/fluid/layer_helper.py
+++ b/python/paddle/fluid/layer_helper.py
@@ -17,7 +17,7 @@ from __future__ import print_function
 
 import copy
 import six
-from .framework import Parameter, dtype_is_floating, in_dygraph_mode, OpProtoHolder
+from .framework import Parameter, dtype_is_floating, in_dygraph_mode, OpProtoHolder, _global_flags
 from . import unique_name
 from paddle.fluid.initializer import Constant, Xavier
 from .param_attr import ParamAttr
@@ -148,7 +148,7 @@ class LayerHelper(LayerHelperBase):
         if 'use_cudnn' in self.kwargs and self.kwargs.get('use_cudnn'):
             act['use_cudnn'] = self.kwargs.get('use_cudnn')
         use_mkldnn = self.kwargs.get(
-            'use_mkldnn', core.globals().get("FLAGS_use_mkldnn", False))
+            'use_mkldnn', _global_flags().get("FLAGS_use_mkldnn", False))
         if use_mkldnn:
             act['use_mkldnn'] = use_mkldnn
         act_type = act.pop('type')
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index d7c95dc4669bebfcdbf8fed956d4e9f51a1e1e9a..e02edb72ce1f71a2f877c9b210f51567cb0a2607 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -26,7 +26,7 @@ import six
 import paddle
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant, NumpyArrayInitializer
-from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only
+from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only, _global_flags
 from .. import dygraph_utils
 from ..param_attr import ParamAttr
 from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
@@ -9500,7 +9500,7 @@ def relu6(x, threshold=6.0, name=None):
         outputs={'Out': out},
         attrs={
             'threshold': threshold,
-            'use_mkldnn': core.globals()["FLAGS_use_mkldnn"]
+            'use_mkldnn': _global_flags()["FLAGS_use_mkldnn"]
         })
 
     return out
@@ -11569,7 +11569,7 @@ Examples:
             axis=axis,
             act=act,
             op_name='elementwise_add',
-            use_mkldnn=core.globals()["FLAGS_use_mkldnn"])
+            use_mkldnn=_global_flags()["FLAGS_use_mkldnn"])
 
     return _elementwise_op(LayerHelper('elementwise_add', **locals()))
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py
index c93201946b275715cf70ae549339cd3f41f5cac7..90614ccb3bc1543073c808a1a424227736c794e3 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py
@@ -19,18 +19,19 @@ import numpy as np
 import paddle.fluid as fluid
 import os
 from paddle.fluid.layer_helper import LayerHelper
+from paddle.fluid.framework import _global_flags
 
 
 def check():
-    print("check: fluid.core.globals()['FLAGS_use_mkldnn']=",
-          fluid.core.globals()["FLAGS_use_mkldnn"])
+    print("check: _global_flags()['FLAGS_use_mkldnn']=",
+          _global_flags()["FLAGS_use_mkldnn"])
     print("check: fluid.get_flags('FLAGS_use_mkldnn')=",
           fluid.get_flags(['FLAGS_use_mkldnn']))
     print("check: DNNL_VERBOSE=", os.environ['DNNL_VERBOSE'])
     print("check: FLAGS_tracer_mkldnn_ops_on=",
-          fluid.core.globals()['FLAGS_tracer_mkldnn_ops_on'])
+          _global_flags()['FLAGS_tracer_mkldnn_ops_on'])
     print("check: FLAGS_tracer_mkldnn_ops_off=",
-          fluid.core.globals()['FLAGS_tracer_mkldnn_ops_off'])
+          _global_flags()['FLAGS_tracer_mkldnn_ops_off'])
     a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)
     b_np = np.random.uniform(-5, 5, (10, 20, 30)).astype(np.float32)
     helper = LayerHelper(fluid.unique_name.generate(str("test")), act="relu")
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py
index 8f5715a0d0afcf59ebbe1cc95a6b06dead64c6e2..3d9ef39680dc059d3823e6ad89081549e9f693a2 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py
@@ -19,11 +19,12 @@ import numpy as np
 import paddle.fluid as fluid
 import os
 from paddle.fluid.layer_helper import LayerHelper
+from paddle.fluid.framework import _global_flags
 
 
 def check():
-    print("check: fluid.core.globals()['FLAGS_use_mkldnn']=",
-          fluid.core.globals()["FLAGS_use_mkldnn"])
+    print("check: _global_flags()['FLAGS_use_mkldnn']=",
+          _global_flags()["FLAGS_use_mkldnn"])
     print("check: fluid.get_flags('FLAGS_use_mkldnn')=",
           fluid.get_flags(['FLAGS_use_mkldnn']))
     print("check: DNNL_VERBOSE=", os.environ['DNNL_VERBOSE'])
diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py
index 67958b8683fe174d2c9e387668ab8c7ee4a39276..66913f3ad2f659456d8def39852f3423fdd3dd6c 100644
--- a/python/paddle/nn/functional/conv.py
+++ b/python/paddle/nn/functional/conv.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import print_function
+from paddle.fluid.framework import _global_flags
 
 import numpy as np
 from ...device import get_cudnn_version
@@ -537,7 +538,7 @@ def conv2d(x,
     use_cudnn = True if (core.is_compiled_with_cuda() and
                          cudnn_version is not None) else False
 
-    use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
+    use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
 
     # update attrs
     padding, padding_algorithm = _update_padding_nd(padding, channel_last, 2)
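Taken together, the patch funnels every Python-side read of these GLOG flags through the cached _global_flags() handle instead of calling core.globals() at each call site. A minimal sketch of the shared read pattern, using only the is_public/indexing accesses shown in the hunks above; the helper name read_flag is illustrative and not part of the patch:

    from paddle.fluid.framework import _global_flags

    def read_flag(name, default=None):
        # _global_flags() returns the module-level handle created once in
        # framework.py (_global_flags_ = core.globals()).
        flags = _global_flags()
        if flags.is_public(name):
            return flags[name]
        return default

    # The read performed by relu6, elementwise_add, conv2d, etc. in this patch:
    use_mkldnn = read_flag('FLAGS_use_mkldnn', False)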