未验证 提交 ca7bd2be 编写于 作者: G guofei 提交者: GitHub

Add a function to update FLAGS (#22851)

* Add a function to update FLAGS

test=develop

* Add a function to update FLAGS

test=develop

* expr flags

* Add a function to update FLAGS

test=develop

* distinguish public/private vars, test=develop

* fix windows issues, test=develop

* expr flag

* Add functions to get and set FLAGS

test=develop

* Add functions to get and set FLAGS

test=develop

* Add functions to get and set FLAGS

test=develop

* Add functions to get and set flags

test=develop

* Add functions to get and set FLAGS

test=develop

* Add a function to update FLAGS

test=develop

* Add a function to update FLAGS

test=develop

* Add functions to get and set flags in Paddle

test=develop
Co-authored-by: sneaxiy <sneaxiy@126.com>
上级 cb1a2512
......@@ -27,18 +27,64 @@
#include "paddle/fluid/platform/macros.h"
#include "pybind11/stl.h"
DECLARE_double(eager_delete_tensor_gb);
// data processing
DECLARE_bool(use_mkldnn);
// debug
DECLARE_bool(check_nan_inf);
DECLARE_bool(cpu_deterministic);
DECLARE_bool(enable_rpc_profiler);
DECLARE_int32(multiple_of_cupti_buffer_size);
DECLARE_bool(reader_queue_speed_test_mode);
// device management
DECLARE_int32(paddle_num_threads);
// executor
DECLARE_bool(enable_parallel_graph);
DECLARE_string(pe_profile_fname);
DECLARE_string(print_sub_graph_dir);
DECLARE_bool(use_ngraph);
DECLARE_bool(use_system_allocator);
// memory management
DECLARE_string(allocator_strategy);
DECLARE_double(eager_delete_tensor_gb);
DECLARE_double(fraction_of_cpu_memory_to_use);
DECLARE_bool(free_idle_chunk);
DECLARE_bool(free_when_no_cache_hit);
DECLARE_int32(fuse_parameter_groups_size);
DECLARE_double(fuse_parameter_memory_size);
DECLARE_bool(init_allocated_mem);
DECLARE_uint64(initial_cpu_memory_in_mb);
DECLARE_double(memory_fraction_of_eager_deletion);
DECLARE_bool(use_pinned_memory);
DECLARE_bool(use_system_allocator);
// others
DECLARE_bool(benchmark);
DECLARE_int32(inner_op_parallelism);
DECLARE_string(tracer_profile_fname);
#ifdef PADDLE_WITH_CUDA
DECLARE_uint64(gpu_memory_limit_mb);
// cudnn
DECLARE_uint64(conv_workspace_size_limit);
DECLARE_bool(cudnn_batchnorm_spatial_persistent);
DECLARE_bool(cudnn_deterministic);
DECLARE_bool(cudnn_exhaustive_search);
// data processing
DECLARE_bool(enable_cublas_tensor_op_math);
// device management
DECLARE_string(selected_gpus);
// memory management
DECLARE_bool(eager_delete_scope);
DECLARE_bool(fast_eager_deletion_mode);
DECLARE_double(fraction_of_cuda_pinned_memory_to_use);
DECLARE_double(fraction_of_gpu_memory_to_use);
DECLARE_uint64(gpu_memory_limit_mb);
DECLARE_uint64(initial_gpu_memory_in_mb);
DECLARE_uint64(reallocate_gpu_memory_in_mb);
// others
DECLARE_bool(sync_nccl_allreduce);
#endif
#ifdef PADDLE_WITH_DISTRIBUTE
DECLARE_int32(rpc_send_thread_num);
DECLARE_int32(rpc_get_thread_num);
DECLARE_int32(rpc_prefetch_thread_num);
#endif
DECLARE_string(allocator_strategy);
DECLARE_bool(enable_parallel_graph);
namespace paddle {
namespace pybind {
......@@ -290,13 +336,32 @@ static void RegisterGlobalVarGetterSetter() {
REGISTER_PUBLIC_GLOBAL_VAR(
FLAGS_eager_delete_tensor_gb, FLAGS_enable_parallel_graph,
FLAGS_allocator_strategy, FLAGS_use_system_allocator);
FLAGS_allocator_strategy, FLAGS_use_system_allocator, FLAGS_check_nan_inf,
FLAGS_cpu_deterministic, FLAGS_enable_rpc_profiler,
FLAGS_multiple_of_cupti_buffer_size, FLAGS_reader_queue_speed_test_mode,
FLAGS_pe_profile_fname, FLAGS_print_sub_graph_dir,
FLAGS_fraction_of_cpu_memory_to_use, FLAGS_fuse_parameter_groups_size,
FLAGS_fuse_parameter_memory_size, FLAGS_init_allocated_mem,
FLAGS_initial_cpu_memory_in_mb, FLAGS_memory_fraction_of_eager_deletion,
FLAGS_use_pinned_memory, FLAGS_benchmark, FLAGS_inner_op_parallelism,
FLAGS_tracer_profile_fname, FLAGS_paddle_num_threads);
#ifdef PADDLE_WITH_CUDA
REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_gpu_memory_limit_mb,
FLAGS_cudnn_deterministic);
REGISTER_PUBLIC_GLOBAL_VAR(
FLAGS_gpu_memory_limit_mb, FLAGS_cudnn_deterministic,
FLAGS_conv_workspace_size_limit, FLAGS_cudnn_batchnorm_spatial_persistent,
FLAGS_cudnn_exhaustive_search, FLAGS_eager_delete_scope,
FLAGS_fast_eager_deletion_mode,
FLAGS_fraction_of_cuda_pinned_memory_to_use,
FLAGS_fraction_of_gpu_memory_to_use, FLAGS_initial_gpu_memory_in_mb,
FLAGS_reallocate_gpu_memory_in_mb, FLAGS_enable_cublas_tensor_op_math,
FLAGS_selected_gpus, FLAGS_sync_nccl_allreduce);
#endif
#ifdef PADDLE_WITH_DISTRIBUTE
REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_rpc_send_thread_num,
FLAGS_rpc_get_thread_num,
FLAGS_rpc_prefetch_thread_num);
#endif
}
} // namespace pybind
} // namespace paddle
......@@ -52,6 +52,8 @@ __all__ = [
'load_op_library',
'require_version',
'device_guard',
'set_flags',
'get_flags',
]
EMPTY_VAR_NAME = core.kEmptyVarName()
......@@ -5107,3 +5109,70 @@ def device_guard(device=None):
pre_device = switch_device(device)
yield
switch_device(pre_device)
def set_flags(flags):
    """
    Update the values of GFlags registered as public in Paddle.

    Args:
        flags (dict): Mapping from flag name (e.g. ``'FLAGS_eager_delete_tensor_gb'``)
            to the value it should be set to.

    Raises:
        TypeError: If ``flags`` is not a dict.
        ValueError: If any key names a flag that is not publicly settable.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0})
    """
    # Reject anything that is not a dict up front.
    if not isinstance(flags, dict):
        raise TypeError('flags in set_flags should be a dict')
    for key, value in flags.items():
        # Only flags exposed as public may be modified from Python.
        if not core.globals().is_public(key):
            raise ValueError(
                "Flag %s cannot set its value through this function." % (key))
        core.globals()[key] = value
def get_flags(flags):
    """
    Query the values of GFlags registered as public in Paddle.

    Args:
        flags (list|tuple|str): One flag name, or a list/tuple of flag names.

    Returns:
        dict: Mapping from each requested flag name to its current value.

    Raises:
        TypeError: If ``flags`` is not a list, tuple or string.
        ValueError: If a requested flag is not publicly readable.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            flags = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']
            res = fluid.get_flags(flags)
            print(res)
            # {'FLAGS_eager_delete_tensor_gb': 0.0, 'FLAGS_check_nan_inf': False}
    """
    # Normalize the input to a sequence of flag names; a single string
    # is treated as a one-element query.
    if isinstance(flags, str):
        names = [flags]
    elif isinstance(flags, (list, tuple)):
        names = flags
    else:
        raise TypeError('Flags in get_flags should be a list, tuple or string.')

    flags_value = {}
    for name in names:
        # Only public flags may be read through this interface.
        if not core.globals().is_public(name):
            raise ValueError(
                'Flag %s cannot get its value through this function.' % (name))
        flags_value[name] = core.globals()[name]
    return flags_value
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import unittest as unittest
class TestGetAndSetFlags(unittest.TestCase):
    """Round-trip test: set public flags, then read them back via both
    the list form and the single-string form of get_flags."""

    def test_api(self):
        flags = {
            'FLAGS_eager_delete_tensor_gb': 1.0,
            'FLAGS_check_nan_inf': True
        }
        fluid.set_flags(flags)

        flags_list = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']
        flag = 'FLAGS_eager_delete_tensor_gb'
        res_list = fluid.get_flags(flags_list)
        res = fluid.get_flags(flag)

        # BUG FIX: the original used assertTrue(value, expected) — the second
        # positional argument of assertTrue is the failure *message*, so no
        # comparison was ever performed. assertEqual actually checks the value.
        self.assertEqual(res_list['FLAGS_eager_delete_tensor_gb'], 1.0)
        self.assertEqual(res_list['FLAGS_check_nan_inf'], True)
        self.assertEqual(res['FLAGS_eager_delete_tensor_gb'], 1.0)
class TestGetAndSetFlagsErrors(unittest.TestCase):
    """Error-path tests for set_flags/get_flags: wrong argument types and
    attempts to access non-public (private) flags."""

    def test_errors(self):
        flags_list = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']
        flag = 1
        flag_private = {'FLAGS_use_mkldnn': True}

        # The flags argument of set_flags must be a dict.
        def test_set_flags_input_type():
            fluid.set_flags(flags_list)

        self.assertRaises(TypeError, test_set_flags_input_type)

        # Flags passed to set_flags must be public flags.
        # BUG FIX: the original called fluid.get_flags here, so the set-path
        # ValueError was never exercised and flag_private was unused.
        def test_set_private_flag():
            fluid.set_flags(flag_private)

        self.assertRaises(ValueError, test_set_private_flag)

        # The flags argument of get_flags must be a list, tuple or string.
        def test_get_flags_input_type():
            fluid.get_flags(flag)

        self.assertRaises(TypeError, test_get_flags_input_type)

        # Flags passed to get_flags must be public flags.
        def test_get_private_flag():
            fluid.get_flags('FLAGS_use_mkldnn')

        self.assertRaises(ValueError, test_get_private_flag)
# Allow running this test file directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册