From 13ca364ceba962bbd7eb61bcd2a5dce265678ab4 Mon Sep 17 00:00:00 2001 From: Zeng Jinle <32832641+sneaxiy@users.noreply.github.com> Date: Wed, 18 Sep 2019 23:15:40 +0800 Subject: [PATCH] remove some flags and add comments to some flags, test=develop (#19813) --- paddle/fluid/memory/detail/buddy_allocator.cc | 4 ---- paddle/fluid/platform/flags.cc | 24 +++++++++++++------ python/paddle/fluid/__init__.py | 14 +++++------ 3 files changed, 24 insertions(+), 18 deletions(-) diff --git a/paddle/fluid/memory/detail/buddy_allocator.cc b/paddle/fluid/memory/detail/buddy_allocator.cc index 2519a9587a..3e4af0a47c 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.cc +++ b/paddle/fluid/memory/detail/buddy_allocator.cc @@ -19,10 +19,6 @@ limitations under the License. */ #include "glog/logging.h" -DEFINE_bool(free_idle_memory, false, - "If it is true, Paddle will try to free idle memory trunks during " - "running time."); - #ifdef PADDLE_WITH_CUDA DECLARE_uint64(reallocate_gpu_memory_in_mb); #endif diff --git a/paddle/fluid/platform/flags.cc b/paddle/fluid/platform/flags.cc index 0a3608311e..301f5273f8 100644 --- a/paddle/fluid/platform/flags.cc +++ b/paddle/fluid/platform/flags.cc @@ -104,7 +104,7 @@ DEFINE_bool(cudnn_deterministic, false, * CUDNN related FLAG * Name: FLAGS_conv_workspace_size_limit * Since Version: 0.13.0 - * Value Range: uint64, default=4096 (MB) + * Value Range: uint64, default=512 (MB) * Example: * Note: The internal function of cuDNN obtains the fastest matching algorithm * within this memory limit. Usually, faster algorithms can be chosen in @@ -316,10 +316,15 @@ DEFINE_string(allocator_strategy, "naive_best_fit", /** * Memory related FLAG * Name: FLAGS_fraction_of_cpu_memory_to_use - * Since Version: - * Value Range: + * Since Version: 0.12.0 + * Value Range: double, [0.0, 1.0], default=1 * Example: - * Note: + * Note: Represents the proportion of allocated CPU memory blocks + * to the total memory size of the CPU. 
Future CPU memory usage + will be allocated from this memory block. If the memory block does + not have enough CPU memory, new memory blocks of the same + size as the memory block will be allocated from the CPU + request until the CPU does not have enough memory. */ DEFINE_double(fraction_of_cpu_memory_to_use, 1, "Default use 100% of CPU memory for PaddlePaddle," @@ -343,10 +348,15 @@ DEFINE_uint64(initial_cpu_memory_in_mb, 500ul, /** * Memory related FLAG * Name: FLAGS_fraction_of_cuda_pinned_memory_to_use - * Since Version: - * Value Range: + * Since Version: 0.12.0 + * Value Range: double, [0.0, 1.0], default=0.5 * Example: - * Note: + * Note: Represents the proportion of allocated CUDA pinned memory blocks + * to the total memory size of the CPU. Future CUDA pinned memory usage + * will be allocated from this memory block. If the memory block does + * not have enough CUDA pinned memory, new memory blocks of the same + * size as the memory block will be allocated from the CUDA pinned + * request until the CPU does not have enough memory.
*/ DEFINE_double( fraction_of_cuda_pinned_memory_to_use, 0.5, diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 5817bbf9a1..0202ac6562 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -154,13 +154,13 @@ def __bootstrap__(): read_env_flags = [ 'check_nan_inf', 'fast_check_nan_inf', 'benchmark', 'eager_delete_scope', 'initial_cpu_memory_in_mb', 'init_allocated_mem', - 'free_idle_memory', 'paddle_num_threads', "dist_threadpool_size", - 'eager_delete_tensor_gb', 'fast_eager_deletion_mode', - 'memory_fraction_of_eager_deletion', 'allocator_strategy', - 'reader_queue_speed_test_mode', 'print_sub_graph_dir', - 'pe_profile_fname', 'inner_op_parallelism', 'enable_parallel_graph', - 'fuse_parameter_groups_size', 'multiple_of_cupti_buffer_size', - 'fuse_parameter_memory_size', 'tracer_profile_fname', 'dygraph_debug' + 'paddle_num_threads', 'dist_threadpool_size', 'eager_delete_tensor_gb', + 'fast_eager_deletion_mode', 'memory_fraction_of_eager_deletion', + 'allocator_strategy', 'reader_queue_speed_test_mode', + 'print_sub_graph_dir', 'pe_profile_fname', 'inner_op_parallelism', + 'enable_parallel_graph', 'fuse_parameter_groups_size', + 'multiple_of_cupti_buffer_size', 'fuse_parameter_memory_size', + 'tracer_profile_fname', 'dygraph_debug' ] if 'Darwin' not in sysstr: read_env_flags.append('use_pinned_memory') -- GitLab