Subject: Switch public FLAGS from gflags DEFINE_* to PADDLE_DEFINE_EXPORTED_*

Reconstruction note: this patch had been flattened onto a handful of physical
lines (newlines lost), making it un-appliable.  The unified-diff line
structure below is restored; every +/-/context payload line is preserved
byte-for-byte, and each hunk was checked against its @@ old/new line counts
(3 context lines per side throughout).  Blank context lines and
continuation-line indentation, which the flattening destroyed, are
reconstructed to match those counts and the macro-call shapes.

Patch intent: PADDLE_DEFINE_EXPORTED_* both defines and registers the flag,
so the hand-maintained DECLARE_* / REGISTER_PUBLIC_GLOBAL_VAR lists in
global_value_getter_setter.cc become redundant and are deleted.

NOTE(review): two pre-existing typos are intentionally NOT fixed here,
because they sit in lines that must match the target files for the patch to
apply: `WITH_PRIFILER` (presumably WITH_PROFILER) in the pe_profile_fname
help string, and `#ifdef PADDLE_WITH_DITRIBUTE` (presumably
PADDLE_WITH_DISTRIBUTE) in global_value_getter_setter.cc -- the latter
looks like it keeps the rpc_* flags from ever being registered; confirm and
fix in a follow-up patch.

diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc
index 1d78a650f905df9832a255a51eba5eef617dc5cf..a485838a95942538259a50cd6097891419d7f58d 100644
--- a/paddle/fluid/framework/details/reduce_op_handle.cc
+++ b/paddle/fluid/framework/details/reduce_op_handle.cc
@@ -19,7 +19,7 @@
 #include "paddle/fluid/framework/details/variable_visitor.h"
 #include "paddle/fluid/platform/profiler.h"
 
-DEFINE_bool(
+PADDLE_DEFINE_EXPORTED_bool(
     cpu_deterministic, false,
     "Whether to make the result of computation deterministic in CPU side.");
 
diff --git a/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc
index ffd80f0c90a1cea52d567961dbad55bfd68daee0..08e7c6f5b8689c48ac8fe420c42a88179039d967 100644
--- a/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc
+++ b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc
@@ -25,13 +25,14 @@ class VarDesc;
 }  // namespace framework
 }  // namespace paddle
 
-DEFINE_double(fuse_parameter_memory_size, -1.0,  // MBytes
-              "fuse_parameter_memory_size is up limited memory size(MB)"
-              "of one group parameters' gradient which is the input "
-              "of communication calling(e.g NCCLAllReduce). "
-              "The default value is 0, it means that "
-              "not set group according to memory_size.");
-DEFINE_int32(
+PADDLE_DEFINE_EXPORTED_double(
+    fuse_parameter_memory_size, -1.0,  // MBytes
+    "fuse_parameter_memory_size is up limited memory size(MB)"
+    "of one group parameters' gradient which is the input "
+    "of communication calling(e.g NCCLAllReduce). "
+    "The default value is 0, it means that "
+    "not set group according to memory_size.");
+PADDLE_DEFINE_EXPORTED_int32(
     fuse_parameter_groups_size, 1,
     "fuse_parameter_groups_size is the up limited size of one group "
     "parameters' gradient. "
diff --git a/paddle/fluid/framework/ir/graph.cc b/paddle/fluid/framework/ir/graph.cc
index a174aa88d937bf2b9786863b5e21dedd2fc1af8f..036fde8fac6d911f8a97dbc097fae7f9fdd2ab6f 100644
--- a/paddle/fluid/framework/ir/graph.cc
+++ b/paddle/fluid/framework/ir/graph.cc
@@ -17,8 +17,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/graph.h"
 #include "paddle/fluid/framework/operator.h"
 
-DEFINE_bool(convert_all_blocks, true,
-            "Convert all blocks in program into SSAgraphs");
+PADDLE_DEFINE_EXPORTED_bool(convert_all_blocks, true,
+                            "Convert all blocks in program into SSAgraphs");
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/framework/ir/graph_helper.cc b/paddle/fluid/framework/ir/graph_helper.cc
index a73bc487c92cc92413e33cb51755a29d01004bee..5f7bfc61b422971516ceb35dd17f45facfbf12f1 100644
--- a/paddle/fluid/framework/ir/graph_helper.cc
+++ b/paddle/fluid/framework/ir/graph_helper.cc
@@ -18,9 +18,9 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_proto_maker.h"
 
 DECLARE_bool(convert_all_blocks);
-DEFINE_string(print_sub_graph_dir, "",
-              "FLAGS_print_sub_graph_dir is used "
-              "to print the nodes of sub_graphs.");
+PADDLE_DEFINE_EXPORTED_string(print_sub_graph_dir, "",
+                              "FLAGS_print_sub_graph_dir is used "
+                              "to print the nodes of sub_graphs.");
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/framework/new_executor/interpretercore.cc b/paddle/fluid/framework/new_executor/interpretercore.cc
index 9687ef2ff91d867edee74c161a7ae14ce767f0c8..b8bb6d21ebcae5e564ef5306c529f9fad10ce9ad 100644
--- a/paddle/fluid/framework/new_executor/interpretercore.cc
+++ b/paddle/fluid/framework/new_executor/interpretercore.cc
@@ -19,7 +19,8 @@
 
 #include "paddle/fluid/framework/details/share_tensor_buffer_functor.h"
 
-DEFINE_bool(new_executor_use_inplace, true, "Use inplace in new executor");
+PADDLE_DEFINE_EXPORTED_bool(new_executor_use_inplace, true,
+                            "Use inplace in new executor");
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 0d5db737441db50cbe21acefaf7d586f881e1808..670cb36dcc3aba36115928e6aeb5e7e42ac86c13 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -47,7 +47,8 @@ class LoDTensor;
 DECLARE_bool(benchmark);
 DECLARE_bool(check_nan_inf);
 DECLARE_bool(enable_unused_var_check);
-DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");
+PADDLE_DEFINE_EXPORTED_int32(inner_op_parallelism, 0,
+                             "number of threads for inner op");
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc
index 516a3bc63cad6815fe801038f0a491d899d90920..adbbfb380bc45f80acf66feb9faf9c6f57828f5d 100644
--- a/paddle/fluid/framework/parallel_executor.cc
+++ b/paddle/fluid/framework/parallel_executor.cc
@@ -46,11 +46,13 @@ DECLARE_double(eager_delete_tensor_gb);
 #ifdef WITH_GPERFTOOLS
 #include "gperftools/profiler.h"
 #endif
-DEFINE_string(pe_profile_fname, "",
-              "Profiler filename for PE, which generated by gperftools."
-              "Only valid when compiled `WITH_PRIFILER=ON`. Empty if disable.");
-DEFINE_bool(enable_parallel_graph, false,
-            "Force disable parallel graph execution mode if set false.");
+PADDLE_DEFINE_EXPORTED_string(
+    pe_profile_fname, "",
+    "Profiler filename for PE, which generated by gperftools."
+    "Only valid when compiled `WITH_PRIFILER=ON`. Empty if disable.");
+PADDLE_DEFINE_EXPORTED_bool(
+    enable_parallel_graph, false,
+    "Force disable parallel graph execution mode if set false.");
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/framework/scope.cc b/paddle/fluid/framework/scope.cc
index d299f1769253a202107c9b32925aea7b599b4d93..932974855a28eca53c99e7fd3186a749a15be6d3 100644
--- a/paddle/fluid/framework/scope.cc
+++ b/paddle/fluid/framework/scope.cc
@@ -19,7 +19,7 @@ limitations under the License. */
 
 DECLARE_bool(benchmark);
 
-DEFINE_bool(
+PADDLE_DEFINE_EXPORTED_bool(
     eager_delete_scope, true,
     "Delete local scope eagerly. It will reduce GPU memory usage but "
     "slow down the destruction of variables.(around 1% performance harm)");
diff --git a/paddle/fluid/framework/unused_var_check.cc b/paddle/fluid/framework/unused_var_check.cc
index f8ace3e85a643e8166da2b2e6f35a8097761b8cd..2f03dc41ce0027bcc3cc28cb30ff740cdb22754b 100644
--- a/paddle/fluid/framework/unused_var_check.cc
+++ b/paddle/fluid/framework/unused_var_check.cc
@@ -23,9 +23,10 @@ limitations under the License. */
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/platform/enforce.h"
 
-DEFINE_bool(enable_unused_var_check, false,
-            "Checking whether operator contains unused inputs, "
-            "especially for grad operator. It should be in unittest.");
+PADDLE_DEFINE_EXPORTED_bool(
+    enable_unused_var_check, false,
+    "Checking whether operator contains unused inputs, "
+    "especially for grad operator. It should be in unittest.");
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/pybind/global_value_getter_setter.cc b/paddle/fluid/pybind/global_value_getter_setter.cc
index b01e40750f3358eae9e8c4c38332d5f0d7f0dce2..e7f1bef4bee623880729ebd39714124ff69a1ab6 100644
--- a/paddle/fluid/pybind/global_value_getter_setter.cc
+++ b/paddle/fluid/pybind/global_value_getter_setter.cc
@@ -38,27 +38,6 @@
 PADDLE_FORCE_LINK_FLAG(free_idle_chunk);
 PADDLE_FORCE_LINK_FLAG(free_when_no_cache_hit);
 
-// debug
-DECLARE_bool(cpu_deterministic);
-
-// IR
-DECLARE_bool(convert_all_blocks);
-
-// executor
-DECLARE_bool(enable_parallel_graph);
-DECLARE_string(pe_profile_fname);
-DECLARE_string(print_sub_graph_dir);
-DECLARE_bool(new_executor_use_inplace);
-
-// memory management
-DECLARE_bool(eager_delete_scope);
-DECLARE_int32(fuse_parameter_groups_size);
-DECLARE_double(fuse_parameter_memory_size);
-
-// others
-DECLARE_int32(inner_op_parallelism);
-DECLARE_bool(enable_unused_var_check);
-
 // NOTE: where are these 2 flags from?
 #ifdef PADDLE_WITH_DISTRIBUTE
 DECLARE_int32(rpc_get_thread_num);
@@ -253,18 +232,6 @@ struct RegisterGetterSetterVisitor : public boost::static_visitor {
 };
 
 static void RegisterGlobalVarGetterSetter() {
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_cpu_deterministic);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_convert_all_blocks);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_enable_parallel_graph);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_pe_profile_fname);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_print_sub_graph_dir);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_new_executor_use_inplace);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_eager_delete_scope);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_fuse_parameter_groups_size);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_fuse_parameter_memory_size);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_inner_op_parallelism);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_enable_unused_var_check);
-
 #ifdef PADDLE_WITH_DITRIBUTE
   REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_rpc_get_thread_num);
   REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_rpc_prefetch_thread_num);