Unverified · Commit d22914fd authored by Zeng Jinle, committed by GitHub

change to PADDLE_DEFINE_EXPORTED (#35841)

Parent fcfb0afe
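The change is mechanical: each plain gflags `DEFINE_<type>` in the files below becomes the corresponding `PADDLE_DEFINE_EXPORTED_<type>`, which defines the same flag but also exposes it from the shared library. As a rough illustration of the idea only (a hypothetical macro named `MY_DEFINE_EXPORTED_bool`, assuming a GCC/Clang toolchain; Paddle's real macro is defined elsewhere and is not shown in this diff):

// Minimal sketch of an "exported define" wrapper over gflags. It defines
// the flag exactly as DEFINE_bool does, then adds an accessor with default
// visibility so the flag can be reached from other shared objects (e.g.
// language bindings) without a separate DECLARE_* plus manual registration.
#include "gflags/gflags.h"

#define MY_DEFINE_EXPORTED_bool(name, default_value, doc)        \
  DEFINE_bool(name, default_value, doc);                         \
  extern "C" __attribute__((visibility("default"))) bool *       \
      my_exported_flag_##name() {                                \
    return &FLAGS_##name;                                        \
  }

// Usage, mirroring the first hunk below:
MY_DEFINE_EXPORTED_bool(
    cpu_deterministic, false,
    "Whether to make the result of computation deterministic in CPU side.");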
@@ -19,7 +19,7 @@
 #include "paddle/fluid/framework/details/variable_visitor.h"
 #include "paddle/fluid/platform/profiler.h"
-DEFINE_bool(
+PADDLE_DEFINE_EXPORTED_bool(
     cpu_deterministic, false,
     "Whether to make the result of computation deterministic in CPU side.");
......
@@ -25,13 +25,14 @@ class VarDesc;
 }  // namespace framework
 }  // namespace paddle
-DEFINE_double(fuse_parameter_memory_size, -1.0,  // MBytes
-              "fuse_parameter_memory_size is up limited memory size(MB)"
-              "of one group parameters' gradient which is the input "
-              "of communication calling(e.g NCCLAllReduce). "
-              "The default value is 0, it means that "
-              "not set group according to memory_size.");
-DEFINE_int32(
+PADDLE_DEFINE_EXPORTED_double(
+    fuse_parameter_memory_size, -1.0,  // MBytes
+    "fuse_parameter_memory_size is up limited memory size(MB)"
+    "of one group parameters' gradient which is the input "
+    "of communication calling(e.g NCCLAllReduce). "
+    "The default value is 0, it means that "
+    "not set group according to memory_size.");
+PADDLE_DEFINE_EXPORTED_int32(
     fuse_parameter_groups_size, 1,
     "fuse_parameter_groups_size is the up limited size of one group "
     "parameters' gradient. "
......
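For context, these two flags describe a bucketing rule: gradients are packed into a group until the group holds `fuse_parameter_groups_size` tensors or exceeds `fuse_parameter_memory_size` MB, and each finished group becomes one input to a collective call such as NCCLAllReduce. A standalone sketch of that rule as the docstrings describe it (a hypothetical helper, not the actual fuse pass's code):

#include <cstddef>
#include <vector>

// Pack gradient tensors (given by byte size) into groups, closing the
// current group once it reaches groups_size tensors or memory_size_mb
// megabytes. memory_size_mb <= 0 disables the memory cap, mirroring the
// flag's -1.0 default above. Returns groups of tensor indices.
std::vector<std::vector<size_t>> GroupGradients(
    const std::vector<size_t> &grad_bytes, double memory_size_mb,
    int groups_size) {
  std::vector<std::vector<size_t>> groups(1);
  double group_mb = 0.0;
  for (size_t i = 0; i < grad_bytes.size(); ++i) {
    groups.back().push_back(i);
    group_mb += grad_bytes[i] / (1024.0 * 1024.0);
    bool full = static_cast<int>(groups.back().size()) >= groups_size ||
                (memory_size_mb > 0 && group_mb >= memory_size_mb);
    if (full && i + 1 < grad_bytes.size()) {
      groups.emplace_back();
      group_mb = 0.0;
    }
  }
  return groups;
}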
@@ -17,8 +17,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/graph.h"
 #include "paddle/fluid/framework/operator.h"
-DEFINE_bool(convert_all_blocks, true,
-            "Convert all blocks in program into SSAgraphs");
+PADDLE_DEFINE_EXPORTED_bool(convert_all_blocks, true,
+                            "Convert all blocks in program into SSAgraphs");
 namespace paddle {
 namespace framework {
......
@@ -18,9 +18,9 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_proto_maker.h"
 DECLARE_bool(convert_all_blocks);
-DEFINE_string(print_sub_graph_dir, "",
-              "FLAGS_print_sub_graph_dir is used "
-              "to print the nodes of sub_graphs.");
+PADDLE_DEFINE_EXPORTED_string(print_sub_graph_dir, "",
+                              "FLAGS_print_sub_graph_dir is used "
+                              "to print the nodes of sub_graphs.");
 namespace paddle {
 namespace framework {
......
@@ -19,7 +19,8 @@
 #include "paddle/fluid/framework/details/share_tensor_buffer_functor.h"
-DEFINE_bool(new_executor_use_inplace, true, "Use inplace in new executor");
+PADDLE_DEFINE_EXPORTED_bool(new_executor_use_inplace, true,
+                            "Use inplace in new executor");
 namespace paddle {
 namespace framework {
......
@@ -47,7 +47,8 @@ class LoDTensor;
 DECLARE_bool(benchmark);
 DECLARE_bool(check_nan_inf);
 DECLARE_bool(enable_unused_var_check);
-DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");
+PADDLE_DEFINE_EXPORTED_int32(inner_op_parallelism, 0,
+                             "number of threads for inner op");
 namespace paddle {
 namespace framework {
......
@@ -46,11 +46,13 @@ DECLARE_double(eager_delete_tensor_gb);
 #ifdef WITH_GPERFTOOLS
 #include "gperftools/profiler.h"
 #endif
-DEFINE_string(pe_profile_fname, "",
-              "Profiler filename for PE, which generated by gperftools."
-              "Only valid when compiled `WITH_PRIFILER=ON`. Empty if disable.");
-DEFINE_bool(enable_parallel_graph, false,
-            "Force disable parallel graph execution mode if set false.");
+PADDLE_DEFINE_EXPORTED_string(
+    pe_profile_fname, "",
+    "Profiler filename for PE, which generated by gperftools."
+    "Only valid when compiled `WITH_PRIFILER=ON`. Empty if disable.");
+PADDLE_DEFINE_EXPORTED_bool(
+    enable_parallel_graph, false,
+    "Force disable parallel graph execution mode if set false.");
 namespace paddle {
 namespace framework {
......
@@ -19,7 +19,7 @@ limitations under the License. */
 DECLARE_bool(benchmark);
-DEFINE_bool(
+PADDLE_DEFINE_EXPORTED_bool(
     eager_delete_scope, true,
     "Delete local scope eagerly. It will reduce GPU memory usage but "
     "slow down the destruction of variables.(around 1% performance harm)");
......
@@ -23,9 +23,10 @@ limitations under the License. */
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/platform/enforce.h"
-DEFINE_bool(enable_unused_var_check, false,
-            "Checking whether operator contains unused inputs, "
-            "especially for grad operator. It should be in unittest.");
+PADDLE_DEFINE_EXPORTED_bool(
+    enable_unused_var_check, false,
+    "Checking whether operator contains unused inputs, "
+    "especially for grad operator. It should be in unittest.");
 namespace paddle {
 namespace framework {
......
@@ -38,27 +38,6 @@
 PADDLE_FORCE_LINK_FLAG(free_idle_chunk);
 PADDLE_FORCE_LINK_FLAG(free_when_no_cache_hit);
-// debug
-DECLARE_bool(cpu_deterministic);
-// IR
-DECLARE_bool(convert_all_blocks);
-// executor
-DECLARE_bool(enable_parallel_graph);
-DECLARE_string(pe_profile_fname);
-DECLARE_string(print_sub_graph_dir);
-DECLARE_bool(new_executor_use_inplace);
-// memory management
-DECLARE_bool(eager_delete_scope);
-DECLARE_int32(fuse_parameter_groups_size);
-DECLARE_double(fuse_parameter_memory_size);
-// others
-DECLARE_int32(inner_op_parallelism);
-DECLARE_bool(enable_unused_var_check);
 // NOTE: where are these 2 flags from?
 #ifdef PADDLE_WITH_DISTRIBUTE
 DECLARE_int32(rpc_get_thread_num);
@@ -253,18 +232,6 @@ struct RegisterGetterSetterVisitor : public boost::static_visitor<void> {
 };
 static void RegisterGlobalVarGetterSetter() {
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_cpu_deterministic);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_convert_all_blocks);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_enable_parallel_graph);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_pe_profile_fname);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_print_sub_graph_dir);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_new_executor_use_inplace);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_eager_delete_scope);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_fuse_parameter_groups_size);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_fuse_parameter_memory_size);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_inner_op_parallelism);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_enable_unused_var_check);
 #ifdef PADDLE_WITH_DITRIBUTE
   REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_rpc_get_thread_num);
   REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_rpc_prefetch_thread_num);
......
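The two deletion hunks above are the payoff of the macro change: the binding file previously had to re-declare every flag with `DECLARE_*` and register it by hand via `REGISTER_PUBLIC_GLOBAL_VAR`, and both lists could be removed because `PADDLE_DEFINE_EXPORTED_*` now handles exposure at the definition site. Consumers in other translation units keep using the ordinary gflags pattern, since the exported flag is still a normal gflags flag (standard gflags usage shown below, not code from this commit):

// Consuming a flag defined elsewhere; unchanged by this commit, because
// PADDLE_DEFINE_EXPORTED_bool still defines a regular gflags flag -- it
// additionally exports the symbol from the shared library.
#include "gflags/gflags.h"

DECLARE_bool(cpu_deterministic);  // matches the definition in the first hunk

bool UseDeterministicCpuKernels() { return FLAGS_cpu_deterministic; }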