Unverified commit d22914fd, authored by Zeng Jinle, committed by GitHub

change to PADDLE_DEFINE_EXPORTED (#35841)

Parent: fcfb0afe
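For context, a plain gflags `DEFINE_*` creates a flag but leaves it invisible to Paddle's global getter/setter layer unless the flag is also hand-`DECLARE`d and registered per flag, which is exactly the boilerplate the last two hunks below delete. A `PADDLE_DEFINE_EXPORTED_*` macro can fold that registration into the definition itself. The following is a minimal sketch of the idea, not Paddle's actual implementation; the `ExportedFlagRegistry` type and the `MY_DEFINE_EXPORTED_bool` name are invented for illustration.

```cpp
#include <functional>
#include <map>
#include <string>

#include "gflags/gflags.h"

// Hypothetical registry mapping flag names to getters. Paddle's real
// implementation differs; this only illustrates self-registration.
struct ExportedFlagRegistry {
  static std::map<std::string, std::function<bool()>>& BoolGetters() {
    static std::map<std::string, std::function<bool()>> getters;
    return getters;
  }
};

// Define the flag with gflags, then register a getter as a side effect of
// static initialization, so no separate DECLARE/REGISTER call is needed.
// The dummy bool exists only to force the lambda to run at startup.
#define MY_DEFINE_EXPORTED_bool(name, default_value, doc)        \
  DEFINE_bool(name, default_value, doc);                         \
  static const bool unused_flag_reg_##name = [] {                \
    ExportedFlagRegistry::BoolGetters()[#name] = [] {            \
      return FLAGS_##name;                                       \
    };                                                           \
    return true;                                                 \
  }()

// Usage then mirrors the conversions in the diff below:
MY_DEFINE_EXPORTED_bool(cpu_deterministic, false,
                        "Whether to make the result of computation "
                        "deterministic in CPU side.");
```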
@@ -19,7 +19,7 @@
 #include "paddle/fluid/framework/details/variable_visitor.h"
 #include "paddle/fluid/platform/profiler.h"
-DEFINE_bool(
+PADDLE_DEFINE_EXPORTED_bool(
     cpu_deterministic, false,
     "Whether to make the result of computation deterministic in CPU side.");
......
@@ -25,13 +25,14 @@ class VarDesc;
 } // namespace framework
 } // namespace paddle
-DEFINE_double(fuse_parameter_memory_size, -1.0,  // MBytes
+PADDLE_DEFINE_EXPORTED_double(
+    fuse_parameter_memory_size, -1.0,  // MBytes
     "fuse_parameter_memory_size is up limited memory size(MB)"
     "of one group parameters' gradient which is the input "
     "of communication calling(e.g NCCLAllReduce). "
     "The default value is 0, it means that "
     "not set group according to memory_size.");
-DEFINE_int32(
+PADDLE_DEFINE_EXPORTED_int32(
     fuse_parameter_groups_size, 1,
     "fuse_parameter_groups_size is the up limited size of one group "
     "parameters' gradient. "
......
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/graph.h"
 #include "paddle/fluid/framework/operator.h"
-DEFINE_bool(convert_all_blocks, true,
+PADDLE_DEFINE_EXPORTED_bool(convert_all_blocks, true,
             "Convert all blocks in program into SSAgraphs");
 namespace paddle {
......
@@ -18,7 +18,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_proto_maker.h"
 DECLARE_bool(convert_all_blocks);
-DEFINE_string(print_sub_graph_dir, "",
+PADDLE_DEFINE_EXPORTED_string(print_sub_graph_dir, "",
               "FLAGS_print_sub_graph_dir is used "
               "to print the nodes of sub_graphs.");
......
@@ -19,7 +19,8 @@
 #include "paddle/fluid/framework/details/share_tensor_buffer_functor.h"
-DEFINE_bool(new_executor_use_inplace, true, "Use inplace in new executor");
+PADDLE_DEFINE_EXPORTED_bool(new_executor_use_inplace, true,
+                            "Use inplace in new executor");
 namespace paddle {
 namespace framework {
......
@@ -47,7 +47,8 @@ class LoDTensor;
 DECLARE_bool(benchmark);
 DECLARE_bool(check_nan_inf);
 DECLARE_bool(enable_unused_var_check);
-DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");
+PADDLE_DEFINE_EXPORTED_int32(inner_op_parallelism, 0,
+                             "number of threads for inner op");
 namespace paddle {
 namespace framework {
......
@@ -46,10 +46,12 @@ DECLARE_double(eager_delete_tensor_gb);
 #ifdef WITH_GPERFTOOLS
 #include "gperftools/profiler.h"
 #endif
-DEFINE_string(pe_profile_fname, "",
+PADDLE_DEFINE_EXPORTED_string(
+    pe_profile_fname, "",
     "Profiler filename for PE, which generated by gperftools."
     "Only valid when compiled `WITH_PRIFILER=ON`. Empty if disable.");
-DEFINE_bool(enable_parallel_graph, false,
+PADDLE_DEFINE_EXPORTED_bool(
+    enable_parallel_graph, false,
     "Force disable parallel graph execution mode if set false.");
 namespace paddle {
......
@@ -19,7 +19,7 @@ limitations under the License. */
 DECLARE_bool(benchmark);
-DEFINE_bool(
+PADDLE_DEFINE_EXPORTED_bool(
     eager_delete_scope, true,
     "Delete local scope eagerly. It will reduce GPU memory usage but "
     "slow down the destruction of variables.(around 1% performance harm)");
......
@@ -23,7 +23,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/platform/enforce.h"
-DEFINE_bool(enable_unused_var_check, false,
+PADDLE_DEFINE_EXPORTED_bool(
+    enable_unused_var_check, false,
     "Checking whether operator contains unused inputs, "
    "especially for grad operator. It should be in unittest.");
......
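Nothing changes for C++ consumers of a converted flag: it is still an ordinary gflags global, so other translation units keep reading it through `DECLARE_*` exactly as before. A minimal consumer sketch (the function and its body are hypothetical, only the flag name comes from the diff):

```cpp
#include "gflags/gflags.h"

// The flag is defined (and now exported) in another translation unit;
// a consumer still just declares it the usual gflags way.
DECLARE_bool(enable_unused_var_check);

// Hypothetical consumer: do nothing unless the flag is switched on.
void MaybeWarnAboutUnusedVars() {
  if (!FLAGS_enable_unused_var_check) return;
  // ... scan the operator's inputs and warn about unused ones ...
}
```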
@@ -38,27 +38,6 @@
 PADDLE_FORCE_LINK_FLAG(free_idle_chunk);
 PADDLE_FORCE_LINK_FLAG(free_when_no_cache_hit);
-// debug
-DECLARE_bool(cpu_deterministic);
-// IR
-DECLARE_bool(convert_all_blocks);
-// executor
-DECLARE_bool(enable_parallel_graph);
-DECLARE_string(pe_profile_fname);
-DECLARE_string(print_sub_graph_dir);
-DECLARE_bool(new_executor_use_inplace);
-// memory management
-DECLARE_bool(eager_delete_scope);
-DECLARE_int32(fuse_parameter_groups_size);
-DECLARE_double(fuse_parameter_memory_size);
-// others
-DECLARE_int32(inner_op_parallelism);
-DECLARE_bool(enable_unused_var_check);
-// NOTE: where are these 2 flags from?
-#ifdef PADDLE_WITH_DISTRIBUTE
-DECLARE_int32(rpc_get_thread_num);
......
@@ -253,18 +232,6 @@ struct RegisterGetterSetterVisitor : public boost::static_visitor<void> {
 };
 static void RegisterGlobalVarGetterSetter() {
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_cpu_deterministic);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_convert_all_blocks);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_enable_parallel_graph);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_pe_profile_fname);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_print_sub_graph_dir);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_new_executor_use_inplace);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_eager_delete_scope);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_fuse_parameter_groups_size);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_fuse_parameter_memory_size);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_inner_op_parallelism);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_enable_unused_var_check);
-#ifdef PADDLE_WITH_DITRIBUTE
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_rpc_get_thread_num);
-  REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_rpc_prefetch_thread_num);
......
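The two deletion hunks above are the payoff of the conversion: once definitions self-register, the hand-maintained `DECLARE_*` list and the per-flag `REGISTER_PUBLIC_GLOBAL_VAR` calls (whose `PADDLE_WITH_DITRIBUTE` typo shows how easy such lists are to get wrong) can collapse into a single loop over whatever registry the export macro populates. A sketch under the same assumptions as the first example, reusing the hypothetical `ExportedFlagRegistry`; Paddle's real registry and registration hook differ:

```cpp
#include <iostream>

// Hypothetical replacement for the hand-written REGISTER_PUBLIC_GLOBAL_VAR
// list: walk every flag the export macro registered at static-init time.
void RegisterAllExportedBoolFlags() {
  for (const auto& entry : ExportedFlagRegistry::BoolGetters()) {
    // Real code would wire entry.second into the public getter/setter map;
    // printing the name and current value stands in for that side effect.
    std::cout << "exporting flag: " << entry.first
              << " = " << entry.second() << "\n";
  }
}
```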