From 87b4eb1da497c1ac4cc1a3d50a1f317b839c954d Mon Sep 17 00:00:00 2001
From: Qiao Longfei
Date: Mon, 7 Jan 2019 17:13:47 +0800
Subject: [PATCH] change min_param_size_to_use_multithread to
 min_row_size_to_use_multithread

---
 paddle/fluid/framework/operator.cc                 | 2 +-
 paddle/fluid/framework/operator.h                  | 2 +-
 paddle/fluid/operators/optimizers/adam_op.h        | 8 ++++----
 python/paddle/fluid/__init__.py                    | 2 +-
 python/paddle/fluid/tests/unittests/CMakeLists.txt | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 4c4fb03c22..9cb2b5ee71 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -30,7 +30,7 @@ DEFINE_bool(check_nan_inf, false,
             "Checking whether operator produce NAN/INF or not. It will be "
             "extremely slow so please use this flag wisely.");
 DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");
-DEFINE_int32(min_param_size_to_use_multithread, 0, "");
+DEFINE_int32(min_row_size_to_use_multithread, 0, "");
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index eea3db6577..2962dff122 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -35,7 +35,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/variant.h"
 
 DECLARE_int32(inner_op_parallelism);
-DECLARE_int32(min_param_size_to_use_multithread);
+DECLARE_int32(min_row_size_to_use_multithread);
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/operators/optimizers/adam_op.h b/paddle/fluid/operators/optimizers/adam_op.h
index e69ede6239..9cd7906877 100644
--- a/paddle/fluid/operators/optimizers/adam_op.h
+++ b/paddle/fluid/operators/optimizers/adam_op.h
@@ -478,12 +478,12 @@ class AdamOpKernel : public framework::OpKernel<T> {
           }
         }
       } else if (FLAGS_inner_op_parallelism > 1 &&
-                 FLAGS_min_param_size_to_use_multithread > 0 &&
-                 param.numel() > FLAGS_min_param_size_to_use_multithread) {
+                 FLAGS_min_row_size_to_use_multithread > 0 &&
+                 param.dims()[0] > FLAGS_min_row_size_to_use_multithread) {
        VLOG(3) << "use multi thread, inner_op_parallelism="
                << FLAGS_inner_op_parallelism
-               << " min_param_size_to_use_multithread="
-               << FLAGS_min_param_size_to_use_multithread;
+               << " min_row_size_to_use_multithread="
+               << FLAGS_min_row_size_to_use_multithread;
        if (FLAGS_inner_op_parallelism > 10) {
          LOG(WARNING) << "FLAGS_inner_op_parallelism "
                       << FLAGS_inner_op_parallelism << " is two large!";
diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index 691b49130b..b577dfc3e1 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -129,7 +129,7 @@ def __bootstrap__():
         'eager_delete_tensor_gb', 'fast_eager_deletion_mode',
         'allocator_strategy', 'reader_queue_speed_test_mode',
         'print_sub_graph_dir', 'pe_profile_fname', 'warpctc_dir',
-        'inner_op_parallelism', 'min_param_size_to_use_multithread',
+        'inner_op_parallelism', 'min_row_size_to_use_multithread',
         'enable_parallel_graph'
     ]
     if 'Darwin' not in sysstr:
diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 79edc92055..ac092e19b4 100644
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -87,7 +87,7 @@ list(REMOVE_ITEM TEST_OPS test_nearest_interp_op)
 foreach(TEST_OP ${TEST_OPS})
     py_test_modules(${TEST_OP} MODULES ${TEST_OP})
 endforeach(TEST_OP)
-py_test_modules(test_adam_op_multi_thread MODULES test_adam_op ENVS FLAGS_inner_op_parallelism=4 FLAGS_min_param_size_to_use_multithread=2)
+py_test_modules(test_adam_op_multi_thread MODULES test_adam_op ENVS FLAGS_inner_op_parallelism=4 FLAGS_min_row_size_to_use_multithread=2)
 py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR} SERIAL)
 py_test_modules(test_bilinear_interp_op MODULES test_bilinear_interp_op SERIAL)
 py_test_modules(test_nearest_interp_op MODULES test_nearest_interp_op SERIAL)
-- 
GitLab
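For context, a minimal sketch of how the renamed flag is switched on, assuming the usual FLAGS_* environment-variable mechanism that __bootstrap__() reads, and mirroring the ENVS the new test_adam_op_multi_thread target passes above; the values 4 and 2 are just the test's settings, not recommendations:

# Sketch only: enable the multi-threaded Adam path via environment variables,
# mirroring the ENVS of test_adam_op_multi_thread; the values are illustrative.
import os

# Both flags must be set before paddle.fluid is imported, so that
# __bootstrap__() can pick them up from the environment.
os.environ["FLAGS_inner_op_parallelism"] = "4"
os.environ["FLAGS_min_row_size_to_use_multithread"] = "2"

import paddle.fluid  # noqa: F401  importing triggers __bootstrap__()

# With this patch, AdamOpKernel takes the multi-threaded branch only when
# FLAGS_inner_op_parallelism > 1 and the parameter's row count
# (param.dims()[0]) exceeds FLAGS_min_row_size_to_use_multithread, instead
# of comparing the total element count (param.numel()) as before.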