diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 4c4fb03c229b3938daf710c3708c83c3cda05665..9cb2b5ee71d75c275937698ce8791a4eb9753645 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -30,7 +30,7 @@ DEFINE_bool(check_nan_inf, false,
             "Checking whether operator produce NAN/INF or not. It will be "
             "extremely slow so please use this flag wisely.");
 DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");
-DEFINE_int32(min_param_size_to_use_multithread, 0, "");
+DEFINE_int32(min_row_size_to_use_multithread, 0, "");
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index eea3db6577af4e1a0786d05ae47d1ac434b71301..2962dff122d59b220986f4b4b2b1c3496ad9da72 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -35,7 +35,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/variant.h"
 
 DECLARE_int32(inner_op_parallelism);
-DECLARE_int32(min_param_size_to_use_multithread);
+DECLARE_int32(min_row_size_to_use_multithread);
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/operators/optimizers/adam_op.h b/paddle/fluid/operators/optimizers/adam_op.h
index e69ede623977bbca13bf421d0edc57798e288c5b..9cd7906877f13b5b5f963d791be3e6a6ac8361e8 100644
--- a/paddle/fluid/operators/optimizers/adam_op.h
+++ b/paddle/fluid/operators/optimizers/adam_op.h
@@ -478,12 +478,12 @@ class AdamOpKernel : public framework::OpKernel<T> {
         }
       }
     } else if (FLAGS_inner_op_parallelism > 1 &&
-               FLAGS_min_param_size_to_use_multithread > 0 &&
-               param.numel() > FLAGS_min_param_size_to_use_multithread) {
+               FLAGS_min_row_size_to_use_multithread > 0 &&
+               param.dims()[0] > FLAGS_min_row_size_to_use_multithread) {
       VLOG(3) << "use multi thread, inner_op_parallelism="
               << FLAGS_inner_op_parallelism
-              << " min_param_size_to_use_multithread="
-              << FLAGS_min_param_size_to_use_multithread;
+              << " min_row_size_to_use_multithread="
+              << FLAGS_min_row_size_to_use_multithread;
       if (FLAGS_inner_op_parallelism > 10) {
         LOG(WARNING) << "FLAGS_inner_op_parallelism "
                      << FLAGS_inner_op_parallelism << " is two large!";
diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index 691b49130b035405a45df15453df87cb80c19582..b577dfc3e185391adf09a963c14f01eafaacdaf4 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -129,7 +129,7 @@ def __bootstrap__():
         'eager_delete_tensor_gb', 'fast_eager_deletion_mode',
         'allocator_strategy', 'reader_queue_speed_test_mode',
         'print_sub_graph_dir', 'pe_profile_fname', 'warpctc_dir',
-        'inner_op_parallelism', 'min_param_size_to_use_multithread',
+        'inner_op_parallelism', 'min_row_size_to_use_multithread',
         'enable_parallel_graph'
     ]
     if 'Darwin' not in sysstr:
diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 79edc92055c41bce3bd1c2eb3c71788d19710d94..ac092e19b4d32f38e4bc0b2a64760836892f6029 100644
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -87,7 +87,7 @@ list(REMOVE_ITEM TEST_OPS test_nearest_interp_op)
 foreach(TEST_OP ${TEST_OPS})
     py_test_modules(${TEST_OP} MODULES ${TEST_OP})
 endforeach(TEST_OP)
-py_test_modules(test_adam_op_multi_thread MODULES test_adam_op ENVS FLAGS_inner_op_parallelism=4 FLAGS_min_param_size_to_use_multithread=2)
+py_test_modules(test_adam_op_multi_thread MODULES test_adam_op ENVS FLAGS_inner_op_parallelism=4 FLAGS_min_row_size_to_use_multithread=2)
 py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR} SERIAL)
 py_test_modules(test_bilinear_interp_op MODULES test_bilinear_interp_op SERIAL)
 py_test_modules(test_nearest_interp_op MODULES test_nearest_interp_op SERIAL)