Commit 042f3524 authored by tensor-tang

add flag use_mkl_packed

Parent 89bbc4f6
......@@ -29,6 +29,7 @@ DECLARE_bool(with_gpu);
DECLARE_bool(parallel_nn);
DECLARE_string(config_args);
DECLARE_bool(use_mkldnn);
DECLARE_bool(use_mkl_packed);
const char *kConfigParserModuleName = "paddle.trainer.config_parser";
const char *kConfigParserFuncName = "parse_config_and_serialize";
......@@ -46,6 +47,7 @@ TrainerConfigHelper::TrainerConfigHelper(const std::string &configFilePath)
<< ",with_cost=" << FLAGS_with_cost << ",use_gpu=" << FLAGS_use_gpu
<< ",parallel_nn=" << FLAGS_parallel_nn
<< ",use_mkldnn=" << FLAGS_use_mkldnn
<< ",use_mkl_packed=" << FLAGS_use_mkl_packed
<< ",cudnn_version=" << hl_get_cudnn_lib_version();
if (!FLAGS_config_args.empty()) {
configArgs << "," << FLAGS_config_args;
......
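For context, the hunk above streams each flag into the comma-separated config_args string that TrainerConfigHelper hands to paddle.trainer.config_parser.parse_config_and_serialize. A rough sketch of the relevant portion of that string with the new key included (only the keys visible in this diff are shown; the values are placeholders, not taken from the commit):

    # Hypothetical excerpt of the config_args string built by TrainerConfigHelper.
    # Flag values here are illustrative defaults, not output from the code above.
    "with_cost=1,use_gpu=0,parallel_nn=0,use_mkldnn=0,use_mkl_packed=1,cudnn_version=0"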
......@@ -27,6 +27,12 @@ DEFINE_bool(use_mkldnn, false, "Default still keep use CPU training");
DEFINE_bool(use_mkldnn, false, "Only support CPU training");
#endif
#ifdef PADDLE_WITH_MKLML
DEFINE_bool(use_mkl_packed, true, "Default use MKL Packed Optimization");
#else
DEFINE_bool(use_mkl_packed, false, "Whether to use MKL Packed Optimization");
#endif
DEFINE_bool(parallel_nn,
false,
"Whether to use multi-threads to calculate one neural network."
......
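Because use_mkl_packed is defined through gflags just like use_mkldnn, it can also be toggled on the trainer command line. A hypothetical invocation is shown below; the binary name and the other arguments are examples only and are not part of this commit:

    # Disable MKL packed optimization for one training run (illustrative only).
    paddle train --config=trainer_config.py --use_mkl_packed=false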
......@@ -41,3 +41,4 @@ DECLARE_string(predict_file);
DECLARE_bool(prev_batch_state);
DECLARE_string(init_model_path);
DECLARE_bool(use_mkldnn);
DECLARE_bool(use_mkl_packed);
......@@ -135,6 +135,8 @@ def init(**kwargs):
cp.g_command_config_args['use_gpu'] = kwargs['use_gpu']
if 'use_mkldnn' in kwargs:
cp.g_command_config_args['use_mkldnn'] = kwargs['use_mkldnn']
if 'use_mkl_packed' in kwargs:
cp.g_command_config_args['use_mkl_packed'] = kwargs['use_mkl_packed']
assert 'parallel_nn' not in kwargs, ("currently 'parallel_nn' is not "
"supported in v2 APIs.")
......
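On the Python side, the new keyword is simply forwarded from init() into g_command_config_args, so a v2-API script can opt in when initializing. A minimal sketch, assuming the paddle.v2 package layout of this era; the other keyword arguments are illustrative:

    import paddle.v2 as paddle

    # 'use_mkl_packed' is picked up by the branch added above and passed
    # through to the config parser; the remaining kwargs are examples only.
    paddle.init(use_gpu=False, trainer_count=1, use_mkl_packed=True)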