From 885c61f0861000bc884dd28e2b3d37020d2ff9af Mon Sep 17 00:00:00 2001
From: arlesniak
Date: Mon, 31 Aug 2020 10:52:05 +0200
Subject: [PATCH] Add use of global flag 'use_mkldnn' to layer_helper (#26497)

* get use of global 'use_mkldnn' in layer_helper
* update for CI
* update for CI, relu test
* update for CI, relu test added, make FLAGS_use_mkldnn a public flag
* added more strict tests, fixes after review
* fixes after review
* fixes after review, CI stuff
---
 paddle/fluid/imperative/tracer.cc             |  5 ++++
 .../pybind/global_value_getter_setter.cc      |  5 ++--
 .../tests/unittests/test_get_set_flags.py     |  6 ++---
 .../test_global_var_getter_setter.py          |  2 +-
 .../tests/unittests/test_imperative_basic.py  | 26 +++++++++++++++++++
 5 files changed, 37 insertions(+), 7 deletions(-)

diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc
index d09cb033603..1c364300d2c 100644
--- a/paddle/fluid/imperative/tracer.cc
+++ b/paddle/fluid/imperative/tracer.cc
@@ -21,6 +21,8 @@
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/fluid/string/string_helper.h"
 
+DECLARE_bool(use_mkldnn);
+
 namespace paddle {
 namespace imperative {
 
@@ -47,6 +49,9 @@ void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
                      const NameVarBaseMap& outs, framework::AttributeMap attrs,
                      const platform::Place& place, bool trace_backward) {
   VLOG(1) << "Trace Op: " << type;
+  if (FLAGS_use_mkldnn) {
+    attrs["use_mkldnn"] = true;
+  }
   auto op = framework::OpRegistry::CreateOp(type, {}, {}, {}, false);
   const auto& op_info = op->Info();
   auto* attr_checker = op_info.Checker();
diff --git a/paddle/fluid/pybind/global_value_getter_setter.cc b/paddle/fluid/pybind/global_value_getter_setter.cc
index f1084018d9c..318178d5eb9 100644
--- a/paddle/fluid/pybind/global_value_getter_setter.cc
+++ b/paddle/fluid/pybind/global_value_getter_setter.cc
@@ -334,8 +334,7 @@ void BindGlobalValueGetterSetter(pybind11::module *module) {
   } while (0)
 
 static void RegisterGlobalVarGetterSetter() {
-  REGISTER_PRIVATE_GLOBAL_VAR(/*is_writable=*/false, FLAGS_use_mkldnn,
-                              FLAGS_free_idle_chunk,
+  REGISTER_PRIVATE_GLOBAL_VAR(/*is_writable=*/false, FLAGS_free_idle_chunk,
                               FLAGS_free_when_no_cache_hit);
 
   REGISTER_PUBLIC_GLOBAL_VAR(
@@ -349,7 +348,7 @@ static void RegisterGlobalVarGetterSetter() {
       FLAGS_init_allocated_mem, FLAGS_initial_cpu_memory_in_mb,
       FLAGS_memory_fraction_of_eager_deletion, FLAGS_use_pinned_memory,
       FLAGS_benchmark, FLAGS_inner_op_parallelism, FLAGS_tracer_profile_fname,
-      FLAGS_paddle_num_threads);
+      FLAGS_paddle_num_threads, FLAGS_use_mkldnn);
 
 #ifdef PADDLE_WITH_CUDA
   REGISTER_PUBLIC_GLOBAL_VAR(
diff --git a/python/paddle/fluid/tests/unittests/test_get_set_flags.py b/python/paddle/fluid/tests/unittests/test_get_set_flags.py
index 2a5b8454e03..e2761ff4358 100644
--- a/python/paddle/fluid/tests/unittests/test_get_set_flags.py
+++ b/python/paddle/fluid/tests/unittests/test_get_set_flags.py
@@ -40,7 +40,7 @@ class TestGetAndSetFlagsErrors(unittest.TestCase):
     def test_errors(self):
         flags_list = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']
         flag = 1
-        flag_private = {'FLAGS_use_mkldnn': True}
+        flag_private = {'FLAGS_free_idle_chunk': True}
 
         # flags type of set_flags should be dict.
         def test_set_flags_input_type():
@@ -51,7 +51,7 @@ class TestGetAndSetFlagsErrors(unittest.TestCase):
 
         # flags in set_flags should be public flags.
         def test_set_private_flag():
-            fluid.get_flags('FLAGS_use_mkldnn')
+            fluid.set_flags(flag_private)
 
         self.assertRaises(ValueError, test_set_private_flag)
 
@@ -63,7 +63,7 @@ class TestGetAndSetFlagsErrors(unittest.TestCase):
 
         # flags in get_flags should be public flags.
         def test_get_private_flag():
-            fluid.get_flags('FLAGS_use_mkldnn')
+            fluid.get_flags('FLAGS_free_idle_chunk')
 
         self.assertRaises(ValueError, test_get_private_flag)
 
diff --git a/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py b/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py
index 548b7583115..3394a08de8b 100644
--- a/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py
+++ b/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py
@@ -26,7 +26,7 @@ class VarInfo(object):
 class TestGlobalVarGetterSetter(unittest.TestCase):
     def test_main(self):
         var_infos = [
-            VarInfo("FLAGS_use_mkldnn", bool, False),
+            VarInfo("FLAGS_free_idle_chunk", bool, False),
             VarInfo("FLAGS_eager_delete_tensor_gb", float, True),
         ]
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py
index b74182d27ab..74cfeab601b 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py
@@ -19,6 +19,7 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid import Linear
+from paddle.fluid.layer_helper import LayerHelper
 from test_imperative_base import new_program_scope
 import paddle.fluid.dygraph_utils as dygraph_utils
 from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper
@@ -636,6 +637,31 @@ class TestDygraphUtils(unittest.TestCase):
             res2 = fluid.layers.sigmoid(a)
             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
 
+    def test_append_activation_in_dygraph_use_mkldnn(self):
+        a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)
+        helper = LayerHelper(
+            fluid.unique_name.generate("test"), act="relu", use_mkldnn=True)
+        func = helper.append_activation
+        with fluid.dygraph.guard():
+            a = fluid.dygraph.to_variable(a_np)
+            res1 = func(a)
+            res2 = fluid.layers.relu(a)
+            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
+
+    def test_append_activation_in_dygraph_global_use_mkldnn(self):
+        a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)
+        helper = LayerHelper(fluid.unique_name.generate("test"), act="relu")
+        func = helper.append_activation
+        with fluid.dygraph.guard():
+            a = fluid.dygraph.to_variable(a_np)
+            fluid.set_flags({'FLAGS_use_mkldnn': True})
+            try:
+                res1 = func(a)
+            finally:
+                fluid.set_flags({'FLAGS_use_mkldnn': False})
+            res2 = fluid.layers.relu(a)
+            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
+
     def test_append_bias_in_dygraph_exception(self):
         with new_program_scope():
             np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32)
-- 
GitLab
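
Usage sketch (illustrative, not part of the patch): with FLAGS_use_mkldnn moved
to the public, writable registry, it can be toggled from Python via
fluid.set_flags, and Tracer::TraceOp then stamps use_mkldnn=true onto every op
traced in dygraph mode while the flag is set. A minimal sketch, assuming a
Paddle build with MKL-DNN support (relu is just an example op):

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.ones((2, 3), dtype=np.float32))
        fluid.set_flags({'FLAGS_use_mkldnn': True})  # now a public flag
        try:
            # Tracer::TraceOp adds the use_mkldnn attribute to this op.
            y = fluid.layers.relu(x)
        finally:
            fluid.set_flags({'FLAGS_use_mkldnn': False})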