Unverified commit 885c61f0, authored by arlesniak, committed by GitHub

Add use of global flag 'use_mkldnn' to layer_helper (#26497)

* get use of global 'use_mkldnn' in layer_helper

* update for CI

* update for CI, relu test

* update for CI, relu test added, make FLAGS_use_mkldnn a public flag

* added more strict tests, fixes after review

* fixes after review

* fixes after review, CI stuff
Parent f44420c8
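
For context, the flag is meant to be flipped from Python and then picked up automatically when ops are traced in dygraph mode. Below is a minimal sketch of that pattern, mirroring the new unit test added at the end of this diff; it assumes a Paddle build compiled with MKL-DNN support.

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(
            np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32))
        # Turn on the now-public global flag; the dygraph tracer then adds
        # the use_mkldnn attribute to every op it traces.
        fluid.set_flags({'FLAGS_use_mkldnn': True})
        try:
            y = fluid.layers.relu(x)
        finally:
            # Restore the default so later code is unaffected.
            fluid.set_flags({'FLAGS_use_mkldnn': False})

The try/finally mirrors the new test: the flag is global process state, so it is restored right after the op that should exercise the MKL-DNN path.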
@@ -21,6 +21,8 @@
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/fluid/string/string_helper.h"
+
+DECLARE_bool(use_mkldnn);
 namespace paddle {
 namespace imperative {
@@ -47,6 +49,9 @@ void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
                      const NameVarBaseMap& outs, framework::AttributeMap attrs,
                      const platform::Place& place, bool trace_backward) {
   VLOG(1) << "Trace Op: " << type;
+  if (FLAGS_use_mkldnn) {
+    attrs["use_mkldnn"] = true;
+  }
   auto op = framework::OpRegistry::CreateOp(type, {}, {}, {}, false);
   const auto& op_info = op->Info();
   auto* attr_checker = op_info.Checker();
...
@@ -334,8 +334,7 @@
 } while (0)
 static void RegisterGlobalVarGetterSetter() {
-  REGISTER_PRIVATE_GLOBAL_VAR(/*is_writable=*/false, FLAGS_use_mkldnn,
-                              FLAGS_free_idle_chunk,
+  REGISTER_PRIVATE_GLOBAL_VAR(/*is_writable=*/false, FLAGS_free_idle_chunk,
                               FLAGS_free_when_no_cache_hit);
   REGISTER_PUBLIC_GLOBAL_VAR(
@@ -349,7 +348,7 @@ static void RegisterGlobalVarGetterSetter() {
       FLAGS_init_allocated_mem, FLAGS_initial_cpu_memory_in_mb,
       FLAGS_memory_fraction_of_eager_deletion, FLAGS_use_pinned_memory,
       FLAGS_benchmark, FLAGS_inner_op_parallelism, FLAGS_tracer_profile_fname,
-      FLAGS_paddle_num_threads);
+      FLAGS_paddle_num_threads, FLAGS_use_mkldnn);
 #ifdef PADDLE_WITH_CUDA
   REGISTER_PUBLIC_GLOBAL_VAR(
...
@@ -40,7 +40,7 @@ class TestGetAndSetFlagsErrors(unittest.TestCase):
     def test_errors(self):
         flags_list = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']
         flag = 1
-        flag_private = {'FLAGS_use_mkldnn': True}
+        flag_private = {'FLAGS_free_idle_chunk': True}
         # flags type of set_flags should be dict.
         def test_set_flags_input_type():
@@ -51,7 +51,7 @@ class TestGetAndSetFlagsErrors(unittest.TestCase):
         # flags in set_flags should be public flags.
         def test_set_private_flag():
-            fluid.get_flags('FLAGS_use_mkldnn')
+            fluid.set_flags(flag_private)
         self.assertRaises(ValueError, test_set_private_flag)
@@ -63,7 +63,7 @@ class TestGetAndSetFlagsErrors(unittest.TestCase):
         # flags in get_flags should be public flags.
        def test_get_private_flag():
-            fluid.get_flags('FLAGS_use_mkldnn')
+            fluid.get_flags('FLAGS_free_idle_chunk')
         self.assertRaises(ValueError, test_get_private_flag)
...
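
Because FLAGS_use_mkldnn moves from the private to the public group in this commit, the error tests above switch their private-flag example to FLAGS_free_idle_chunk. A small sketch of the distinction the tests rely on (flag names taken from the diff; get_flags is assumed to return a dict keyed by flag name):

    import paddle.fluid as fluid

    # Public flag after this commit: reading and writing it succeeds.
    fluid.set_flags({'FLAGS_use_mkldnn': True})
    print(fluid.get_flags('FLAGS_use_mkldnn'))

    # FLAGS_free_idle_chunk stays private, so the flags API rejects it,
    # which is exactly what the ValueError assertions above check.
    try:
        fluid.get_flags('FLAGS_free_idle_chunk')
    except ValueError:
        print('FLAGS_free_idle_chunk is a private flag')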
@@ -26,7 +26,7 @@ class VarInfo(object):
 class TestGlobalVarGetterSetter(unittest.TestCase):
     def test_main(self):
         var_infos = [
-            VarInfo("FLAGS_use_mkldnn", bool, False),
+            VarInfo("FLAGS_free_idle_chunk", bool, False),
             VarInfo("FLAGS_eager_delete_tensor_gb", float, True),
         ]
...
@@ -19,6 +19,7 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid import Linear
+from paddle.fluid.layer_helper import LayerHelper
 from test_imperative_base import new_program_scope
 import paddle.fluid.dygraph_utils as dygraph_utils
 from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper
@@ -636,6 +637,31 @@ class TestDygraphUtils(unittest.TestCase):
             res2 = fluid.layers.sigmoid(a)
             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
+
+    def test_append_activation_in_dygraph_use_mkldnn(self):
+        a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)
+        helper = LayerHelper(
+            fluid.unique_name.generate("test"), act="relu", use_mkldnn=True)
+        func = helper.append_activation
+        with fluid.dygraph.guard():
+            a = fluid.dygraph.to_variable(a_np)
+            res1 = func(a)
+            res2 = fluid.layers.relu(a)
+            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
+
+    def test_append_activation_in_dygraph_global_use_mkldnn(self):
+        a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)
+        helper = LayerHelper(fluid.unique_name.generate("test"), act="relu")
+        func = helper.append_activation
+        with fluid.dygraph.guard():
+            a = fluid.dygraph.to_variable(a_np)
+            fluid.set_flags({'FLAGS_use_mkldnn': True})
+            try:
+                res1 = func(a)
+            finally:
+                fluid.set_flags({'FLAGS_use_mkldnn': False})
+            res2 = fluid.layers.relu(a)
+            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))

     def test_append_bias_in_dygraph_exception(self):
         with new_program_scope():
             np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32)
...