Unverified commit 885c61f0, authored by arlesniak, committed by GitHub

Add use of global flag 'use_mkldnn' to layer_helper (#26497)

* use the global 'use_mkldnn' flag in layer_helper

* update for CI

* update for CI, relu test

* update for CI, relu test added, make FLAGS_use_mkldnn a public flag

* added more strict tests, fixes after review

* fixes after review

* fixes after review, CI stuff
Parent f44420c8
@@ -21,6 +21,8 @@
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/fluid/string/string_helper.h"

+DECLARE_bool(use_mkldnn);
+
 namespace paddle {
 namespace imperative {
@@ -47,6 +49,9 @@ void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
                      const NameVarBaseMap& outs, framework::AttributeMap attrs,
                      const platform::Place& place, bool trace_backward) {
   VLOG(1) << "Trace Op: " << type;
+  if (FLAGS_use_mkldnn) {
+    attrs["use_mkldnn"] = true;
+  }
   auto op = framework::OpRegistry::CreateOp(type, {}, {}, {}, false);
   const auto& op_info = op->Info();
   auto* attr_checker = op_info.Checker();
...
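Once the flag is public (see the pybind registration change below), the tracer change above can be exercised entirely from Python. A minimal sketch, assuming an MKL-DNN-enabled build and the fluid.set_flags API of this era:

import numpy as np
import paddle.fluid as fluid

# Enabling the global flag makes the dygraph tracer stamp
# use_mkldnn=True onto the attrs of every op it traces.
fluid.set_flags({'FLAGS_use_mkldnn': True})
try:
    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(
            np.random.uniform(-1, 1, (2, 3)).astype('float32'))
        y = fluid.layers.relu(x)  # picks the MKL-DNN kernel when available
finally:
    # Restore global state so unrelated code is unaffected.
    fluid.set_flags({'FLAGS_use_mkldnn': False})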
@@ -334,8 +334,7 @@ void BindGlobalValueGetterSetter(pybind11::module *module) {
   } while (0)

 static void RegisterGlobalVarGetterSetter() {
-  REGISTER_PRIVATE_GLOBAL_VAR(/*is_writable=*/false, FLAGS_use_mkldnn,
-                              FLAGS_free_idle_chunk,
+  REGISTER_PRIVATE_GLOBAL_VAR(/*is_writable=*/false, FLAGS_free_idle_chunk,
                               FLAGS_free_when_no_cache_hit);

   REGISTER_PUBLIC_GLOBAL_VAR(
@@ -349,7 +348,7 @@ static void RegisterGlobalVarGetterSetter() {
       FLAGS_init_allocated_mem, FLAGS_initial_cpu_memory_in_mb,
       FLAGS_memory_fraction_of_eager_deletion, FLAGS_use_pinned_memory,
       FLAGS_benchmark, FLAGS_inner_op_parallelism, FLAGS_tracer_profile_fname,
-      FLAGS_paddle_num_threads);
+      FLAGS_paddle_num_threads, FLAGS_use_mkldnn);

 #ifdef PADDLE_WITH_CUDA
   REGISTER_PUBLIC_GLOBAL_VAR(
...
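With FLAGS_use_mkldnn moved from the private to the public register, it becomes visible to fluid.get_flags/fluid.set_flags, while FLAGS_free_idle_chunk takes over as the representative private flag in the tests below. A short sketch of the resulting behavior (the printed value is an assumption, not verified output):

import paddle.fluid as fluid

# Public flag: readable and writable from Python.
print(fluid.get_flags('FLAGS_use_mkldnn'))  # e.g. {'FLAGS_use_mkldnn': False}
fluid.set_flags({'FLAGS_use_mkldnn': True})

# Private flag: both get_flags and set_flags raise ValueError.
try:
    fluid.get_flags('FLAGS_free_idle_chunk')
except ValueError as err:
    print(err)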
@@ -40,7 +40,7 @@ class TestGetAndSetFlagsErrors(unittest.TestCase):
     def test_errors(self):
         flags_list = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']
         flag = 1
-        flag_private = {'FLAGS_use_mkldnn': True}
+        flag_private = {'FLAGS_free_idle_chunk': True}

         # flags type of set_flags should be dict.
         def test_set_flags_input_type():
@@ -51,7 +51,7 @@ class TestGetAndSetFlagsErrors(unittest.TestCase):
         # flags in set_flags should be public flags.
         def test_set_private_flag():
-            fluid.get_flags('FLAGS_use_mkldnn')
+            fluid.set_flags(flag_private)

         self.assertRaises(ValueError, test_set_private_flag)
@@ -63,7 +63,7 @@ class TestGetAndSetFlagsErrors(unittest.TestCase):
         # flags in get_flags should be public flags.
         def test_get_private_flag():
-            fluid.get_flags('FLAGS_use_mkldnn')
+            fluid.get_flags('FLAGS_free_idle_chunk')

         self.assertRaises(ValueError, test_get_private_flag)
...
@@ -26,7 +26,7 @@ class VarInfo(object):
 class TestGlobalVarGetterSetter(unittest.TestCase):
     def test_main(self):
         var_infos = [
-            VarInfo("FLAGS_use_mkldnn", bool, False),
+            VarInfo("FLAGS_free_idle_chunk", bool, False),
             VarInfo("FLAGS_eager_delete_tensor_gb", float, True),
         ]
...
@@ -19,6 +19,7 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid import Linear
+from paddle.fluid.layer_helper import LayerHelper
 from test_imperative_base import new_program_scope
 import paddle.fluid.dygraph_utils as dygraph_utils
 from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper
@@ -636,6 +637,31 @@ class TestDygraphUtils(unittest.TestCase):
             res2 = fluid.layers.sigmoid(a)
             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))

+    def test_append_activation_in_dygraph_use_mkldnn(self):
+        a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)
+        helper = LayerHelper(
+            fluid.unique_name.generate("test"), act="relu", use_mkldnn=True)
+        func = helper.append_activation
+        with fluid.dygraph.guard():
+            a = fluid.dygraph.to_variable(a_np)
+            res1 = func(a)
+            res2 = fluid.layers.relu(a)
+            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
+
+    def test_append_activation_in_dygraph_global_use_mkldnn(self):
+        a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)
+        helper = LayerHelper(fluid.unique_name.generate("test"), act="relu")
+        func = helper.append_activation
+        with fluid.dygraph.guard():
+            a = fluid.dygraph.to_variable(a_np)
+            fluid.set_flags({'FLAGS_use_mkldnn': True})
+            try:
+                res1 = func(a)
+            finally:
+                fluid.set_flags({'FLAGS_use_mkldnn': False})
+            res2 = fluid.layers.relu(a)
+            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
+
     def test_append_bias_in_dygraph_exception(self):
         with new_program_scope():
             np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32)
...
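The try/finally in test_append_activation_in_dygraph_global_use_mkldnn exists because the flag is process-global state; leaving it set would leak into later tests. A hypothetical helper (not part of this commit) could package the same save-and-restore pattern:

from contextlib import contextmanager

import paddle.fluid as fluid

# Hypothetical convenience wrapper: temporarily set a public flag and
# restore its previous value on exit, mirroring the try/finally above.
@contextmanager
def temporary_flag(name, value):
    old_value = fluid.get_flags(name)[name]  # get_flags returns a dict
    fluid.set_flags({name: value})
    try:
        yield
    finally:
        fluid.set_flags({name: old_value})

# Usage:
#   with temporary_flag('FLAGS_use_mkldnn', True):
#       ...  # ops traced here carry use_mkldnn=True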