Unverified commit abfdffa0, authored by Sylwester Fraczek, committed by GitHub

add use_mkldnn attribute to ops in dygraph (#25773)

Parent 638bbb61
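The pattern throughout this change: each dygraph layer reads the global FLAGS_use_mkldnn flag through core.globals() when it is constructed and forwards it as the use_mkldnn attribute of the ops it creates. Below is a minimal sketch of exercising the new path; enabling the flag through the FLAGS_use_mkldnn environment variable is an assumption of the example, the patch itself only reads the flag.

```python
# Minimal sketch, not part of the patch. Setting FLAGS_use_mkldnn via the
# environment before Paddle initializes is an assumption here.
import os
os.environ["FLAGS_use_mkldnn"] = "1"

import numpy as np
import paddle.fluid as fluid
from paddle.fluid import core

# The layers touched below read the flag exactly like this in __init__.
print(core.globals()["FLAGS_use_mkldnn"])

with fluid.dygraph.guard(fluid.CPUPlace()):
    x = fluid.dygraph.to_variable(
        np.random.random((2, 3, 8, 8)).astype("float32"))
    conv = fluid.dygraph.Conv2D(num_channels=3, num_filters=4, filter_size=3)
    # The conv2d op and the bias elementwise_add now carry use_mkldnn
    # taken from the flag instead of a hard-coded False.
    y = conv(x)
    print(y.shape)
```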
@@ -136,18 +136,13 @@ class LayerObjectHelper(LayerHelperBase):
return param
# TODO: this should not be called anymore after all activation func move to Layers
def append_activation(self,
input_var,
act=None,
use_cudnn=None,
use_mkl_dnn=None):
def append_activation(self, input_var, act=None, use_cudnn=None):
"""Append activation
Args:
input_var: the input variable. The len(input_var.shape) is
larger or equal than 2.
act: activation type
use_mkl_dnn: if use mkldnn
use_cudnn: if use cudnn
Return the Variable of after append activation
@@ -163,8 +158,9 @@ class LayerObjectHelper(LayerHelperBase):
if (use_cudnn is not None) and use_cudnn:
act['use_cudnn'] = use_cudnn
if (use_mkl_dnn is not None) and use_mkl_dnn:
act['use_mkldnn'] = use_mkl_dnn
use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
if (use_mkldnn is not None) and use_mkldnn:
act['use_mkldnn'] = use_mkldnn
act_type = act.pop('type')
tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
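For reference, a standalone sketch (not the library code) of how the helper now assembles the activation attributes: use_mkldnn comes from the global flag instead of the removed use_mkl_dnn argument.

```python
from paddle.fluid import core

def build_act_attrs(act="sigmoid", use_cudnn=None):
    # Illustrative helper mirroring the updated append_activation logic above.
    attrs = {"type": act}
    if use_cudnn is not None and use_cudnn:
        attrs["use_cudnn"] = use_cudnn
    use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
    if use_mkldnn is not None and use_mkldnn:
        attrs["use_mkldnn"] = use_mkldnn
    return attrs
```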
@@ -180,6 +180,7 @@ class Conv2D(layers.Layer):
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_cudnn = use_cudnn
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
self._filter_size = filter_size
self._num_filters = num_filters
self._param_attr = param_attr
@@ -187,7 +188,8 @@ class Conv2D(layers.Layer):
self._dtype = dtype
if (self._num_channels == self._groups and
num_filters % self._num_channels == 0 and not self._use_cudnn):
num_filters % self._num_channels == 0 and
not self._use_cudnn and not self._use_mkldnn):
self._l_type = 'depthwise_conv2d'
else:
self._l_type = 'conv2d'
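The selection above means the specialized depthwise kernel is only kept when neither cuDNN nor MKL-DNN is requested; with the flag on, the layer falls back to the plain conv2d op. A small illustrative sketch of the condition (the helper name is hypothetical):

```python
def pick_conv_type(num_channels, num_filters, groups, use_cudnn, use_mkldnn):
    # Mirrors the condition in Conv2D.__init__ shown above.
    if (num_channels == groups and num_filters % num_channels == 0
            and not use_cudnn and not use_mkldnn):
        return "depthwise_conv2d"
    return "conv2d"

assert pick_conv_type(8, 16, 8, use_cudnn=False, use_mkldnn=False) == "depthwise_conv2d"
assert pick_conv_type(8, 16, 8, use_cudnn=False, use_mkldnn=True) == "conv2d"
```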
@@ -224,14 +226,15 @@ class Conv2D(layers.Layer):
if in_dygraph_mode() and self._l_type == 'conv2d':
attrs = ('strides', self._stride, 'paddings', self._padding,
'dilations', self._dilation, 'groups', self._groups
if self._groups else 1, 'use_cudnn', self._use_cudnn)
if self._groups else 1, 'use_cudnn', self._use_cudnn,
'use_mkldnn', self._use_mkldnn)
out = core.ops.conv2d(input, self.weight, *attrs)
pre_bias = out
pre_act = dygraph_utils._append_bias_in_dygraph(pre_bias, self.bias,
1)
return dygraph_utils._append_activation_in_dygraph(pre_act,
self._act)
pre_act = dygraph_utils._append_bias_in_dygraph(
pre_bias, self.bias, 1, use_mkldnn=self._use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
pre_act, self._act, use_mkldnn=self._use_mkldnn)
inputs = {
'Input': [input],
'Filter': [self.weight],
@@ -242,7 +245,7 @@ class Conv2D(layers.Layer):
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn,
'use_mkldnn': False,
'use_mkldnn': self._use_mkldnn,
}
check_variable_and_dtype(input, 'input',
@@ -267,7 +270,8 @@ class Conv2D(layers.Layer):
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
attrs={'axis': 1,
'use_mkldnn': self._use_mkldnn})
else:
pre_act = pre_bias
@@ -828,6 +832,8 @@ class Pool2D(layers.Layer):
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
@@ -853,8 +859,8 @@ class Pool2D(layers.Layer):
'global_pooling', self._global_pooling, 'strides',
self._pool_stride, 'paddings', self._pool_padding,
'use_cudnn', self._use_cudnn, 'ceil_mode', self._ceil_mode,
'use_mkldnn', False, 'exclusive', self._exclusive,
'data_format', self._data_format)
'use_mkldnn', self._use_mkldnn, 'exclusive',
self._exclusive, 'data_format', self._data_format)
return core.ops.pool2d(input, *attrs)
check_variable_and_dtype(
@@ -869,7 +875,7 @@ class Pool2D(layers.Layer):
"paddings": self._pool_padding,
"use_cudnn": self._use_cudnn,
"ceil_mode": self._ceil_mode,
"use_mkldnn": False,
"use_mkldnn": self._use_mkldnn,
"exclusive": self._exclusive,
"data_format": self._data_format,
}
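A minimal dygraph Pool2D run exercising the updated attribute (this assumes FLAGS_use_mkldnn was enabled as in the sketch at the top):

```python
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CPUPlace()):
    x = fluid.dygraph.to_variable(
        np.random.random((2, 3, 8, 8)).astype("float32"))
    pool = fluid.dygraph.Pool2D(pool_size=2, pool_type="max", pool_stride=2)
    # The pool2d op now receives use_mkldnn from the flag instead of False.
    print(pool(x).shape)  # [2, 3, 4, 4]
```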
@@ -958,16 +964,22 @@ class Linear(layers.Layer):
self.bias = self.create_parameter(
shape=[output_dim], attr=bias_attr, dtype=dtype, is_bias=True)
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
def forward(self, input):
if in_dygraph_mode():
pre_bias = _varbase_creator(dtype=input.dtype)
core.ops.matmul(input, self.weight, pre_bias, 'transpose_X', False,
'transpose_Y', False, "alpha", 1)
'transpose_Y', False, "alpha", 1, "use_mkldnn",
self._use_mkldnn)
pre_act = dygraph_utils._append_bias_in_dygraph(
pre_bias, self.bias, axis=len(input.shape) - 1)
pre_bias,
self.bias,
axis=len(input.shape) - 1,
use_mkldnn=self._use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(pre_act,
self._act)
return dygraph_utils._append_activation_in_dygraph(
pre_act, self._act, use_mkldnn=self._use_mkldnn)
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'], "Linear")
@@ -976,6 +988,7 @@ class Linear(layers.Layer):
"transpose_X": False,
"transpose_Y": False,
"alpha": 1,
"use_mkldnn": self._use_mkldnn,
}
inputs = {"X": [input], "Y": [self.weight]}
@@ -990,7 +1003,10 @@ class Linear(layers.Layer):
inputs={'X': [tmp],
'Y': [self.bias]},
outputs={'Out': [pre_activation]},
attrs={'axis': len(input.shape) - 1})
attrs={
'axis': len(input.shape) - 1,
'use_mkldnn': self._use_mkldnn
})
else:
pre_activation = tmp
return self._helper.append_activation(pre_activation, act=self._act)
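Likewise for Linear, where matmul, the bias elementwise_add, and the appended activation all pick up the flag. A short usage sketch under the same assumption:

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import Linear

with fluid.dygraph.guard(fluid.CPUPlace()):
    x = fluid.dygraph.to_variable(
        np.random.random((4, 16)).astype("float32"))
    fc = Linear(16, 8, act="relu")
    # matmul, elementwise_add and the relu are created with use_mkldnn
    # taken from FLAGS_use_mkldnn.
    print(fc(x).shape)  # [4, 8]
```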
@@ -1250,6 +1266,7 @@ class BatchNorm(layers.Layer):
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
assert bias_attr is not False, "bias_attr should not be False in batch_norm."
@@ -1314,8 +1331,8 @@ class BatchNorm(layers.Layer):
if in_dygraph_mode():
attrs = ("momentum", self._momentum, "epsilon", self._epsilon,
"is_test", not self.training, "data_layout",
self._data_layout, "use_mkldnn", False, "fuse_with_relu",
self._fuse_with_relu, "use_global_stats",
self._data_layout, "use_mkldnn", self._use_mkldnn,
"fuse_with_relu", self._fuse_with_relu, "use_global_stats",
self._use_global_stats, 'trainable_statistics',
self._trainable_statistics)
batch_norm_out, _, _, _, _, _ = core.ops.batch_norm(
@@ -1323,7 +1340,7 @@ class BatchNorm(layers.Layer):
mean_out, variance_out, *attrs)
return dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=self._act)
batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn)
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'], 'BatchNorm')
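And for BatchNorm, both the batch_norm op and the activation appended afterwards carry the flag:

```python
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CPUPlace()):
    x = fluid.dygraph.to_variable(
        np.random.random((2, 4, 8, 8)).astype("float32"))
    bn = fluid.dygraph.BatchNorm(4, act="relu")
    # batch_norm and the appended relu now carry use_mkldnn from the flag.
    print(bn(x).shape)  # [2, 4, 8, 8]
```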
@@ -45,17 +45,19 @@ def _append_activation_in_dygraph(input,
@dygraph_only
def _append_bias_in_dygraph(input, bias=None, axis=1):
def _append_bias_in_dygraph(input, bias=None, axis=1, use_mkldnn=False):
"""Append bias operation in dygraph mode.
Args:
input: the input variable.
bias: the bias to be appended
axis: the axis to perform operation
use_mkldnn: whether to use mkldnn
Return the Variable after bias operation
"""
if bias is None:
return input
return core.ops.elementwise_add(input, bias, 'axis', axis)
return core.ops.elementwise_add(input, bias, 'axis', axis, 'use_mkldnn',
use_mkldnn)
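A quick illustration of the extended helper; it is a private, dygraph-only utility, so calling it directly is only for demonstration:

```python
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.dygraph_utils as dygraph_utils
from paddle.fluid import core

with fluid.dygraph.guard(fluid.CPUPlace()):
    x = fluid.dygraph.to_variable(np.ones((2, 3)).astype("float32"))
    b = fluid.dygraph.to_variable(np.ones((3,)).astype("float32"))
    # Forwarding the global flag mirrors how the layers above call the helper.
    out = dygraph_utils._append_bias_in_dygraph(
        x, b, axis=1, use_mkldnn=core.globals()["FLAGS_use_mkldnn"])
    print(out.numpy())  # all 2.0
```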
@@ -11414,7 +11414,12 @@ Examples:
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_add')
x,
y,
axis=axis,
act=act,
op_name='elementwise_add',
use_mkldnn=core.globals()["FLAGS_use_mkldnn"])
return _elementwise_op(LayerHelper('elementwise_add', **locals()))
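Finally, the dygraph fast path of fluid.layers.elementwise_add now also passes the flag through:

```python
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CPUPlace()):
    a = fluid.dygraph.to_variable(np.ones((2, 3)).astype("float32"))
    b = fluid.dygraph.to_variable(np.ones((2, 3)).astype("float32"))
    # use_mkldnn is read from FLAGS_use_mkldnn inside the dygraph branch.
    print(fluid.layers.elementwise_add(a, b).numpy())  # all 2.0
```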
@@ -21,6 +21,7 @@ from paddle.fluid import core
from paddle.fluid import Linear
from test_imperative_base import new_program_scope
import paddle.fluid.dygraph_utils as dygraph_utils
from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper
import paddle
@@ -629,6 +630,16 @@ class TestDygraphUtils(unittest.TestCase):
res2 = fluid.layers.sigmoid(a)
self.assertTrue(np.allclose(res1.numpy(), res2.numpy()))
def test_append_activation_in_dygraph3(self):
a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)
helper = LayerObjectHelper(fluid.unique_name.generate("test"))
func = helper.append_activation
with fluid.dygraph.guard():
a = fluid.dygraph.to_variable(a_np)
res1 = func(a, act="sigmoid", use_cudnn=True)
res2 = fluid.layers.sigmoid(a)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
def test_append_bias_in_dygraph_exception(self):
with new_program_scope():
np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32)