Commit b0b27ff6
Authored Jan 06, 2020 by Jacek Czaja
Committed by Tao Luo on Jan 06, 2020
[MKL-DNN] Conv grad and Batch Norm grad NHWC support (#22088)
Parent 1ce6ab9c

Showing 9 changed files with 91 additions and 40 deletions (+91 -40)
paddle/fluid/framework/operator.cc                                        +2   -4
paddle/fluid/operators/batch_norm_op.cc                                   +32  -11
paddle/fluid/operators/batch_norm_op.h                                    +4   -0
paddle/fluid/operators/controlflow/fetch_op.cc                            +6   -1
paddle/fluid/operators/conv_op.cc                                         +28  -12
paddle/fluid/operators/conv_op.h                                          +4   -0
paddle/fluid/platform/mkldnn_helper.h                                     +5   -0
python/paddle/fluid/tests/unittests/mkldnn/test_batch_norm_mkldnn_op.py   +10  -0
python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py       +0   -12
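As background (not part of the commit itself), the following is a minimal sketch of the kind of NHWC program whose CPU backward pass these changes enable under MKL-DNN. It assumes the fluid API of this period (fluid.data, fluid.layers.conv2d with data_format, fluid.layers.batch_norm with data_layout) and the FLAGS_use_mkldnn=1 environment flag; exact names may differ between Paddle versions.

import numpy as np
import paddle.fluid as fluid

# Assumed usage sketch: run with FLAGS_use_mkldnn=1 so CPU ops go through MKL-DNN.
x = fluid.data(name="x", shape=[-1, 32, 32, 3], dtype="float32")  # NHWC input
conv = fluid.layers.conv2d(x, num_filters=8, filter_size=3, data_format="NHWC")
bn = fluid.layers.batch_norm(conv, data_layout="NHWC")
loss = fluid.layers.reduce_mean(bn)
fluid.backward.append_backward(loss)  # conv2d_grad / batch_norm_grad now handle NHWC

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
out, = exe.run(feed={"x": np.random.rand(2, 32, 32, 3).astype("float32")},
               fetch_list=[loss])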
paddle/fluid/framework/operator.cc
@@ -1215,10 +1215,8 @@ Scope* OperatorWithKernel::PrepareData(
       // The reason is that if a gpu tensor is the input of a cpu kernel,
       // we will create a new cpu tensor in new scope.
       // However, if enable_cache_runtime_context_, we get the cpu tensor each
-      // time, not the gpu tensor.
-      // Thus, we set pre_scope_ = nullptr to trigger `new RuntimeContext()`
-      // in
-      // RunImpl().
+      // time, not the gpu tensor. Thus, we set pre_scope_ = nullptr
+      // to trigger `new RuntimeContext()` in RunImpl().
       if (enable_cache_runtime_context_) {
         pre_scope_ = nullptr;
       }
paddle/fluid/operators/batch_norm_op.cc
@@ -186,9 +186,8 @@ framework::OpKernelType BatchNormOp::GetKernelTypeForVar(
     // Some models may have intentionally set "AnyLayout" for pool
     // op. Treat this as NCHW (default data_format value)
     if (dl != framework::DataLayout::kAnyLayout) {
-      return framework::OpKernelType(
-          expected_kernel_type.data_type_, tensor.place(),
-          framework::StringToDataLayout(data_layout));
+      return framework::OpKernelType(expected_kernel_type.data_type_,
+                                     tensor.place(), dl);
     }
   }
 #endif
@@ -465,8 +464,11 @@ void BatchNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
   const auto x_dims = ctx->GetInputDim("X");
   const DataLayout data_layout = framework::StringToDataLayout(
       ctx->Attrs().Get<std::string>("data_layout"));
-  const int C = (data_layout == DataLayout::kNCHW ? x_dims[1]
-                                                  : x_dims[x_dims.size() - 1]);
+  const int C =
+      ((this->IsMKLDNNType() == true) || (data_layout == DataLayout::kNCHW)
+           ? x_dims[1]
+           : x_dims[x_dims.size() - 1]);

   ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
   // has_scale_grad == has_bias_grad, judge has_scale_grad is enough
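The InferShape change above picks the channel count from x_dims[1] when the grad op runs as an MKL-DNN kernel (MKL-DNN tensors keep their dims in NCHW order) or when data_layout is NCHW, and from the last dim otherwise. A small Python restatement of that selection (the helper name is illustrative, not Paddle API):

def channel_count(x_dims, data_layout, is_mkldnn):
    # MKL-DNN tensors keep their dims in NCHW order regardless of data_layout.
    if is_mkldnn or data_layout == "NCHW":
        return x_dims[1]
    return x_dims[-1]

assert channel_count([2, 3, 32, 32], "NCHW", False) == 3
assert channel_count([2, 32, 32, 3], "NHWC", False) == 3
assert channel_count([2, 3, 32, 32], "NHWC", True) == 3  # MKL-DNN: dims already NCHW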
@@ -499,12 +501,6 @@ framework::OpKernelType BatchNormGradOp::GetExpectedKernelType(
 #ifdef PADDLE_WITH_MKLDNN
   if (library == framework::LibraryType::kPlain &&
       platform::CanMKLDNNBeUsed(ctx)) {
-    // TODO(jczaja): Add support for NHWC
-    const std::string data_layout = ctx.Attr<std::string>("data_layout");
-    PADDLE_ENFORCE_NE(
-        data_layout, "NHWC",
-        platform::errors::Unimplemented(
-            "Batch Norm MKLDNN grad does not support NHWC data format yet"));
     library = framework::LibraryType::kMKLDNN;
     layout = framework::DataLayout::kMKLDNN;
   }
@@ -515,6 +511,31 @@ framework::OpKernelType BatchNormGradOp::GetExpectedKernelType(
       library);
 }

+framework::OpKernelType BatchNormGradOp::GetKernelTypeForVar(
+    const std::string &var_name, const Tensor &tensor,
+    const framework::OpKernelType &expected_kernel_type) const {
+#ifdef PADDLE_WITH_MKLDNN
+  // Only input require reshaping, weights and
+  // bias are having shape in NCHW order
+  if (((var_name == "X") || (var_name == framework::GradVarName("Y"))) &&
+      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
+      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
+    auto attrs = Attrs();
+    auto ar = paddle::framework::AttrReader(attrs);
+    const std::string data_layout = ar.Get<std::string>("data_layout");
+    auto dl = framework::StringToDataLayout(data_layout);
+    // Some models may have intentionally set "AnyLayout" for pool
+    // op. Treat this as NCHW (default data_format value)
+    if (dl != framework::DataLayout::kAnyLayout) {
+      return framework::OpKernelType(expected_kernel_type.data_type_,
+                                     tensor.place(), dl);
+    }
+  }
+#endif
+  return framework::OpKernelType(expected_kernel_type.data_type_,
+                                 tensor.place(), tensor.layout());
+}
+
 template <typename T>
 class BatchNormGradKernel<platform::CPUDeviceContext, T>
     : public framework::OpKernel<T> {
paddle/fluid/operators/batch_norm_op.h
@@ -148,6 +148,10 @@ class BatchNormGradOp : public framework::OperatorWithKernel {
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext &ctx) const override;
+
+  framework::OpKernelType GetKernelTypeForVar(
+      const std::string &var_name, const Tensor &tensor,
+      const framework::OpKernelType &expected_kernel_type) const override;
 };

 class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker {
paddle/fluid/operators/controlflow/fetch_op.cc
@@ -60,8 +60,13 @@ class FetchOp : public framework::OperatorBase {
     // Conversion from MKL-DNN to Paddle
     if (src_item.layout() == framework::DataLayout::kMKLDNN) {
       framework::Tensor out;
+      // Convert to desired Paddle layout, apart from grads of filter
+      // as params are not a subject to paddle's data_format
       framework::innerTransDataLayoutFromMKLDNN(
-          src_item.layout(), paddle::platform::get_cur_paddle_data_layout(),
+          src_item.layout(),
+          fetch_var_name == framework::GradVarName("Filter")
+              ? framework::DataLayout::kNCHW
+              : paddle::platform::get_cur_paddle_data_layout(),
           src_item, &out, platform::CPUPlace());
       TensorCopySync(out, platform::CPUPlace(), &dst_item);
     } else {
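The fetch-op change converts fetched MKL-DNN tensors back to the current paddle data layout, except filter gradients, which are parameter-shaped and not subject to data_format, so they are always brought back to NCHW. A rough Python restatement of that choice (the names are illustrative; framework::GradVarName("Filter") corresponds to the "Filter@GRAD" suffix convention):

def fetch_target_layout(fetch_var_name, cur_paddle_layout):
    # Filter gradients are not subject to the model's data_format; keep them NCHW.
    return "NCHW" if fetch_var_name == "Filter@GRAD" else cur_paddle_layout

assert fetch_target_layout("Filter@GRAD", "NHWC") == "NCHW"
assert fetch_target_layout("conv2d_0.tmp_0", "NHWC") == "NHWC"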
paddle/fluid/operators/conv_op.cc
@@ -208,9 +208,8 @@ framework::OpKernelType ConvOp::GetKernelTypeForVar(
     // Some models may have intentionally set "AnyLayout" for pool
     // op. Treat this as NCHW (default data_format value)
     if (dl != framework::DataLayout::kAnyLayout) {
-      return framework::OpKernelType(
-          expected_kernel_type.data_type_, tensor.place(),
-          framework::StringToDataLayout(data_format));
+      return framework::OpKernelType(expected_kernel_type.data_type_,
+                                     tensor.place(), dl);
     }
   }
 #endif
@@ -554,16 +553,7 @@ framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
 #ifdef PADDLE_WITH_MKLDNN
   if (library_ == framework::LibraryType::kPlain &&
       platform::CanMKLDNNBeUsed(ctx)) {
-    // TODO(jczaja): Add support for NHWC
-    const std::string data_format = ctx.Attr<std::string>("data_format");
-    PADDLE_ENFORCE_NE(
-        data_format, "NHWC",
-        platform::errors::Unimplemented(
-            "Conv MKLDNN grad does not support NHWC data format yet"));
-    PADDLE_ENFORCE_NE(
-        data_format, "NDHWC",
-        platform::errors::Unimplemented(
-            "Conv MKLDNN Grad does not support NDHWC data format yet"));
     library_ = framework::LibraryType::kMKLDNN;
     layout_ = framework::DataLayout::kMKLDNN;
     customized_type_value = kConvMKLDNNFP32;
@@ -591,6 +581,32 @@ framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
   return type;
 }

+framework::OpKernelType ConvOpGrad::GetKernelTypeForVar(
+    const std::string& var_name, const Tensor& tensor,
+    const framework::OpKernelType& expected_kernel_type) const {
+#ifdef PADDLE_WITH_MKLDNN
+  // Only input require reshaping, weights and
+  // bias are having shape in NCHW order
+  if (((var_name == "Input") ||
+       (var_name == framework::GradVarName("Output"))) &&
+      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
+      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
+    auto attrs = Attrs();
+    auto ar = paddle::framework::AttrReader(attrs);
+    const std::string data_format = ar.Get<std::string>("data_format");
+    auto dl = framework::StringToDataLayout(data_format);
+    // Some models may have intentionally set "AnyLayout" for pool
+    // op. Treat this as NCHW (default data_format value)
+    if (dl != framework::DataLayout::kAnyLayout) {
+      return framework::OpKernelType(expected_kernel_type.data_type_,
+                                     tensor.place(), dl);
+    }
+  }
+#endif
+  return framework::OpKernelType(expected_kernel_type.data_type_,
+                                 tensor.place(), tensor.layout());
+}
+
 template <typename T>
 class Conv2DGradMaker : public framework::SingleGradOpMaker<T> {
  public:
paddle/fluid/operators/conv_op.h
@@ -272,6 +272,10 @@ class ConvOpGrad : public framework::OperatorWithKernel {
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override;
+
+  framework::OpKernelType GetKernelTypeForVar(
+      const std::string& var_name, const Tensor& tensor,
+      const framework::OpKernelType& expected_kernel_type) const override;
 };

 class ConvOpDoubleGrad : public framework::OperatorWithKernel {
paddle/fluid/platform/mkldnn_helper.h
@@ -74,6 +74,11 @@ tf_pd<Type> MKLDNNBwdPrimitiveDesc(const Engine& e, const Primitive& p,
 inline void MatchShapeToLayout(framework::Tensor* tensor_in,
                                framework::DataLayout from,
                                framework::DataLayout to) {
+  // Shape changing makes sense for 3+ dims Tensors
+  if (tensor_in->dims().size() < 3) {
+    return;
+  }
+
   switch (from) {
     case framework::DataLayout::kMKLDNN:
       if (to == framework::DataLayout::kNHWC) {
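For reference, the dim shuffle that MatchShapeToLayout performs (and which the early return added above now skips for tensors with fewer than three dims) can be sketched in Python roughly as follows; the helper is an illustration, not Paddle API:

def match_shape_to_layout(dims, from_layout, to_layout):
    # Shape shuffling only makes sense for tensors with 3+ dims.
    if len(dims) < 3:
        return dims
    if from_layout == "kMKLDNN" and to_layout == "kNHWC":
        n, c, *spatial = dims          # MKL-DNN keeps dims in NCHW order
        return [n] + spatial + [c]     # expose them in NHWC order
    if from_layout == "kNHWC" and to_layout == "kMKLDNN":
        n, *spatial, c = dims
        return [n, c] + spatial
    return dims

assert match_shape_to_layout([2, 3, 32, 32], "kMKLDNN", "kNHWC") == [2, 32, 32, 3]
assert match_shape_to_layout([2, 32, 32, 3], "kNHWC", "kMKLDNN") == [2, 3, 32, 32]
assert match_shape_to_layout([8, 16], "kMKLDNN", "kNHWC") == [8, 16]  # too few dims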
python/paddle/fluid/tests/unittests/mkldnn/test_batch_norm_mkldnn_op.py
@@ -34,6 +34,10 @@ class TestMKLDNNBatchNormOpTraining(TestBatchNormOpTraining):
     def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance,
                              epsilon, momentum, shape, data_layout):
+        if data_layout != "NCHW" and data_layout != "NHWC":
+            raise ValueError("Unknown data order.")
+
         # run forward
         y, saved_mean, saved_variance = _reference_training(x, scale, bias,
                                                             epsilon, data_layout)
@@ -46,6 +50,12 @@ class TestMKLDNNBatchNormOpTraining(TestBatchNormOpTraining):
         return y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad

+
+class TestMKLDNNBatchNormOpTraining_NHWC(TestMKLDNNBatchNormOpTraining):
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+        self.data_formats = ["NHWC"]
+
+
 class TestMKLDNNBatchNormOpExistedPrimitives(TestMKLDNNBatchNormOpTraining):
     def init_test_case(self):
         TestMKLDNNBatchNormOpTraining.init_test_case(self)
python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py
@@ -208,18 +208,6 @@ class TestConv2dOp_Valid_NHWC_MKLDNN(TestConv2dOp_Valid_MKLDNN):
         N, C, H, W = self.input_size
         self.input_size = [N, H, W, C]

-    #TODO(jczaja): Enable once GRAD op is adjusted
-    def test_check_grad(self):
-        pass
-
-    #TODO(jczaja): Enable once GRAD op is adjusted
-    def test_check_grad_no_filter(self):
-        pass
-
-    #TODO(jczaja): Enable once GRAD op is adjusted
-    def test_check_grad_no_input(self):
-        pass
-

 class TestConv2dOp_Same_NHWC_MKLDNN(TestConv2dOp_Valid_NHWC_MKLDNN):
     def init_paddings(self):