Paddle (forked from PaddlePaddle / Paddle)

Commit 1f598dfa

cherry-pick MKL-DNN NHWC FWD support fix (#21593)

Authored on Dec 06, 2019 by bingyanghuang
Committed by Tao Luo on Dec 06, 2019
Parent: f83254d6
Showing 5 changed files with 77 additions and 3 deletions (+77 -3)

paddle/fluid/operators/batch_norm_op.cc  +35 -2
paddle/fluid/operators/batch_norm_op.h  +4 -0
paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc  +16 -1
python/paddle/fluid/tests/unittests/mkldnn/test_batch_norm_mkldnn_op.py  +7 -0
python/paddle/fluid/tests/unittests/test_batch_norm_op.py  +15 -0
paddle/fluid/operators/batch_norm_op.cc
@@ -79,8 +79,9 @@ void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
                       x_dims, x_dims.size());
 
   const int64_t C =
-      (data_layout == DataLayout::kNCHW ? x_dims[1]
-                                        : x_dims[x_dims.size() - 1]);
+      ((this->IsMKLDNNType() == true) || (data_layout == DataLayout::kNCHW)
+           ? x_dims[1]
+           : x_dims[x_dims.size() - 1]);
 
   auto scale_dim = ctx->GetInputDim("Scale");
   auto bias_dim = ctx->GetInputDim("Bias");
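This hunk widens the channel-dimension lookup in InferShape: an MKL-DNN kernel always reports the input dims in NCHW order, so the channel count must be read from index 1 even when data_layout is NHWC. A minimal standalone sketch of that selection rule (ChannelCount is a hypothetical helper, not Paddle's API):

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical helper mirroring the selection rule in the hunk above:
// MKL-DNN tensors keep their dims in NCHW order regardless of the declared
// data_layout, so the channel count is dims[1]; only a plain (non-MKL-DNN)
// NHWC tensor keeps the channel in the last position.
int64_t ChannelCount(const std::vector<int64_t> &dims, bool is_mkldnn,
                     bool is_nchw) {
  return (is_mkldnn || is_nchw) ? dims[1] : dims[dims.size() - 1];
}

int main() {
  const std::vector<int64_t> nchw_dims = {2, 3, 4, 5};  // N, C, H, W
  const std::vector<int64_t> nhwc_dims = {2, 4, 5, 3};  // N, H, W, C
  assert(ChannelCount(nchw_dims, false, true) == 3);    // plain NCHW tensor
  assert(ChannelCount(nhwc_dims, false, false) == 3);   // plain NHWC tensor
  assert(ChannelCount(nchw_dims, true, false) == 3);    // MKL-DNN, NHWC attr
  return 0;
}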
@@ -154,6 +155,32 @@ framework::OpKernelType BatchNormOp::GetExpectedKernelType(
                                  library);
 }
 
+framework::OpKernelType BatchNormOp::GetKernelTypeForVar(
+    const std::string &var_name, const Tensor &tensor,
+    const framework::OpKernelType &expected_kernel_type) const {
+#ifdef PADDLE_WITH_MKLDNN
+  // Only input require reshaping, weights and
+  // bias are having shape in NCHW order
+  if ((var_name == "X") &&
+      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
+      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
+    auto attrs = Attrs();
+    auto ar = paddle::framework::AttrReader(attrs);
+    const std::string data_layout = ar.Get<std::string>("data_layout");
+    auto dl = framework::StringToDataLayout(data_layout);
+    // Some models may have intentionally set "AnyLayout" for pool
+    // op. Treat this as NCHW (default data_format value)
+    if (dl != framework::DataLayout::kAnyLayout) {
+      return framework::OpKernelType(
+          expected_kernel_type.data_type_, tensor.place(),
+          framework::StringToDataLayout(data_layout));
+    }
+  }
+#endif
+  return framework::OpKernelType(expected_kernel_type.data_type_,
+                                 tensor.place(), tensor.layout());
+}
+
 void BatchNormOpMaker::Make() {
   AddAttr<bool>("is_test",
                 "(bool, default false) Set to true for inference only, false "
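The new GetKernelTypeForVar override tells the data-transform machinery which layout to assume for input "X" when the op is dispatched to an MKL-DNN kernel but the tensor is still a plain (non-MKL-DNN) one. A minimal sketch of that decision, using a hypothetical enum and function rather than Paddle's types:

#include <cassert>

enum class Layout { kNCHW, kNHWC, kAnyLayout, kMKLDNN };

// Hypothetical mirror of the rule in GetKernelTypeForVar above: when the op
// will run as an MKL-DNN kernel but the incoming "X" tensor is still a plain
// (non-MKL-DNN) tensor, trust the user-declared data_layout attribute,
// unless it is "AnyLayout", which is treated as the default NCHW case.
Layout LayoutForInputX(Layout expected_kernel_layout, Layout tensor_layout,
                       Layout declared_attr_layout) {
  if (expected_kernel_layout == Layout::kMKLDNN &&
      tensor_layout != Layout::kMKLDNN &&
      declared_attr_layout != Layout::kAnyLayout) {
    return declared_attr_layout;  // e.g. kNHWC when data_layout = "NHWC"
  }
  return tensor_layout;  // otherwise keep whatever the tensor reports
}

int main() {
  // NHWC input entering an MKL-DNN kernel: keep the declared NHWC layout.
  assert(LayoutForInputX(Layout::kMKLDNN, Layout::kNHWC, Layout::kNHWC) ==
         Layout::kNHWC);
  // "AnyLayout" falls back to whatever the tensor already reports.
  assert(LayoutForInputX(Layout::kMKLDNN, Layout::kNCHW, Layout::kAnyLayout) ==
         Layout::kNCHW);
  return 0;
}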
@@ -446,6 +473,12 @@ framework::OpKernelType BatchNormGradOp::GetExpectedKernelType(
 #ifdef PADDLE_WITH_MKLDNN
   if (library == framework::LibraryType::kPlain &&
       platform::CanMKLDNNBeUsed(ctx)) {
+    // TODO(jczaja): Add support for NHWC
+    const std::string data_layout = ctx.Attr<std::string>("data_layout");
+    PADDLE_ENFORCE_NE(
+        data_layout, "NHWC",
+        platform::errors::Unimplemented(
+            "Batch Norm MKLDNN grad does not support NHWC data format yet"));
     library = framework::LibraryType::kMKLDNN;
     layout = framework::DataLayout::kMKLDNN;
   }
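The backward pass is deliberately left NCHW-only for now; the added check simply refuses the MKL-DNN grad kernel for NHWC. A rough standalone sketch of what that enforcement amounts to (hypothetical function, not Paddle's PADDLE_ENFORCE_NE macro):

#include <iostream>
#include <stdexcept>
#include <string>

// Rough stand-in for the guard above: the MKL-DNN grad kernel is only
// selected for non-NHWC layouts, so an NHWC request fails loudly instead of
// silently running with a mismatched layout.
void CheckMKLDNNGradLayout(const std::string &data_layout) {
  if (data_layout == "NHWC") {
    throw std::runtime_error(
        "Batch Norm MKLDNN grad does not support NHWC data format yet");
  }
}

int main() {
  CheckMKLDNNGradLayout("NCHW");  // accepted
  try {
    CheckMKLDNNGradLayout("NHWC");
  } catch (const std::runtime_error &e) {
    std::cout << e.what() << "\n";  // the guard fires for NHWC
  }
  return 0;
}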
paddle/fluid/operators/batch_norm_op.h
@@ -47,6 +47,10 @@ class BatchNormOp : public framework::OperatorWithKernel {
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext &ctx) const override;
+
+  framework::OpKernelType GetKernelTypeForVar(
+      const std::string &var_name, const Tensor &tensor,
+      const framework::OpKernelType &expected_kernel_type) const override;
 };
 
 class BatchNormGradOp : public framework::OperatorWithKernel {
paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
@@ -775,8 +775,23 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
      * ('any') which lets a primitive (conv backward in this case) choose
      * the memory format preferred for best performance
      */
-    auto chosen_memory_format = MKLDNNMemoryFormat::any;
+    // TODO(jczaja): Once GRAD NHWC is working then format 'any'
+    // should be used exclusively. But till forward pass enforce
+    // NCHW for training we need to have NCHW here as well
+    // to avoid performance degradation in relu_grad and pool2d_grad
+    std::string data_format = ctx.Attr<std::string>("data_format");
+    auto chosen_memory_format =
+        platform::data_format_to_memory_format(data_format);
+
     weights_format = MKLDNNMemoryFormat::any;
+    // Check the format for user's special output
+    if (chosen_memory_format != MKLDNNMemoryFormat::any) {
+      if (is_conv3d) {
+        chosen_memory_format =
+            platform::MKLDNNFormatForSize(src_tz.size(), chosen_memory_format);
+      }
+    }
 
     auto src_md = platform::MKLDNNMemDesc(
         src_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
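Instead of always requesting format 'any', the conv grad kernel now seeds its memory format from the op's data_format attribute. A loose sketch of such a string-to-format mapping (the enum and function here are illustrative stand-ins, not the real platform::data_format_to_memory_format):

#include <cassert>
#include <string>

// Hypothetical mapping from the data_format attribute to a starting
// memory format; "AnyLayout" (or anything unrecognized) keeps 'any' so
// MKL-DNN may still pick its preferred layout.
enum class MemoryFormat { any, nchw, nhwc };

MemoryFormat DataFormatToMemoryFormat(const std::string &data_format) {
  if (data_format == "NCHW") return MemoryFormat::nchw;
  if (data_format == "NHWC") return MemoryFormat::nhwc;
  return MemoryFormat::any;
}

int main() {
  assert(DataFormatToMemoryFormat("NCHW") == MemoryFormat::nchw);
  assert(DataFormatToMemoryFormat("NHWC") == MemoryFormat::nhwc);
  assert(DataFormatToMemoryFormat("AnyLayout") == MemoryFormat::any);
  return 0;
}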
python/paddle/fluid/tests/unittests/mkldnn/test_batch_norm_mkldnn_op.py
@@ -84,6 +84,13 @@ class TestMKLDNNBatchNormOpInference(TestBatchNormOpInference):
         self.check_with_place(place, data_format, self.dtype, [2, 3, 4, 5])
 
 
+class TestMKLDNNBatchNormOpInference_NHWC(TestMKLDNNBatchNormOpInference):
+    def test_check_output(self):
+        place = core.CPUPlace()
+        data_format = "NHWC"
+        self.check_with_place(place, data_format, self.dtype, [2, 4, 5, 3])
+
+
 class TestMKLDNNBatchNormOpWithReluInference(TestBatchNormOpInference):
     def init_kernel_type(self):
         self.use_mkldnn = True
python/paddle/fluid/tests/unittests/test_batch_norm_op.py
@@ -259,6 +259,21 @@ class TestBatchNormOpInference(unittest.TestCase):
         batch_norm_op.run(scope, place)
 
+        # When op is called without Executor then
+        # MKL-DNN Tensor is returned. For NHWC data layout
+        # dims will be in NCHW order as it is MKL-DNN way
+        # of memory descripting. So we need to convert NCHW
+        # dims into NHWC.
+        if data_layout == "NHWC" and self.use_mkldnn == True:
+            # Create executor to have MKL-DNN cache
+            # cleared after NHWC unit test
+            place = core.CPUPlace()
+            exe = fluid.Executor(place)
+            dims = y_tensor.shape()
+            c = dims.pop(1)
+            dims.append(c)
+            y_tensor._set_dims(dims)
+
         # check inference result
         self.__assert_close(
             y_tensor,
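The test adjustment reflects that an MKL-DNN tensor reports its dims in NCHW order even for an NHWC run, so the channel entry has to be rotated to the back before comparing against the NHWC reference. The same shuffle as the Python pop(1)/append above, sketched as a standalone C++ helper (hypothetical name):

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical mirror of the test's pop(1)/append shuffle: move the channel
// entry (index 1 in NCHW order) to the back, yielding NHWC-ordered dims.
std::vector<int64_t> NchwDimsToNhwc(std::vector<int64_t> dims) {
  const int64_t c = dims[1];
  dims.erase(dims.begin() + 1);
  dims.push_back(c);
  return dims;
}

int main() {
  const std::vector<int64_t> expected = {2, 4, 5, 3};  // N, H, W, C
  assert(NchwDimsToNhwc({2, 3, 4, 5}) == expected);    // from N, C, H, W
  return 0;
}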