Unverified commit b0378963
Authored by Tao Luo on Jun 29, 2018; committed via GitHub on Jun 29, 2018.

Merge pull request #11666 from mozga-intel/mozga-intel/Batch_norm_support_other_type

The mkldnn batch norm supports other data formats.

Parents: fff6fa0f, 61c54dbb
5 changed files with 38 additions and 21 deletions (+38 -21):

  paddle/fluid/framework/data_layout_transform.cc    +2   -2
  paddle/fluid/framework/data_layout_transform.h     +0   -6
  paddle/fluid/framework/data_transform.cc            +6   -2
  paddle/fluid/operators/batch_norm_mkldnn_op.cc      +19  -10
  paddle/fluid/platform/mkldnn_helper.h               +11  -1
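The heart of the change is a rank-aware format helper: MKLDNNFormatForSize moves from framework/data_layout_transform.h into platform/mkldnn_helper.h (as platform::MKLDNNFormatForSize) and is now applied to the batch norm inputs, so 1-D and 2-D tensors map to the MKL-DNN formats x and nc instead of whatever format the tensor happens to carry. A minimal sketch of that selection logic, with a stand-in enum in place of mkldnn::memory::format so it compiles without MKL-DNN installed, could look like this:

```cpp
// Hedged sketch of the rank-based format selection introduced by this commit.
// The real helper is platform::MKLDNNFormatForSize in
// paddle/fluid/platform/mkldnn_helper.h; Format below is a stand-in enum so
// this compiles without the MKL-DNN headers.
#include <cstddef>
#include <iostream>

enum class Format { x, nc, nchw, any };  // stand-in for mkldnn::memory::format

// Mirrors the helper's logic: rank-1 tensors use format x, rank-2 use nc,
// anything else keeps the caller-supplied format.
inline Format FormatForSize(std::size_t dims_size, Format data_format) {
  if (dims_size == 1) return Format::x;
  if (dims_size == 2) return Format::nc;
  return data_format;
}

int main() {
  std::cout << (FormatForSize(1, Format::nchw) == Format::x) << "\n";    // 1
  std::cout << (FormatForSize(2, Format::nchw) == Format::nc) << "\n";   // 1
  std::cout << (FormatForSize(4, Format::nchw) == Format::nchw) << "\n"; // 1
  return 0;
}
```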
paddle/fluid/framework/data_layout_transform.cc (+2 -2)

@@ -147,9 +147,9 @@ void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,
                  "Input tensor type is not supported: ", in.type().name());
   memory::data_type out_type = in_type;
 
-  auto in_format = MKLDNNFormatForSize(in_tz.size(), in.format());
+  auto in_format = platform::MKLDNNFormatForSize(in_tz.size(), in.format());
   auto out_format =
-      MKLDNNFormatForSize(in_tz.size(), ToMKLDNNFormat(out_layout));
+      platform::MKLDNNFormatForSize(in_tz.size(), ToMKLDNNFormat(out_layout));
 
   void* in_data = GetDataFromTensor(in, in_type);
paddle/fluid/framework/data_layout_transform.h (+0 -6)

@@ -62,12 +62,6 @@ inline MKLDNNDataType ToMKLDNNDataType(const std::type_index type) {
   return MKLDNNDataType::data_undef;
 }
 
-inline MKLDNNFormat MKLDNNFormatForSize(size_t dims_size,
-                                        MKLDNNFormat default_format) {
-  return (dims_size == 1
-              ? mkldnn::memory::format::x
-              : dims_size == 2 ? mkldnn::memory::format::nc : default_format);
-}
 #endif
 
 void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,
paddle/fluid/framework/data_transform.cc (+6 -2)

@@ -18,6 +18,10 @@ limitations under the License. */
 #include "paddle/fluid/framework/data_layout_transform.h"
 #include "paddle/fluid/framework/data_type_transform.h"
 
+#ifdef PADDLE_WITH_MKLDNN
+#include "paddle/fluid/platform/mkldnn_helper.h"
+#endif
+
 namespace paddle {
 namespace framework {

@@ -48,8 +52,8 @@ void TransformData(const OpKernelType &expected_kernel_type,
         // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel
         // Just set layout/format. No real transform occur
 
-        auto out_format =
-            MKLDNNFormatForSize(in.dims().size(), ToMKLDNNFormat(lin));
+        auto out_format = platform::MKLDNNFormatForSize(in.dims().size(),
+                                                        ToMKLDNNFormat(lin));
 
         out.ShareDataWith(input_tensor);
         out.set_layout(DataLayout::kMKLDNN);
paddle/fluid/operators/batch_norm_mkldnn_op.cc (+19 -10)

@@ -115,9 +115,12 @@ class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     if (fuse_with_relu) flags |= mkldnn::fuse_bn_relu;
 
     // create mkldnn memory from input x tensor
-    auto src_memory =
-        memory({{{src_tz}, memory::data_type::f32, x->format()}, mkldnn_engine},
-               to_void_cast(x_data));
+    mkldnn::memory::format input_format =
+        platform::MKLDNNFormatForSize(src_tz.size(), x->format());
+
+    auto src_memory = memory(
+        {{{src_tz}, memory::data_type::f32, input_format}, mkldnn_engine},
+        to_void_cast(x_data));
 
     // create primitive descriptor for batch norm forward
     using bn_fwd_types = bn_type_traits<mkldnn::batch_normalization_forward>;

@@ -251,15 +254,21 @@ class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     using bn_bwd_types = bn_type_traits<mkldnn::batch_normalization_backward>;
 
     // create mkldnn memory from input diff_y tensor
-    auto user_diff_dst_memory =
-        memory({{{diff_dst_tz}, memory::data_type::f32, diff_y->format()},
-                mkldnn_engine},
-               to_void_cast(diff_y_data));
+    mkldnn::memory::format dst_format =
+        platform::MKLDNNFormatForSize(src_tz.size(), diff_y->format());
+
+    auto user_diff_dst_memory = memory(
+        {{{diff_dst_tz}, memory::data_type::f32, dst_format}, mkldnn_engine},
+        to_void_cast(diff_y_data));
 
     // create mkldnn memory from input x tensor
-    auto src_memory =
-        memory({{{src_tz}, memory::data_type::f32, x->format()}, mkldnn_engine},
-               to_void_cast(x_data));
+    mkldnn::memory::format input_format =
+        platform::MKLDNNFormatForSize(src_tz.size(), x->format());
+
+    auto src_memory = memory(
+        {{{src_tz}, memory::data_type::f32, input_format}, mkldnn_engine},
+        to_void_cast(x_data));
 
     // for diff_dst, try to use same format as dst in forward pass
     auto diff_dst_pd = batch_norm_fwd_pd.get()->dst_primitive_desc();
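For context on the memory construction above: the kernel first normalizes the tensor's format by rank and then builds an mkldnn::memory from a descriptor of {dims, data type, format} plus the engine, wrapping the existing tensor buffer. A hedged sketch of that pattern follows, assuming the MKL-DNN 0.x C++ API ("mkldnn.hpp") that Paddle used at the time; the function make_src_memory and its parameter names are illustrative, not part of the commit:

```cpp
// Hedged sketch, assuming the MKL-DNN 0.x C++ API ("mkldnn.hpp").
// make_src_memory and its parameters are illustrative only; the real kernel
// builds src_memory inline as shown in the diff above.
#include <vector>
#include "mkldnn.hpp"

using mkldnn::engine;
using mkldnn::memory;

memory make_src_memory(const std::vector<int>& src_tz, void* x_data,
                       const engine& mkldnn_engine,
                       memory::format tensor_format) {
  // Normalize the format the same way platform::MKLDNNFormatForSize does:
  // rank-1 tensors use format x, rank-2 use nc, everything else keeps the
  // format recorded on the tensor.
  memory::format input_format =
      src_tz.size() == 1
          ? memory::format::x
          : src_tz.size() == 2 ? memory::format::nc : tensor_format;

  // Descriptor {dims, data type, format} + engine -> primitive descriptor,
  // then wrap the existing buffer without copying it.
  return memory(
      {{{src_tz}, memory::data_type::f32, input_format}, mkldnn_engine},
      x_data);
}
```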
paddle/fluid/platform/mkldnn_helper.h (+11 -1)

@@ -228,7 +228,7 @@ class MKLDNNHandler {
       return dstr;
     };
     return dims2str(operand_dims) + suffix;
-  };
+  }
 
  protected:
   const MKLDNNDeviceContext& dev_ctx_;

@@ -237,5 +237,15 @@ class MKLDNNHandler {
   bool is_reusing_;
 };
 
+inline mkldnn::memory::format MKLDNNFormatForSize(
+    size_t dims_size, mkldnn::memory::format data_format) {
+  if (dims_size == 1) {
+    return mkldnn::memory::format::x;
+  } else if (dims_size == 2) {
+    return mkldnn::memory::format::nc;
+  }
+  return data_format;
+}
+
 }  // namespace platform
 }  // namespace paddle