Commit 80484245
Authored Aug 09, 2021 by Jacek Czaja

- disabled caching of layer norm

Parent: 1148ce67
1 changed file, with 39 additions and 49 deletions:

paddle/fluid/operators/mkldnn/layer_norm_mkldnn_op.cc (+39, -49)
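For context, the change below swaps the handler's base class from the key-cached `platform::MKLDNNHandlerT` (which stores primitives in the device context under a `platform::CreateKey(...)` string and skips setup when `isCached()` is true) to `platform::MKLDNNHandlerNoCachingT`, which rebuilds the layer-norm primitive descriptor on every kernel invocation. A minimal standalone sketch of the two lifetimes, using hypothetical `CachedHandler`/`NoCachingHandler` stand-ins rather than Paddle's actual classes:

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Hypothetical stand-in for the key-cached pattern being removed: state is
// looked up under a string key, so it persists across kernel invocations.
struct CachedHandler {
  static std::unordered_map<std::string, std::shared_ptr<int>> cache;
  std::shared_ptr<int> pd;  // stands in for a primitive descriptor
  explicit CachedHandler(const std::string& key) {
    auto it = cache.find(key);
    if (it != cache.end()) {
      pd = it->second;                 // the isCached() fast path
    } else {
      pd = std::make_shared<int>(42);  // build once, then store
      cache.emplace(key, pd);
    }
  }
};
std::unordered_map<std::string, std::shared_ptr<int>> CachedHandler::cache;

// Hypothetical stand-in for the no-caching pattern being introduced:
// the descriptor is simply rebuilt on each call.
struct NoCachingHandler {
  std::shared_ptr<int> pd;
  NoCachingHandler() : pd(std::make_shared<int>(42)) {}
};

int main() {
  CachedHandler a("layer_norm@2x4x8"), b("layer_norm@2x4x8");
  std::cout << (a.pd == b.pd) << "\n";  // 1: both calls share cached state
  NoCachingHandler c, d;
  std::cout << (c.pd == d.pd) << "\n";  // 0: fresh state per call
}
```

The apparent trade-off: layer norm's descriptor is cheap to construct, so dropping the cache removes per-key memory growth and key-hashing cost at negligible rebuild expense.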
@@ -19,19 +19,15 @@ namespace paddle {
 namespace operators {
 
 template <typename T>
-class LayerNormMKLDNNHandler
-    : public platform::MKLDNNHandlerT<T, dnnl::layer_normalization_forward> {
+class LayerNormMKLDNNHandler
+    : public platform::MKLDNNHandlerNoCachingT<
+          T, dnnl::layer_normalization_forward> {
  public:
   LayerNormMKLDNNHandler(const std::vector<int64_t>& dims, const float& epsilon,
                          const dnnl::normalization_flags& flags,
                          const bool& is_test, const MKLDNNMemoryFormat fmt,
-                         const platform::MKLDNNDeviceContext& dev_ctx,
-                         platform::Place cpu_place, const std::string& uniq_name)
-      : platform::MKLDNNHandlerT<T, dnnl::layer_normalization_forward>(
-            dev_ctx, dev_ctx.GetEngine(), cpu_place,
-            platform::CreateKey(dev_ctx, dims, uniq_name)) {
-    if (!this->isCached()) {
-      auto md = dnnl::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
+                         const mkldnn::engine mkldnn_engine,
+                         platform::Place cpu_place)
+      : platform::MKLDNNHandlerNoCachingT<T, dnnl::layer_normalization_forward>(
+            mkldnn_engine, cpu_place) {
+    auto md = dnnl::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
 
-      if (!is_test) {
+    if (!is_test) {
       // TODO(grygielski) Delete forcing stats_md after DNNL 1.2 is introduced
@@ -39,25 +35,20 @@ class LayerNormMKLDNNHandler
           {begin(dims), end(dims) - 1}, platform::MKLDNNGetDataType<float>(),
           platform::MKLDNNFormatForSize(dims.size() - 1,
                                         MKLDNNMemoryFormat::nchw));
-        this->AcquireForwardPrimitiveDescriptor(
-            dnnl::prop_kind::forward_training, md, stats_md, epsilon, flags);
+      this->AcquireForwardPrimitiveDescriptor(
+          dnnl::prop_kind::forward_training, md, stats_md, epsilon, flags);
     } else {
       this->AcquireForwardPrimitiveDescriptor(
           dnnl::prop_kind::forward_inference, md, epsilon, flags);
     }
-    }
   }
 
-  std::shared_ptr<dnnl::memory> AcquireScaleShiftMemory() {
-    return this->AcquireMemoryFromPrimitive("@scaleshift_mem_p");
-  }
-
   std::shared_ptr<dnnl::memory> AcquireScaleShiftMemory(
       std::vector<float>& scaleshift_data) {
     // scaleshift_data comes from a temporary buffer, so it has to be copied
     // into the created memory primitive
-    auto scaleshift_mem = this->AcquireMemoryFromPrimitive(
-        this->fwd_pd_->weights_desc(), "@scaleshift_mem_p");
+    auto scaleshift_mem =
+        this->AcquireMemoryFromPrimitive(this->fwd_pd_->weights_desc());
     auto data_ptr = scaleshift_mem->get_data_handle();
     std::size_t num_bytes = scaleshift_data.size() * sizeof(float);
     std::memcpy(data_ptr, scaleshift_data.data(), num_bytes);
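The constructor acquires one of two forward primitive descriptors: training also emits mean and variance, so it passes the forced `stats_md`; inference does not. A minimal oneDNN 2.x sketch of both descriptors outside Paddle (the `{2, 4, 8}` shape, `epsilon`, and flags are invented for illustration):

```cpp
#include <dnnl.hpp>

int main() {
  dnnl::engine eng(dnnl::engine::kind::cpu, 0);

  // Data layout {T, N, C}; layer norm normalizes over the last (C) axis.
  dnnl::memory::desc md({2, 4, 8}, dnnl::memory::data_type::f32,
                        dnnl::memory::format_tag::tnc);
  // Stats drop the normalized axis, mirroring {begin(dims), end(dims) - 1}.
  dnnl::memory::desc stats_md({2, 4}, dnnl::memory::data_type::f32,
                              dnnl::memory::format_tag::nc);
  const float epsilon = 1e-5f;
  const auto flags = dnnl::normalization_flags::use_scale_shift;

  // forward_training: mean/variance are outputs, so stats_md is explicit.
  dnnl::layer_normalization_forward::desc train_d(
      dnnl::prop_kind::forward_training, md, stats_md, epsilon, flags);
  dnnl::layer_normalization_forward::primitive_desc train_pd(train_d, eng);

  // forward_inference: no stats descriptor is needed.
  dnnl::layer_normalization_forward::desc infer_d(
      dnnl::prop_kind::forward_inference, md, epsilon, flags);
  dnnl::layer_normalization_forward::primitive_desc infer_pd(infer_d, eng);

  (void)train_pd;
  (void)infer_pd;
  return 0;
}
```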
@@ -68,7 +59,7 @@ class LayerNormMKLDNNHandler
     T* mean_data = mean->mutable_data<T>(this->place_,
                                          this->fwd_pd_->mean_desc().get_size());
     return this->AcquireMemoryFromPrimitive(this->fwd_pd_->mean_desc(),
-                                            mean_data, "@mean_mem_p");
+                                            mean_data);
   }
 
   std::shared_ptr<dnnl::memory> AcquireVarianceMemory(
@@ -76,7 +67,7 @@ class LayerNormMKLDNNHandler
     T* variance_data = variance->mutable_data<T>(
         this->place_, this->fwd_pd_->variance_desc().get_size());
     return this->AcquireMemoryFromPrimitive(this->fwd_pd_->variance_desc(),
-                                            variance_data, "@variance_mem_p");
+                                            variance_data);
   }
 };
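`AcquireMeanMemory` and `AcquireVarianceMemory` size their output buffers from the primitive descriptor's own stats layouts (`mean_desc().get_size()`) rather than computing byte counts by hand. Continuing the oneDNN sketch above (reusing `train_pd` and `eng`; this fragment is illustrative, not Paddle code):

```cpp
// Query the stats layouts the primitive chose, then allocate to match.
dnnl::memory::desc mean_md = train_pd.mean_desc();
dnnl::memory::desc variance_md = train_pd.variance_desc();
size_t mean_bytes = mean_md.get_size();  // what get_size() reports above
dnnl::memory mean_mem(mean_md, eng);     // library-managed allocation
dnnl::memory variance_mem(variance_md, eng);
(void)mean_bytes;
```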
@@ -95,6 +86,7 @@ class LayerNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto& dev_ctx =
         ctx.template device_context<platform::MKLDNNDeviceContext>();
+    const auto& mkldnn_engine = dev_ctx.GetEngine();
 
     auto src_tz = paddle::framework::vectorize(x->dims());
     PADDLE_ENFORCE_EQ(begin_norm_axis, (src_tz.size() - 1),
@@ -112,8 +104,8 @@ class LayerNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     }
 
     LayerNormMKLDNNHandler<T> handler(src_tz, epsilon, flags, is_test,
-                                      x->format(), dev_ctx, ctx.GetPlace(),
-                                      ctx.OutputName("Y"));
+                                      x->format(), mkldnn_engine,
+                                      ctx.GetPlace());
 
     auto src_memory = handler.AcquireSrcMemory(x);
     auto dst_memory = handler.AcquireDstMemory(y);
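`AcquireSrcMemory`/`AcquireDstMemory` wrap the tensors' existing buffers in `dnnl::memory` objects without copying. Continuing the same sketch (reusing `md`, `infer_pd`, and `eng`; the vectors are placeholder buffers):

```cpp
// Wrap caller-owned buffers; oneDNN does not take ownership or copy.
std::vector<float> src_buf(2 * 4 * 8), dst_buf(2 * 4 * 8);
dnnl::memory src_mem(md, eng, src_buf.data());
dnnl::memory dst_mem(infer_pd.dst_desc(), eng, dst_buf.data());
```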
@@ -139,9 +131,8 @@ class LayerNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
       args.insert({DNNL_ARG_VARIANCE, *variance_memory});
     }
 
-    auto scaleshift_memory = handler.AcquireScaleShiftMemory();
+    std::shared_ptr<dnnl::memory> scaleshift_memory = nullptr;
     if (with_scaleshift) {
-      if (scaleshift_memory == nullptr || !is_test) {
       auto scale_tz = paddle::framework::vectorize(scale->dims());
       const unsigned int C = scale_tz[0];
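With caching gone, the scale/shift memory is always created fresh inside the `with_scaleshift` branch; the old probe for a previously cached copy (`scaleshift_memory == nullptr || !is_test`) has no equivalent. Continuing the sketch, the argument map the primitive consumes (reusing `src_mem`/`dst_mem` from above):

```cpp
// weights_desc() describes the packed {2, C} scale+shift tensor in oneDNN 2.x.
dnnl::memory ss_mem(infer_pd.weights_desc(), eng);
std::unordered_map<int, dnnl::memory> ln_args{
    {DNNL_ARG_SRC, src_mem},
    {DNNL_ARG_DST, dst_mem},
    {DNNL_ARG_SCALE_SHIFT, ss_mem}};
```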
@@ -156,7 +147,6 @@ class LayerNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
                              bias->data<float>() + C);
 
       scaleshift_memory = handler.AcquireScaleShiftMemory(scaleshift_data);
-      }
 
       args.insert({DNNL_ARG_SCALE_SHIFT, *scaleshift_memory});
     }
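`scaleshift_data` packs the `C` scale (gamma) values followed by the `C` shift (beta) values, which is what the copy ending in `bias->data<float>() + C` above assembles before `AcquireScaleShiftMemory` copies it into the primitive's buffer via `std::memcpy`. Finishing the sketch (reusing `ss_mem`, `ln_args`, `infer_pd`, and `eng`; assumes `<cstring>` and `<vector>` are included):

```cpp
// Fill the packed scale/shift buffer, mirroring the kernel's memcpy.
const int C = 8;  // last-axis size of the running {2, 4, 8} example
std::vector<float> scaleshift_data;
scaleshift_data.reserve(2 * C);
scaleshift_data.insert(scaleshift_data.end(), C, 1.0f);  // scale (gamma)
scaleshift_data.insert(scaleshift_data.end(), C, 0.0f);  // shift (beta)
std::memcpy(ss_mem.get_data_handle(), scaleshift_data.data(),
            scaleshift_data.size() * sizeof(float));

// Execute the inference primitive and wait for completion.
dnnl::stream strm(eng);
dnnl::layer_normalization_forward(infer_pd).execute(strm, ln_args);
strm.wait();
```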