BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 1ba81500 (unverified)
Authored Nov 29, 2021 by piotrekobiIntel; committed via GitHub on Nov 29, 2021

Add third batch of deprecated mkldnn namespace name changes (#37558)

Parent commit: 6b8a6220

Showing 18 changed files with 70 additions and 77 deletions (+70 -77)
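The rename tracks oneDNN's own rebranding: the library formerly called MKL-DNN now ships its C++ API in the dnnl namespace, and the old mkldnn:: spelling survives only through a compatibility alias while Paddle's call sites are converted a batch (fewer than 20 files) at a time. A minimal sketch of that alias mechanism, using stand-in types rather than the real oneDNN headers:

#include <iostream>

namespace dnnl {             // the library's current namespace
struct engine { int index = 0; };
}  // namespace dnnl

namespace mkldnn = dnnl;     // deprecated spelling kept alive during migration

int main() {
  mkldnn::engine old_style;  // still compiles via the alias
  dnnl::engine new_style;    // target spelling after this PR's renames
  std::cout << old_style.index + new_style.index << "\n";
  return 0;
}

Because the alias makes both spellings name the same entities, each batch can be landed independently without breaking files that have not been converted yet.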
Changed files:

  paddle/fluid/memory/detail/system_allocator.cc                +1  -1
  paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc   +4  -5
  paddle/fluid/operators/matmul_op.cc                           +1  -1
  paddle/fluid/operators/mkldnn/cast_mkldnn_op.cc               +2  -3
  paddle/fluid/operators/mkldnn/expand_v2_mkldnn_op.cc          +4  -4
  paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc    +1  -1
  paddle/fluid/operators/mkldnn/layer_norm_mkldnn_op.cc         +2  -2
  paddle/fluid/operators/mkldnn/matmul_mkldnn_op.h              +1  -1
  paddle/fluid/operators/mkldnn/matmul_v2_mkldnn_op.cc          +3  -3
  paddle/fluid/operators/mkldnn/prelu_mkldnn_op.cc              +1  -1
  paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc            +9  -9
  paddle/fluid/operators/mkldnn/slice_mkldnn_op.cc              +12 -12
  paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h            +2  -2
  paddle/fluid/operators/mkldnn/split_mkldnn_op.cc              +1  -1
  paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc          +17 -17
  paddle/fluid/operators/optimizers/sgd_op.cc                   +1  -1
  paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h   +8  -8
  paddle/fluid/platform/device_context.h                        +0  -5
paddle/fluid/memory/detail/system_allocator.cc

@@ -48,7 +48,7 @@ void* AlignedMalloc(size_t size) {
   void* p = nullptr;
   size_t alignment = 32ul;
 #ifdef PADDLE_WITH_MKLDNN
-  // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp
+  // refer to https://github.com/01org/mkl-dnn/blob/master/include/dnnl.hpp
   // memory alignment
   alignment = 4096ul;
 #endif
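For context, the function touched here pins allocations to a 4096-byte (page) boundary when oneDNN is enabled, versus 32 bytes otherwise. A stand-alone sketch of that logic (simplified; the real implementation in system_allocator.cc may differ in its fallback handling):

#include <stdlib.h>  // posix_memalign (POSIX)

// Sketch of the AlignedMalloc logic above: oneDNN benefits from page-aligned
// buffers, hence the 4096-byte alignment under PADDLE_WITH_MKLDNN.
void* AlignedMallocSketch(size_t size, bool with_mkldnn) {
  size_t alignment = with_mkldnn ? 4096ul : 32ul;
  void* p = nullptr;
  if (posix_memalign(&p, alignment, size) != 0) p = nullptr;
  return p;
}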
paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc

@@ -30,11 +30,10 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler<T, dnnl::gru_forward, T_out> {
  public:
   GRUMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                    const platform::MKLDNNDeviceContext& dev_ctx,
-                   const mkldnn::engine mkldnn_engine,
-                   platform::Place cpu_place, const LoDTensor* input,
-                   const Tensor* weight_h, const Tensor* h0,
-                   const bool is_reverse, const int64_t N, const int64_t Ti,
-                   const int64_t IC, const int64_t OC,
+                   const dnnl::engine mkldnn_engine, platform::Place cpu_place,
+                   const LoDTensor* input, const Tensor* weight_h,
+                   const Tensor* h0, const bool is_reverse, const int64_t N,
+                   const int64_t Ti, const int64_t IC, const int64_t OC,
                    const std::string& unique_name)
       : RNNMKLDNNHandler<T, dnnl::gru_forward, T_out>(
             ctx, dev_ctx, mkldnn_engine, ctx.GetPlace(), input, weight_h, h0,
paddle/fluid/operators/matmul_op.cc

@@ -730,7 +730,7 @@ class MatMulOp : public framework::OperatorWithKernel {
         OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");
 #ifdef PADDLE_WITH_MKLDNN
-    using mkldnn::memory;
+    using dnnl::memory;
     if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
       return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                      framework::DataLayout::kMKLDNN,
paddle/fluid/operators/mkldnn/cast_mkldnn_op.cc

@@ -36,9 +36,8 @@ class CastMKLDNNKernel : public framework::OpKernel<T> {
     auto x_paddle_type = framework::proto::VarType::Type(in_dtype);
     auto out_paddle_type = framework::proto::VarType::Type(out_dtype);

-    mkldnn::memory::data_type x_type =
-        framework::ToMKLDNNDataType(x_paddle_type);
-    mkldnn::memory::data_type out_type =
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x_paddle_type);
+    dnnl::memory::data_type out_type =
         framework::ToMKLDNNDataType(out_paddle_type);

     auto x_tz = framework::vectorize(x->dims());
paddle/fluid/operators/mkldnn/expand_v2_mkldnn_op.cc

@@ -76,8 +76,8 @@ class ExpandMKLDNNKernel : public paddle::framework::OpKernel<T> {
  private:
   dnnl::memory::format_tag GetExtendedFormatTag(
       std::vector<int64_t>& dims, int new_size,
-      mkldnn::memory::format_tag format_tag) const {
-    mkldnn::memory::desc md(dims, paddle::platform::MKLDNNGetDataType<T>(),
+      dnnl::memory::format_tag format_tag) const {
+    dnnl::memory::desc md(dims, paddle::platform::MKLDNNGetDataType<T>(),
                             format_tag);
     std::vector<int64_t> new_dims(new_size, 1);
     std::copy(dims.begin(), dims.end(),

@@ -112,7 +112,7 @@ class ExpandGradMKLDNNKernel : public paddle::framework::OpKernel<T> {
     auto& astream = MKLDNNDeviceContext::tls().get_stream();

     if (dout_vec_dims == dx_vec_dims) {
-      mkldnn::memory::data_type dout_type =
+      dnnl::memory::data_type dout_type =
           paddle::framework::ToMKLDNNDataType(dout->type());
       paddle::platform::ReorderMKLDNNHandler reorder_handler(
           dout_vec_dims, dout->type(), dout_type, onednn_engine);
paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc

@@ -43,7 +43,7 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
     }
     tensor->set_layout(DataLayout::kMKLDNN);
-    tensor->set_format(mkldnn::memory::format_tag::oihw);
+    tensor->set_format(dnnl::memory::format_tag::oihw);
   }
 };
 }  // namespace operators
paddle/fluid/operators/mkldnn/layer_norm_mkldnn_op.cc

@@ -25,7 +25,7 @@ class LayerNormMKLDNNHandler : public platform::MKLDNNHandlerNoCachingT<
   LayerNormMKLDNNHandler(const std::vector<int64_t>& dims, const float& epsilon,
                          const dnnl::normalization_flags& flags,
                          const bool& is_test, const MKLDNNMemoryFormat fmt,
-                         const mkldnn::engine engine, platform::Place cpu_place)
+                         const dnnl::engine engine, platform::Place cpu_place)
       : platform::MKLDNNHandlerNoCachingT<T, dnnl::layer_normalization_forward>(
             engine, cpu_place) {
     auto md = dnnl::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);

@@ -131,7 +131,7 @@ class LayerNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     }

     if (with_scaleshift) {
-      std::shared_ptr<mkldnn::memory> scaleshift_memory =
+      std::shared_ptr<dnnl::memory> scaleshift_memory =
           handler.AcquireScaleShiftMemory(scale, bias);
       args.insert({DNNL_ARG_SCALE_SHIFT, *scaleshift_memory});
     }
paddle/fluid/operators/mkldnn/matmul_mkldnn_op.h

@@ -32,7 +32,7 @@ class MatMulGradMKLDNNKernel : public framework::OpKernel<T> {
  private:
   void ExecuteMatMulGrad(const ExecutionContext& ctx,
                          const MKLDNNDeviceContext& dev_ctx,
-                         const mkldnn::engine& engine, Tensor* x, bool trans_x,
+                         const dnnl::engine& engine, Tensor* x, bool trans_x,
                          bool is_fold_init_dims_x, Tensor* y, bool trans_y,
                          bool is_fold_init_dims_y, Tensor* out) const;
   void RunKernel(const ExecutionContext& ctx) const;
paddle/fluid/operators/mkldnn/matmul_v2_mkldnn_op.cc

@@ -33,7 +33,7 @@ template <typename T>
 class MatMulV2MKLDNNHandler
     : public paddle::platform::MKLDNNHandlerNoCachingT<T, dnnl::matmul> {
  public:
-  MatMulV2MKLDNNHandler(const mkldnn::engine engine,
+  MatMulV2MKLDNNHandler(const dnnl::engine engine,
                         paddle::platform::Place cpu_place,
                         const std::vector<int64_t>& x_org_dims, bool trans_x,
                         const std::vector<int64_t>& y_org_dims, bool trans_y,

@@ -132,7 +132,7 @@ class MatMulV2MKLDNNKernel
  protected:
   void ExecuteMatMul(const ExecutionContext& ctx,
                      const MKLDNNDeviceContext& dev_ctx,
-                     const mkldnn::engine onednn_engine,
+                     const dnnl::engine onednn_engine,
                      paddle::platform::Place cpu_place, const Tensor* x,
                      std::vector<int64_t>& x_dims, bool trans_x,
                      const Tensor* y, std::vector<int64_t>& y_dims,

@@ -272,7 +272,7 @@ class MatMulV2GradMKLDNNKernel : public MatMulV2MKLDNNKernel<T> {
   void ReduceSumForMatmulGradOutput(const ExecutionContext& ctx,
                                     const MKLDNNDeviceContext& dev_ctx,
-                                    const mkldnn::engine onednn_engine,
+                                    const dnnl::engine onednn_engine,
                                     const Tensor* dx_tmp, Tensor* dx,
                                     std::vector<int64_t> dx_dims) const {
     paddle::platform::ReductionMKLDNNHandler<T> handler(
paddle/fluid/operators/mkldnn/prelu_mkldnn_op.cc

@@ -31,7 +31,7 @@ class PReluMKLDNNHandler
                                                 dnnl::prelu_backward> {
  public:
   PReluMKLDNNHandler(const MKLDNNDeviceContext& dev_ctx,
-                     const mkldnn::engine engine, platform::Place cpu_place,
+                     const dnnl::engine engine, platform::Place cpu_place,
                      const Tensor* x, const Tensor* weights,
                      const std::string& uniq_name, const std::string& mode,
                      bool is_test = false)
paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc

@@ -74,7 +74,7 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
     auto x_vec_dims = framework::vectorize(x_dims);

-    mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());

     platform::ReorderMKLDNNHandler reorder_handler(x_vec_dims, x->type(),
                                                    x_type, onednn_engine);

@@ -197,7 +197,7 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
   }

  protected:
-  static mkldnn::memory::format_tag getPlainFormatTag(const Tensor* tensor) {
+  static dnnl::memory::format_tag getPlainFormatTag(const Tensor* tensor) {
     auto tensor_dims_size = tensor->dims().size();
     PADDLE_ENFORCE_EQ(tensor_dims_size <= 6 && tensor_dims_size >= 1, true,

@@ -206,17 +206,17 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
     switch (tensor_dims_size) {
       case 1:
-        return mkldnn::memory::format_tag::a;
+        return dnnl::memory::format_tag::a;
       case 2:
-        return mkldnn::memory::format_tag::ab;
+        return dnnl::memory::format_tag::ab;
       case 3:
-        return mkldnn::memory::format_tag::abc;
+        return dnnl::memory::format_tag::abc;
       case 4:
-        return mkldnn::memory::format_tag::abcd;
+        return dnnl::memory::format_tag::abcd;
       case 5:
-        return mkldnn::memory::format_tag::abcde;
+        return dnnl::memory::format_tag::abcde;
       default:
-        return mkldnn::memory::format_tag::abcdef;
+        return dnnl::memory::format_tag::abcdef;
     }
   }

@@ -324,7 +324,7 @@ class ReshapeGradMKLDNNKernel : public ReshapeMKLDNNKernel<T, op_name> {
     auto dout_vec_dims = framework::vectorize(dout->dims());

-    mkldnn::memory::data_type dout_type =
+    dnnl::memory::data_type dout_type =
         framework::ToMKLDNNDataType(dout->type());
     platform::ReorderMKLDNNHandler reorder_handler(dout_vec_dims, dout->type(),
                                                    dout_type, onednn_engine);
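The getPlainFormatTag switch above (the same pattern recurs in the slice and reduce kernels below) just maps tensor rank 1 through 6 to oneDNN's plain row-major format tags a through abcdef. A hypothetical table-driven equivalent, not part of this PR and assuming the oneDNN headers are on the include path:

#include <cstddef>
#include <iostream>
#include "dnnl.hpp"  // assumes oneDNN is installed

// Hypothetical helper: rank 1..6 -> oneDNN plain row-major tag.
static dnnl::memory::format_tag plain_tag_for_rank(size_t rank) {
  static const dnnl::memory::format_tag tags[] = {
      dnnl::memory::format_tag::a,     dnnl::memory::format_tag::ab,
      dnnl::memory::format_tag::abc,   dnnl::memory::format_tag::abcd,
      dnnl::memory::format_tag::abcde, dnnl::memory::format_tag::abcdef,
  };
  return (rank >= 1 && rank <= 6) ? tags[rank - 1]
                                  : dnnl::memory::format_tag::undef;
}

int main() {
  std::cout << static_cast<int>(plain_tag_for_rank(4)) << "\n";  // abcd
  return 0;
}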
paddle/fluid/operators/mkldnn/slice_mkldnn_op.cc

@@ -15,24 +15,24 @@ limitations under the License. */
 #include "paddle/fluid/operators/utils.h"
 #include "paddle/fluid/platform/mkldnn_reuse.h"

-static mkldnn::memory::format_tag get_plain_format_tag(
+static dnnl::memory::format_tag get_plain_format_tag(
     const paddle::framework::Tensor* tensor) {
   auto tensor_dims_size = tensor->dims().size();

   switch (tensor_dims_size) {
     case 1:
-      return mkldnn::memory::format_tag::a;
+      return dnnl::memory::format_tag::a;
     case 2:
-      return mkldnn::memory::format_tag::ab;
+      return dnnl::memory::format_tag::ab;
     case 3:
-      return mkldnn::memory::format_tag::abc;
+      return dnnl::memory::format_tag::abc;
     case 4:
-      return mkldnn::memory::format_tag::abcd;
+      return dnnl::memory::format_tag::abcd;
     case 5:
-      return mkldnn::memory::format_tag::abcde;
+      return dnnl::memory::format_tag::abcde;
   }

-  return mkldnn::memory::format_tag::abcdef;
+  return dnnl::memory::format_tag::abcdef;
 }

 namespace paddle {

@@ -97,7 +97,7 @@ class SliceMKLDNNKernel : public framework::OpKernel<T> {
     out->Resize(framework::make_ddim(slice_dims));

-    mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());

     platform::ReorderMKLDNNHandler reorder_handler(x_vec_dims, x->type(),
                                                    x_type, onednn_engine);

@@ -192,11 +192,11 @@ class SliceGradMKLDNNKernel : public framework::OpKernel<T> {
       slice_dims[axes[i]] = ends[i] - starts[i];
     }

-    mkldnn::memory::data_type dout_type =
+    dnnl::memory::data_type dout_type =
         framework::ToMKLDNNDataType(dout->type());
-    mkldnn::memory::desc md(dout_vec_dims, platform::MKLDNNGetDataType<T>(),
+    dnnl::memory::desc md(dout_vec_dims, platform::MKLDNNGetDataType<T>(),
                             dout->format());
-    mkldnn::memory::format_tag reorder_format_tag =
+    dnnl::memory::format_tag reorder_format_tag =
         platform::GetMKLDNNFormat(md.reshape(slice_dims));

     platform::ReorderMKLDNNHandler reorder_handler(slice_dims, dout->type(),
paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h

@@ -25,7 +25,7 @@ class SoftplusMKLDNNHandler
     : public platform::MKLDNNHandlerNoCachingT<T, dnnl::binary> {
  public:
   SoftplusMKLDNNHandler(const framework::ExecutionContext& ctx, const Tensor* x,
-                        const float beta, const mkldnn::engine engine)
+                        const float beta, const dnnl::engine engine)
       : platform::MKLDNNHandlerNoCachingT<T, dnnl::binary>(engine,
                                                            ctx.GetPlace()) {
     auto x_tz = framework::vectorize(x->dims());

@@ -53,7 +53,7 @@ class SoftplusMKLDNNHandler
                             x_md, beta_md, x_md);
   }

-  std::shared_ptr<mkldnn::memory> AcquireBetaMemory(const float* beta) {
+  std::shared_ptr<dnnl::memory> AcquireBetaMemory(const float* beta) {
     return this->AcquireMemoryFromPrimitive(this->fwd_pd_->src1_desc(),
                                             platform::to_void_cast<float>(beta));
   }
paddle/fluid/operators/mkldnn/split_mkldnn_op.cc

@@ -90,7 +90,7 @@ class SplitMKLDNNKernel : public framework::OpKernel<T> {
     auto x_vec_dims = framework::vectorize(x_dims);

-    mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());

     auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc

@@ -29,14 +29,14 @@ class TransposeMKLDNNHandler {
  public:
   TransposeMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                          std::vector<int>& axis,      // NOLINT
-                         mkldnn::engine engine)
+                         dnnl::engine engine)
       : dims_(dims),
         axis_(axis),
         logical_axis_(dims.size(), 0),
         engine_(engine) {}

-  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(const MKLDNNMemoryFormat& fmt,
-                                                   void* ptr) {
+  std::shared_ptr<dnnl::memory> AcquireSrcMemory(const MKLDNNMemoryFormat& fmt,
+                                                 void* ptr) {
     // Make memory descriptor using input format, unless it
     // cannot be trusted (nchw) then make up memory fmt manually
     for (size_t i = 0; i < this->logical_axis_.size(); ++i) {

@@ -47,24 +47,24 @@ class TransposeMKLDNNHandler {
             ? platform::MKLDNNMemDesc(dims_, platform::MKLDNNGetDataType<T>(),
                                       fmt)
             : Axis2MemoryDesc(dims_, logical_axis_);
-    return std::make_shared<mkldnn::memory>(src_md, engine_, ptr);
+    return std::make_shared<dnnl::memory>(src_md, engine_, ptr);
   }

-  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output,
+  std::shared_ptr<dnnl::memory> AcquireDstMemory(framework::Tensor* output,
                                                    platform::Place place) {
     auto dst_md = Axis2MemoryDesc(dims_, axis_);
     auto dst_data = output->mutable_data<T>(place, dst_md.get_size());
-    return std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
+    return std::make_shared<dnnl::memory>(dst_md, engine_, dst_data);
   }

-  std::shared_ptr<mkldnn::reorder> AcquireTranspose(
-      std::shared_ptr<mkldnn::memory> dst_memory_p,
-      std::shared_ptr<mkldnn::memory> src_memory_p) {
-    return std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
+  std::shared_ptr<dnnl::reorder> AcquireTranspose(
+      std::shared_ptr<dnnl::memory> dst_memory_p,
+      std::shared_ptr<dnnl::memory> src_memory_p) {
+    return std::make_shared<dnnl::reorder>(*(src_memory_p), *(dst_memory_p));
   }

  protected:
-  mkldnn::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
+  dnnl::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
                                        std::vector<int>& axis  // NOLINT
                                        ) {
     size_t ndims = axis.size();

@@ -75,7 +75,7 @@ class TransposeMKLDNNHandler {
       strides[axis[i]] = total_stride;
       total_stride *= nchw_tz[axis[i]];
     }
-    mkldnn::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
+    dnnl::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
                                strides);
     return mem_d;

@@ -85,7 +85,7 @@ class TransposeMKLDNNHandler {
   std::vector<int64_t> dims_;
   std::vector<int> axis_;
   std::vector<int> logical_axis_;
-  mkldnn::engine engine_;
+  dnnl::engine engine_;
 };

 template <typename T>
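The strides loop visible in the Axis2MemoryDesc hunk is what makes the transpose work: accumulating total_stride along the permutation yields a strided descriptor over the same buffer. A stand-alone sketch mirroring that computation (hypothetical helper name; iteration order assumed to run from the innermost axis outward, as the hunk's context suggests):

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical helper mirroring the strides computation in Axis2MemoryDesc:
// each source axis receives the stride it has in the transposed view.
std::vector<int64_t> StridesForTranspose(const std::vector<int64_t>& dims,
                                         const std::vector<int>& axis) {
  std::vector<int64_t> strides(dims.size());
  int64_t total_stride = 1;
  for (int i = static_cast<int>(axis.size()) - 1; i >= 0; --i) {
    strides[axis[i]] = total_stride;
    total_stride *= dims[axis[i]];
  }
  return strides;
}

int main() {
  // Transposing a 2x3 tensor with axis = {1, 0} yields strides {1, 2}:
  // element (i, j) of the descriptor maps to buffer offset i * 1 + j * 2.
  for (int64_t s : StridesForTranspose({2, 3}, {1, 0})) std::cout << s << " ";
  std::cout << "\n";
  return 0;
}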
paddle/fluid/operators/optimizers/sgd_op.cc

@@ -72,7 +72,7 @@ class SGDOp : public framework::OperatorWithKernel {
     auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Param");
 #ifdef PADDLE_WITH_MKLDNN
-    using mkldnn::memory;
+    using dnnl::memory;
     if (this->CanMKLDNNBeUsed(ctx, data_type)) {
       const auto* param_var = ctx.InputVar("Param");
       const auto* grad_var = ctx.InputVar("Grad");
paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h

@@ -69,7 +69,7 @@ class ReduceMKLDNNKernel : public framework::OpKernel<T> {
     // In that case reorder must be executed to maintain compatibility with
     // PaddlePaddle reduce op
     if (input_dims == output_dims) {
-      mkldnn::memory::data_type input_type =
+      dnnl::memory::data_type input_type =
           framework::ToMKLDNNDataType(input->type());
       platform::ReorderMKLDNNHandler reorder_handler(
           input_dims, input->type(), input_type, onednn_engine);

@@ -132,7 +132,7 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
     auto* input_dy = ctx.Input<Tensor>(framework::GradVarName("Out"));
     auto* output_dx = ctx.Output<Tensor>(framework::GradVarName("X"));
-    mkldnn::memory::format_tag x_format_tag;
+    dnnl::memory::format_tag x_format_tag;
     auto input_dims =
         CalculateReducedDims(output_dx, input_dy, dims, reduce_all, keep_dim);
     auto output_dims = framework::vectorize(output_dx->dims());

@@ -175,7 +175,7 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
   }

  protected:
-  mkldnn::memory::format_tag getPlainFormatTag(const Tensor* tensor) const {
+  dnnl::memory::format_tag getPlainFormatTag(const Tensor* tensor) const {
     auto tensor_dims_size = tensor->dims().size();
     PADDLE_ENFORCE_EQ(tensor_dims_size <= 5 && tensor_dims_size >= 1, true,

@@ -184,16 +184,16 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
     switch (tensor_dims_size) {
       case 1:
-        return mkldnn::memory::format_tag::a;
+        return dnnl::memory::format_tag::a;
       case 2:
-        return mkldnn::memory::format_tag::ab;
+        return dnnl::memory::format_tag::ab;
       case 3:
-        return mkldnn::memory::format_tag::abc;
+        return dnnl::memory::format_tag::abc;
       case 4:
-        return mkldnn::memory::format_tag::abcd;
+        return dnnl::memory::format_tag::abcd;
     }

-    return mkldnn::memory::format_tag::abcde;
+    return dnnl::memory::format_tag::abcde;
   }
 };
paddle/fluid/platform/device_context.h

@@ -48,7 +48,6 @@ limitations under the License. */
 #ifdef PADDLE_WITH_MKLDNN
 #include "dnnl.hpp"
 #include "paddle/fluid/framework/data_layout.h"
-namespace mkldnn = dnnl;
 #endif

 #include <map>

@@ -65,10 +64,6 @@ namespace mkldnn = dnnl;
 #endif
 #include "unsupported/Eigen/CXX11/Tensor"

-// This alias is required for now so that namespace name changes can be made to
-// less than 20 files at a time. After all the names are changed it will be
-// removed.
-
 namespace Eigen {
 struct DefaultDevice;
 struct GpuDevice;
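device_context.h is where the temporary namespace mkldnn = dnnl; alias (and the comment explaining it) had lived; this batch drops it, so translation units that relied on this header for the old spelling must now use the dnnl:: names directly. A minimal illustration of that enforcement, with stand-in types rather than the real headers:

// Once the compatibility alias is removed, leftover old-style references
// stop compiling, which keeps stragglers from accumulating during the
// batched rename.
namespace dnnl {
struct engine {};
}  // namespace dnnl

// namespace mkldnn = dnnl;  // removed, as in this commit's device_context.h

int main() {
  dnnl::engine ok{};          // the new spelling still compiles
  // mkldnn::engine stale{};  // would fail: 'mkldnn' is no longer declared
  (void)ok;
  return 0;
}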