Commit 3a804a0e (unverified)
Authored Apr 14, 2021 by jakpiase; committed by GitHub on Apr 14, 2021.
Added oneDNN reduce_op FWD kernel (#31816)
Parent: 3ac6c189

Showing 13 changed files with 691 additions and 13 deletions (+691 −13)
paddle/fluid/operators/elementwise/mkldnn/elementwise_add_mkldnn_op.cc    +2 −1
paddle/fluid/operators/elementwise/mkldnn/elementwise_mkldnn_op.h         +15 −0
paddle/fluid/operators/elementwise/mkldnn/elementwise_mul_mkldnn_op.cc    +2 −1
paddle/fluid/operators/reduce_ops/mkldnn/reduce_max_mkldnn_op.cc          +34 −0
paddle/fluid/operators/reduce_ops/mkldnn/reduce_mean_mkldnn_op.cc         +34 −0
paddle/fluid/operators/reduce_ops/mkldnn/reduce_min_mkldnn_op.cc          +34 −0
paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h               +125 −0
paddle/fluid/operators/reduce_ops/mkldnn/reduce_sum_mkldnn_op.cc          +34 −0
paddle/fluid/operators/reduce_ops/reduce_op.h                             +27 −0
paddle/fluid/platform/mkldnn_reuse.h                                      +3 −11
python/paddle/fluid/tests/unittests/mkldnn/test_reduce_bf16_mkldnn_op.py  +185 −0
python/paddle/fluid/tests/unittests/mkldnn/test_reduce_mkldnn_op.py       +194 −0
tools/static_mode_white_list.py                                           +2 −0
paddle/fluid/operators/elementwise/mkldnn/elementwise_add_mkldnn_op.cc

@@ -86,7 +86,8 @@ class EltwiseAddMKLDNNGradKernel : public ElemwiseGradKernel<T> {
         platform::ReductionMKLDNNHandler<T> handler_sum(
             dnnl::algorithm::reduction_sum, 0.0f, 0.0f, dev_ctx, onednn_engine,
             ctx.GetPlace(), dout, dy,
-            ctx.InputName(framework::GradVarName("Out")));
+            ctx.InputName(framework::GradVarName("Out")),
+            CalculateBroadcastedDims(dout, dy));
         auto dy_memory_p = handler_sum.AcquireDstMemory(dy);
         auto reduction_p = handler_sum.AcquireForwardPrimitive();
         reduction_p->execute(astream, {{DNNL_ARG_SRC, *reorder_src_memory_p},
paddle/fluid/operators/elementwise/mkldnn/elementwise_mkldnn_op.h

@@ -81,5 +81,20 @@ class EltwiseMKLDNNKernel : public framework::OpKernel<T> {
     z->set_format(platform::GetMKLDNNFormat(*dst_memory));
   }
 };
+
+inline std::vector<int64_t> CalculateBroadcastedDims(const Tensor* x,
+                                                     const Tensor* y) {
+  const auto src_tz = framework::vectorize(x->dims());
+  const auto dst_tz = framework::vectorize(y->dims());
+
+  size_t j = 0;
+  std::vector<int64_t> dst_tz_ex(src_tz.size(), 1);
+  for (size_t i = 0; i < src_tz.size(); ++i) {
+    dst_tz_ex[i] = (src_tz[i] != dst_tz[j]) ? 1 : dst_tz[j++];
+    if (j == dst_tz.size()) break;
+  }
+
+  return dst_tz_ex;
+}
 }  // namespace operators
 }  // namespace paddle
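Note: CalculateBroadcastedDims extends the reduced tensor's shape to the source tensor's rank by scanning both shapes left to right and inserting 1 wherever the dimensions do not match, because the oneDNN reduction primitive requires src and dst to have equal dimensionality. A minimal standalone sketch of the same logic on plain shape vectors (BroadcastedDims, main, and the sample shapes are illustrative, not part of this commit):

#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors CalculateBroadcastedDims on plain shape vectors: produce a dst
// shape with the same rank as src, keeping each matching dim and padding
// the rest with 1 so oneDNN sees equal dimensionality on both sides.
std::vector<int64_t> BroadcastedDims(const std::vector<int64_t>& src_tz,
                                     const std::vector<int64_t>& dst_tz) {
  size_t j = 0;
  std::vector<int64_t> dst_tz_ex(src_tz.size(), 1);
  for (size_t i = 0; i < src_tz.size(); ++i) {
    dst_tz_ex[i] = (src_tz[i] != dst_tz[j]) ? 1 : dst_tz[j++];
    if (j == dst_tz.size()) break;  // every dst dim has been placed
  }
  return dst_tz_ex;
}

int main() {
  // e.g. reducing a [2, 3, 4, 5] gradient onto a [3, 5] tensor:
  for (int64_t d : BroadcastedDims({2, 3, 4, 5}, {3, 5}))
    std::cout << d << ' ';  // prints: 1 3 1 5
}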
paddle/fluid/operators/elementwise/mkldnn/elementwise_mul_mkldnn_op.cc

@@ -105,7 +105,8 @@ class EltwiseMulMKLDNNGradKernel : public ElemwiseGradKernel<T> {
         platform::ReductionMKLDNNHandler<T> handler_sum(
             dnnl::algorithm::reduction_sum, 0.0f, 0.0f, dev_ctx, mkldnn_engine,
             ctx.GetPlace(), dout, dy,
-            ctx.InputName(framework::GradVarName("Out")));
+            ctx.InputName(framework::GradVarName("Out")),
+            CalculateBroadcastedDims(dout, dy));
         auto dy_memory_p = handler_sum.AcquireDstMemory(dy);
         auto reduction_p = handler_sum.AcquireForwardPrimitive();
         // As source we use mem object with results from binary operation
paddle/fluid/operators/reduce_ops/mkldnn/reduce_max_mkldnn_op.cc (new file, mode 100644)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h"

namespace paddle {
namespace operators {

template <typename T>
class ReduceMaxMKLDNNKernel : public ReduceMKLDNNKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    this->RunKernel(ctx, dnnl::algorithm::reduction_max);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_KERNEL(reduce_max, MKLDNN, paddle::platform::CPUPlace,
                   ops::ReduceMaxMKLDNNKernel<float>,
                   ops::ReduceMaxMKLDNNKernel<paddle::platform::bfloat16>);
paddle/fluid/operators/reduce_ops/mkldnn/reduce_mean_mkldnn_op.cc (new file, mode 100644)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h"

namespace paddle {
namespace operators {

template <typename T>
class ReduceMeanMKLDNNKernel : public ReduceMKLDNNKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    this->RunKernel(ctx, dnnl::algorithm::reduction_mean);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_KERNEL(reduce_mean, MKLDNN, paddle::platform::CPUPlace,
                   ops::ReduceMeanMKLDNNKernel<float>,
                   ops::ReduceMeanMKLDNNKernel<paddle::platform::bfloat16>);
paddle/fluid/operators/reduce_ops/mkldnn/reduce_min_mkldnn_op.cc (new file, mode 100644)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h"

namespace paddle {
namespace operators {

template <typename T>
class ReduceMinMKLDNNKernel : public ReduceMKLDNNKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    this->RunKernel(ctx, dnnl::algorithm::reduction_min);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_KERNEL(reduce_min, MKLDNN, paddle::platform::CPUPlace,
                   ops::ReduceMinMKLDNNKernel<float>,
                   ops::ReduceMinMKLDNNKernel<paddle::platform::bfloat16>);
paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h (new file, mode 100644)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/mkldnn_reuse.h"

namespace paddle {
namespace operators {

using paddle::framework::LoDTensor;
using paddle::framework::Tensor;
using platform::to_void_cast;

template <typename T>
class ReduceMKLDNNKernel : public framework::OpKernel<T> {
 public:
  void RunKernel(const framework::ExecutionContext& ctx,
                 dnnl::algorithm reduction_type) const {
    auto& dev_ctx =
        ctx.template device_context<platform::MKLDNNDeviceContext>();
    const auto& onednn_engine = dev_ctx.GetEngine();

    const auto* input = ctx.Input<LoDTensor>("X");
    auto* output = ctx.Output<Tensor>("Out");

    auto reduce_dims = ctx.Attr<std::vector<int>>("dim");
    bool reduce_all = ctx.Attr<bool>("reduce_all");
    bool keep_dim = ctx.Attr<bool>("keep_dim");

    std::vector<int64_t> output_dims =
        CalculateOutputDims(input, output, reduce_dims, reduce_all, keep_dim);
    auto input_dims = framework::vectorize(input->dims());

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    // oneDNN reduce op does not support edge case in which memory is being
    // copied without actual reduction.
    // In that case reorder must be executed to maintain compatibility with
    // PaddlePaddle reduce op
    if (input_dims == output_dims) {
      mkldnn::memory::data_type input_type =
          framework::ToMKLDNNDataType(input->type());
      std::string key = platform::CreateKey(
          dev_ctx, input_dims, input->format(), input->format(), input_type);
      platform::ReorderMKLDNNHandler reorder_handler(
          input_dims, input->type(), input_type, dev_ctx, onednn_engine, key);

      auto reorder_src_memory_p = reorder_handler.AcquireSrcMemory(
          input->format(), platform::to_void_cast(input->data<T>()));

      auto reorder_dst_memory_p = reorder_handler.AcquireDstMemory(
          output, input->format(), ctx.GetPlace());

      auto reorder_p = reorder_handler.AcquireReorder(reorder_src_memory_p,
                                                      reorder_dst_memory_p);

      platform::RecordEvent record_reorder("int_reorder",
                                           platform::EventRole::kUniqueOp);

      reorder_p->execute(astream, *reorder_src_memory_p,
                         *reorder_dst_memory_p);
      astream.wait();

      output->set_layout(framework::DataLayout::kMKLDNN);
      output->set_format(
          platform::GetMKLDNNFormat(reorder_dst_memory_p->get_desc().reshape(
              paddle::framework::vectorize<int64_t>(output->dims()))));
    } else {
      platform::ReductionMKLDNNHandler<T> handler(
          reduction_type, 0.0f, 0.0f, dev_ctx, onednn_engine, ctx.GetPlace(),
          input, output, ctx.InputName("X"), output_dims);

      auto src_memory_p = handler.AcquireSrcMemory(input);
      auto dst_memory_p = handler.AcquireDstMemory(output);

      std::unordered_map<int, dnnl::memory> reduction_args = {
          {DNNL_ARG_SRC, *src_memory_p}, {DNNL_ARG_DST, *dst_memory_p}};

      auto reduction_p = handler.AcquireForwardPrimitive();

      reduction_p->execute(astream, reduction_args);
      astream.wait();

      output->set_layout(framework::DataLayout::kMKLDNN);
      output->set_format(
          platform::GetMKLDNNFormat(dst_memory_p->get_desc().reshape(
              paddle::framework::vectorize<int64_t>(output->dims()))));
    }
  }

 private:
  std::vector<int64_t> CalculateOutputDims(const Tensor* input,
                                           const Tensor* output,
                                           std::vector<int>& reduce_dims,
                                           bool reduce_all,
                                           bool keep_dim) const {
    if (keep_dim) return framework::vectorize(output->dims());

    if (reduce_all)
      return std::vector<int64_t>(framework::vectorize(input->dims()).size(),
                                  1);

    std::vector<int64_t> output_dims(framework::vectorize(input->dims()));
    for (size_t i = 0; i < reduce_dims.size(); ++i) {
      reduce_dims[i] = (reduce_dims[i] >= 0)
                           ? reduce_dims[i]
                           : input->dims().size() + reduce_dims[i];
      output_dims[reduce_dims[i]] = 1;
    }

    return output_dims;
  }
};

}  // namespace operators
}  // namespace paddle
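Note: CalculateOutputDims keeps the output at the input's rank: each reduced axis (negative indices counting from the back) collapses to 1, keep_dim simply reuses the framework-inferred output shape, and reduce_all collapses every axis. A minimal standalone sketch of the axis handling on a plain shape vector (ReducedDims and main are illustrative only, not part of this commit):

#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors CalculateOutputDims' axis handling: normalize negative axes,
// then collapse each reduced axis to 1 so the result keeps the input's
// rank, which the oneDNN reduction primitive requires.
std::vector<int64_t> ReducedDims(std::vector<int64_t> dims,
                                 const std::vector<int>& reduce_dims,
                                 bool reduce_all) {
  if (reduce_all) return std::vector<int64_t>(dims.size(), 1);
  for (int d : reduce_dims) {
    if (d < 0) d += static_cast<int>(dims.size());  // e.g. -1 -> last axis
    dims[d] = 1;
  }
  return dims;
}

int main() {
  // reduce over dim = [-1, 1] of a [2, 7, 3, 5] tensor:
  for (int64_t d : ReducedDims({2, 7, 3, 5}, {-1, 1}, false))
    std::cout << d << ' ';  // prints: 2 1 3 1
}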
paddle/fluid/operators/reduce_ops/mkldnn/reduce_sum_mkldnn_op.cc (new file, mode 100644)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h"

namespace paddle {
namespace operators {

template <typename T>
class ReduceSumMKLDNNKernel : public ReduceMKLDNNKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    this->RunKernel(ctx, dnnl::algorithm::reduction_sum);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_KERNEL(reduce_sum, MKLDNN, paddle::platform::CPUPlace,
                   ops::ReduceSumMKLDNNKernel<float>,
                   ops::ReduceSumMKLDNNKernel<paddle::platform::bfloat16>);
paddle/fluid/operators/reduce_ops/reduce_op.h

@@ -489,6 +489,30 @@ class ReduceOp : public framework::OperatorWithKernel {
       }
     }
   }
+
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    // choose cudnn kernel if the runtime supported.
+    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
+    if (ctx.Input<paddle::framework::LoDTensor>("X")->dims().size() > 5)
+      return framework::OpKernelType(input_data_type, ctx.GetPlace());
+
+#ifdef PADDLE_WITH_MKLDNN
+    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
+      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
+                                     framework::DataLayout::kMKLDNN,
+                                     framework::LibraryType::kMKLDNN);
+    }
+#endif
+
+    if (input_data_type == framework::proto::VarType::FP16) {
+      PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
+                        platform::errors::InvalidArgument(
+                            "float16 can only be used on GPU place"));
+    }
+
+    return framework::OpKernelType(input_data_type, ctx.GetPlace());
+  }
 };

 class ReduceOpUseInputPlace : public ReduceOp {

@@ -579,6 +603,9 @@ class ReduceOpMaker : public framework::OpProtoAndCheckerMaker {
               "(int, default -1)"
               "The dtype of output, default value is -1, the dtype is same as intput")
         .SetDefault(-1);
+    AddAttr<bool>("use_mkldnn",
+                  "(bool, default false) Only used in mkldnn kernel")
+        .SetDefault(false);
     AddComment(string::Sprintf(R"DOC(
 %s Operator.
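Note: the added GetExpectedKernelType dispatches in a fixed order: tensors of rank above 5 fall back to the plain kernel, otherwise the oneDNN kernel is chosen when CanMKLDNNBeUsed holds, and FP16 inputs are only accepted on GPU places. A standalone sketch of that decision order (the Kernel enum and Select helper are illustrative, not Paddle APIs; assert stands in for PADDLE_ENFORCE_EQ):

#include <cassert>
#include <iostream>

enum class Kernel { Plain, OneDNN };

// Illustrative reimplementation of the dispatch order added to
// ReduceOp::GetExpectedKernelType.
Kernel Select(int rank, bool mkldnn_usable, bool is_fp16, bool on_gpu) {
  if (rank > 5) return Kernel::Plain;   // oneDNN path limited to rank <= 5
  if (mkldnn_usable) return Kernel::OneDNN;
  if (is_fp16) assert(on_gpu && "float16 can only be used on GPU place");
  return Kernel::Plain;
}

int main() {
  std::cout << (Select(4, true, false, false) == Kernel::OneDNN) << '\n';  // 1
  std::cout << (Select(6, true, false, false) == Kernel::Plain) << '\n';   // 1
}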
paddle/fluid/platform/mkldnn_reuse.h

@@ -638,7 +638,8 @@ class ReductionMKLDNNHandler
                            const float eps, const MKLDNNDeviceContext& dev_ctx,
                            const mkldnn::engine engine, platform::Place cpu_place,
                            const Tensor* x, const Tensor* y,
-                           const std::string& uniq_name)
+                           const std::string& uniq_name,
+                           std::vector<int64_t> output_dims)
         : platform::MKLDNNHandlerT<T, dnnl::reduction>(
               dev_ctx, engine, cpu_place,
               platform::CreateKey(dev_ctx, framework::vectorize(x->dims()),

@@ -653,20 +654,11 @@ class ReductionMKLDNNHandler
                         platform::errors::InvalidArgument(
                             "Wrong format set for X tensor."));

       const auto src_tz = framework::vectorize(x->dims());
-      const auto dst_tz = framework::vectorize(y->dims());
-
-      // For oneDNN dimensionality should match so we need to
-      // extend Y tensor dims with values of 1 (before and after pattern)
-      int j = 0;
-      std::vector<int64_t> dst_tz_ex(src_tz.size(), 1);
-      for (size_t i = 0; i < src_tz.size(); ++i) {
-        dst_tz_ex[i] = (src_tz[i] != dst_tz[j]) ? 1 : dst_tz[j++];
-      }

       const auto src_md = dnnl::memory::desc(
           src_tz, platform::MKLDNNGetDataType<T>(), x->format());
       const auto dst_md = memory::desc(
-          dst_tz_ex, platform::MKLDNNGetDataType<T>(), x->format());
+          output_dims, platform::MKLDNNGetDataType<T>(), x->format());

       this->AcquireForwardPrimitiveDescriptor(algo, src_md, dst_md, p, eps);
     }
python/paddle/fluid/tests/unittests/mkldnn/test_reduce_bf16_mkldnn_op.py (new file, mode 100644)

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle


@unittest.skipIf(not core.supports_bfloat16(),
                 "place does not support BF16 evaluation")
@unittest.skipIf(core.is_compiled_with_cuda(),
                 "core is compiled with CUDA which has no BF implementation")
@skip_check_grad_ci(reason="not implemented")
class TestReduceSumDefaultBF16ONEDNNOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        x_fp32 = np.random.random((5, 6, 10)).astype("float32")
        x_bf16 = convert_float_to_uint16(x_fp32)
        self.inputs = {'X': x_bf16}
        self.outputs = {'Out': x_fp32.sum(axis=0)}
        self.attrs = {'use_mkldnn': self.use_mkldnn}

    def test_check_output(self):
        self.check_output(check_dygraph=False)


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DBF16ONEDNNOp(TestReduceSumDefaultBF16ONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        x_fp32 = np.random.random((5, 10, 5, 5)).astype("float32")
        x_bf16 = convert_float_to_uint16(x_fp32)
        self.inputs = {'X': x_bf16}
        self.attrs = {'use_mkldnn': self.use_mkldnn, 'dim': [2]}
        self.outputs = {'Out': x_fp32.sum(axis=tuple(self.attrs['dim']))}


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DReduceAllWithoutReduceAllAttributeBF16ONEDNNOp(
        TestReduceSumDefaultBF16ONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        x_fp32 = np.random.normal(size=(2, 3, 5, 6)).astype('float32')
        x_bf16 = convert_float_to_uint16(x_fp32)
        self.inputs = {'X': x_bf16}
        self.attrs = {'use_mkldnn': self.use_mkldnn, 'dim': [0, 1, 2, 3]}
        self.outputs = {'Out': x_fp32.sum(axis=tuple(self.attrs['dim']))}


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DReduceAllWithoutReduceAllAttributeNegativeDimsBF16ONEDNNOp(
        TestReduceSumDefaultBF16ONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        x_fp32 = np.random.normal(size=(2, 7, 3, 5)).astype('float32')
        x_bf16 = convert_float_to_uint16(x_fp32)
        self.inputs = {'X': x_bf16}
        self.attrs = {'use_mkldnn': self.use_mkldnn, 'dim': [-1, -2, -3, -4]}
        self.outputs = {'Out': x_fp32.sum(axis=tuple(self.attrs['dim']))}


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum5DKeepDimsONEDNNOp(TestReduceSumDefaultBF16ONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        x_fp32 = np.random.random((2, 5, 3, 2, 2)).astype("float32")
        x_bf16 = convert_float_to_uint16(x_fp32)
        self.inputs = {'X': x_bf16}
        self.attrs = {'dim': (2, 3, 4), 'keep_dim': True, 'use_mkldnn': True}
        self.outputs = {
            'Out': x_fp32.sum(axis=tuple(self.attrs['dim']),
                              keepdims=self.attrs['keep_dim'])
        }


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum5DReduceAllKeepDimsBF16ONEDNNOp(
        TestReduceSumDefaultBF16ONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        x_fp32 = np.random.normal(size=(2, 5, 3, 2, 4)).astype('float32')
        x_bf16 = convert_float_to_uint16(x_fp32)
        self.inputs = {'X': x_bf16}
        self.attrs = {'reduce_all': True, 'keep_dim': True, 'use_mkldnn': True}
        self.outputs = {'Out': x_fp32.sum(keepdims=self.attrs['keep_dim'])}


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DReduceAllBF16ONEDNNOp(TestReduceSumDefaultBF16ONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        x_fp32 = np.random.normal(size=(4, 3, 2, 3)).astype('float32')
        x_bf16 = convert_float_to_uint16(x_fp32)
        self.inputs = {'X': x_bf16}
        self.attrs = {'reduce_all': True, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': x_fp32.sum()}


@skip_check_grad_ci(
    reason="reduce_max is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
class TestReduceMax3DBF16ONEDNNOp(TestReduceSumDefaultBF16ONEDNNOp):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.use_mkldnn = True
        x_fp32 = np.random.random((5, 6, 10)).astype("float32")
        x_bf16 = convert_float_to_uint16(x_fp32)
        self.inputs = {'X': x_bf16}
        self.attrs = {'dim': [-1], 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': x_fp32.max(axis=tuple(self.attrs['dim']))}


@skip_check_grad_ci(
    reason="reduce_max is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
class TestReduceMax4DNegativeAndPositiveDimsBF16ONEDNNOp(
        TestReduceSumDefaultBF16ONEDNNOp):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.use_mkldnn = True
        x_fp32 = np.random.random((5, 6, 10, 9)).astype("float32")
        x_bf16 = convert_float_to_uint16(x_fp32)
        self.inputs = {'X': x_bf16}
        self.attrs = {'dim': [-1, 0, 1], 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': x_fp32.max(axis=tuple(self.attrs['dim']))}


@skip_check_grad_ci(
    reason="reduce_min is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
class TestReduceMin3DBF16ONEDNNOp(TestReduceSumDefaultBF16ONEDNNOp):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.use_mkldnn = True
        x_fp32 = np.random.random((5, 6, 10)).astype("float32")
        x_bf16 = convert_float_to_uint16(x_fp32)
        self.inputs = {'X': x_bf16}
        self.attrs = {'dim': [2], 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': x_fp32.min(axis=tuple(self.attrs['dim']))}


@skip_check_grad_ci(reason="not implemented")
class TestReduceMean3DBF16ONEDNNOp(TestReduceSumDefaultBF16ONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_mean"
        self.use_mkldnn = True
        x_fp32 = np.random.random((5, 6, 10)).astype("float32")
        x_bf16 = convert_float_to_uint16(x_fp32)
        self.inputs = {'X': x_bf16}
        self.attrs = {'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': x_fp32.sum(axis=0) / x_fp32.shape[0]}


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()
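Note: the bf16 tests feed inputs through convert_float_to_uint16, which stores each bfloat16 value as the upper 16 bits of its fp32 bit pattern in a uint16 array, while outputs are still checked against fp32 references. A minimal sketch of that representation (a plain truncating conversion; Paddle's helper does equivalent bit surgery in numpy, and rounding-to-nearest variants also exist):

#include <cstdint>
#include <cstring>
#include <iostream>

// bfloat16 keeps the sign bit, all 8 exponent bits and the top 7 mantissa
// bits of an IEEE fp32 value, i.e. its upper 16 bits.
uint16_t FloatToBF16Bits(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));  // reinterpret fp32 as raw bits
  return static_cast<uint16_t>(bits >> 16);
}

float BF16BitsToFloat(uint16_t h) {
  uint32_t bits = static_cast<uint32_t>(h) << 16;  // low mantissa bits -> 0
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  // Round-tripping drops the low 16 mantissa bits (~3 decimal digits kept).
  std::cout << BF16BitsToFloat(FloatToBF16Bits(1.7f)) << '\n';  // ~1.69531
}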
python/paddle/fluid/tests/unittests/mkldnn/test_reduce_mkldnn_op.py (new file, mode 100644)

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
import paddle.fluid as fluid
import paddle


@skip_check_grad_ci(reason="not implemented")
class TestReduceSumDefaultONEDNNOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.attrs = {'use_mkldnn': self.use_mkldnn}

    def test_check_output(self):
        self.check_output()


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DONEDNNOp(TestReduceSumDefaultONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        self.inputs = {'X': np.random.random((5, 10, 5, 5)).astype("float32")}
        self.attrs = {'use_mkldnn': self.use_mkldnn, 'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DReduceAllWithoutReduceAllAttributeONEDNNOp(
        TestReduceSumDefaultONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        self.inputs = {'X': np.random.random((5, 10, 5, 5)).astype("float32")}
        self.attrs = {'use_mkldnn': self.use_mkldnn, 'dim': [0, 1, 2, 3]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DReduceAllWithoutReduceAllAttributeNegativeDimsONEDNNOp(
        TestReduceSumDefaultONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        self.inputs = {'X': np.random.random((5, 10, 5, 5)).astype("float32")}
        self.attrs = {'use_mkldnn': self.use_mkldnn, 'dim': [-1, -2, -3, -4]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum5DKeepDimsONEDNNOp(TestReduceSumDefaultONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2)).astype("float32")
        }
        self.attrs = {'dim': (2, 3, 4), 'keep_dim': True, 'use_mkldnn': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                        keepdims=self.attrs['keep_dim'])
        }


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum5DReduceAllKeepDimsONEDNNOp(TestReduceSumDefaultONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2)).astype("float32")
        }
        self.attrs = {'reduce_all': True, 'keep_dim': True, 'use_mkldnn': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(keepdims=self.attrs['keep_dim'])
        }


@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DReduceAllONEDNNOp(TestReduceSumDefaultONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.use_mkldnn = True
        self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float32")}
        self.attrs = {'reduce_all': True, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.inputs['X'].sum()}


@skip_check_grad_ci(
    reason="reduce_max is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
class TestReduceMax3DONEDNNOp(TestReduceSumDefaultONEDNNOp):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.use_mkldnn = True
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
        self.attrs = {'dim': [-1], 'use_mkldnn': self.use_mkldnn}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }


@skip_check_grad_ci(
    reason="reduce_max is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
class TestReduceMax4DNegativeAndPositiveDimsONEDNNOp(
        TestReduceSumDefaultONEDNNOp):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.use_mkldnn = True
        self.inputs = {'X': np.random.random((5, 6, 10, 9)).astype("float32")}
        self.attrs = {'dim': [-1, 0, 1], 'use_mkldnn': self.use_mkldnn}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }


@skip_check_grad_ci(
    reason="reduce_min is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
class TestReduceMin3DONEDNNOp(TestReduceSumDefaultONEDNNOp):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.use_mkldnn = True
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
        self.attrs = {'dim': [2], 'use_mkldnn': self.use_mkldnn}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }


@skip_check_grad_ci(reason="not implemented")
class TestReduceMean3DONEDNNOp(TestReduceSumDefaultONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_mean"
        self.use_mkldnn = True
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
        self.attrs = {'dim': [0], 'use_mkldnn': self.use_mkldnn}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=0) / self.inputs['X'].shape[0]
        }


@skip_check_grad_ci(reason="not implemented")
class TestReduceMean4DReduceAllONEDNNOp(TestReduceSumDefaultONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_mean"
        self.use_mkldnn = True
        self.inputs = {
            'X': np.random.random((5, 6, 8, 10)).astype("float32")
        }
        self.attrs = {'reduce_all': True, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {
            'Out': self.inputs['X'].sum() /
            np.asarray(self.inputs['X'].shape).prod()
        }


@skip_check_grad_ci(reason="not implemented")
class TestReduceMeanNoReduce1DOp(TestReduceSumDefaultONEDNNOp):
    def setUp(self):
        self.op_type = "reduce_mean"
        self.use_mkldnn = True
        self.inputs = {'X': np.random.random((1)).astype("float32")}
        self.attrs = {'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.inputs['X']}


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()
tools/static_mode_white_list.py

@@ -421,6 +421,8 @@ STATIC_MODE_TESTING_LIST = [
     'test_reader_reset',
     'test_recurrent_op',
     'test_reduce_op',
+    'test_reduce_mkldnn_op',
+    'test_reduce_bf16_mkldnn_op',
     'test_ref_by_trainer_id_op',
     'test_registry',
     'test_regularizer',