diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index c3f550c0ed8d9bd175822d1f91ef1cf29b58d0b7..e6abde83498f8437550bcba2c64bd82553e83eaa 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -2103,8 +2103,8 @@ PDNode *patterns::Bfloat16Placement::operator()( std::unordered_set<std::string> supported_op_types = std::unordered_set<std::string>( {"concat", "conv2d", "elementwise_add", "elementwise_mul", "fc", - "fusion_gru", "gelu", "layer_norm", "matmul", "reshape2", "softmax", - "sum", "transpose2"}); + "fusion_gru", "gelu", "layer_norm", "matmul", "pool2d", "reshape2", + "softmax", "sum", "transpose2"}); if (!bfloat16_enabled_op_types.empty()) { supported_op_types = bfloat16_enabled_op_types; } diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass_tester.cc index c64bc8a214acac52341751bafc3d98892586f8bc..28a45f36fb71d7aa5e13128afa2e06301dbbcef9 100644 --- a/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass_tester.cc @@ -136,7 +136,7 @@ TEST(Bfloat16PlacementPass, enabled_conv_and_pool) { MainTest({"conv2d", "pool2d"}, 3); } -TEST(Bfloat16PlacementPass, default_attr_value) { DefaultAttrTest(7); } +TEST(Bfloat16PlacementPass, default_attr_value) { DefaultAttrTest(10); } } // namespace ir } // namespace framework diff --git a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc index 72d2f779f800b3c316da2aecae4068dc3fed025e..4e689f5bccf4b4f3925eec18710faa87fe47498a 100644 --- a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc @@ -181,7 +181,8 @@ namespace ops = paddle::operators; REGISTER_OP_KERNEL(pool2d, MKLDNN, ::paddle::platform::CPUPlace, ops::PoolMKLDNNOpKernel<float>, 
ops::PoolMKLDNNOpKernel<int8_t>, - ops::PoolMKLDNNOpKernel<uint8_t>); + ops::PoolMKLDNNOpKernel<uint8_t>, + ops::PoolMKLDNNOpKernel<paddle::platform::bfloat16>); REGISTER_OP_KERNEL(pool2d_grad, MKLDNN, ::paddle::platform::CPUPlace, ops::PoolMKLDNNGradOpKernel<float>); diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py index 1179556f915be887cf7646424a38d27810dcd8a2..2b7b2b36afa4fb22c3bdfbb9beb8415f2159d99d 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py @@ -27,7 +27,6 @@ from paddle import enable_static "place does not support BF16 evaluation") class TestConcatBf16Op(OpTest): def setUp(self): - enable_static() self.op_type = "concat" self.use_mkldnn = True self.mkldnn_data_type = "bfloat16" @@ -107,4 +106,5 @@ class TestAxis3Case(TestConcatBf16Op): if __name__ == '__main__': + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py new file mode 100644 index 0000000000000000000000000000000000000000..da37b33d30d5de56ffc95fc38f2bc3f6877a7d5b --- /dev/null +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py @@ -0,0 +1,100 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import unittest +import os +import numpy as np +import paddle.fluid.core as core +from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16 +from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, avg_pool2D_forward_naive, max_pool2D_forward_naive +from paddle import enable_static + + +@unittest.skipIf(not core.supports_bfloat16(), + "place does not support BF16 evaluation") +class TestPoolBf16MklDNNOp(TestPool2D_Op): + def init_kernel_type(self): + self.use_mkldnn = True + + def setUp(self): + TestPool2D_Op.setUp(self) + self.dtype = np.uint16 + + input = np.random.random(self.shape).astype(np.float32) + output = (self.pool2D_forward_naive( + input, self.ksize, self.strides, self.paddings, self.global_pool, + self.ceil_mode, self.exclusive, self.adaptive, + "float32")).astype(np.float32) + + self.inputs = {'X': convert_float_to_uint16(input)} + self.outputs = {'Out': convert_float_to_uint16(output)} + + def test_check_output(self): + self.check_output_with_place(core.CPUPlace()) + + def test_check_grad(self): + pass + + +class TestCase1Avg(TestPoolBf16MklDNNOp): + def init_test_case(self): + self.shape = [2, 3, 7, 7] + self.ksize = [3, 3] + self.strides = [1, 1] + self.paddings = [0, 0] + + def init_global_pool(self): + self.global_pool = False + + def init_exclusive(self): + self.exclusive = True + + +class TestCase2Avg(TestPoolBf16MklDNNOp): + def init_test_case(self): + self.shape = [2, 3, 7, 7] + self.ksize = [3, 3] + self.strides = [1, 1] + self.paddings = [1, 1] + + def init_global_pool(self): + self.global_pool = False + + def init_exclusive(self): + self.exclusive = False + + +class TestCase0Max(TestPoolBf16MklDNNOp): + def init_pool_type(self): + self.pool_type = "max" + self.pool2D_forward_naive = max_pool2D_forward_naive + + +class TestCase1Max(TestCase1Avg): + def init_pool_type(self): + self.pool_type = "max" + self.pool2D_forward_naive = 
max_pool2D_forward_naive + + +class TestCase2Max(TestCase2Avg): + def init_pool_type(self): + self.pool_type = "max" + self.pool2D_forward_naive = max_pool2D_forward_naive + + +if __name__ == "__main__": + enable_static() + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py index 854ddb17fb275acb14d06c370011e68ee83c43f2..5128dc1c4a3447a3d975f0f9d31019fbf4cc060d 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py @@ -27,7 +27,6 @@ from paddle import enable_static "place does not support BF16 evaluation") class TestReshapeBf16Op(OpTest): def setUp(self): - enable_static() self.op_type = "reshape2" self.use_mkldnn = True self.mkldnn_data_type = "bfloat16" @@ -59,4 +58,5 @@ class TestReshapeBf16Op(OpTest): if __name__ == '__main__': + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py index 5ba944c3b98f482fd9b165637132da2c3cdfe99e..e9b0cafd11495c8403702fb410b42700f52b3d01 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py @@ -29,6 +29,8 @@ def stable_softmax(x): return exps / np.sum(exps) +@unittest.skipIf(not core.supports_bfloat16(), + "place does not support BF16 evaluation") class TestSoftmaxMKLDNNOp(TestSoftmaxOp): def get_x_shape(self): return [10, 10] diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py index de04cecbf4c9bc89b02213134e192b594f61127a..72efa0aa99e7d18ddb6cb9dde44106a78613dd84 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py +++ 
b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py @@ -25,7 +25,6 @@ from paddle import enable_static "place does not support BF16 evaluation") class TestTransposeOp(OpTest): def setUp(self): - enable_static() self.op_type = "transpose2" self.use_mkldnn = True self.mkldnn_data_type = "bfloat16" @@ -63,4 +62,5 @@ class TestBF16Case(TestTransposeOp): if __name__ == '__main__': + enable_static() unittest.main() diff --git a/tools/static_mode_white_list.py b/tools/static_mode_white_list.py index b6e8203aa774d0d2dfbdc14566ec64002f9fbd02..68e58445da0365ebed38c4e2893c0e4d51049d2a 100644 --- a/tools/static_mode_white_list.py +++ b/tools/static_mode_white_list.py @@ -425,6 +425,7 @@ STATIC_MODE_TESTING_LIST = [ 'test_regularizer_api', 'test_reorder_lod_tensor', 'test_reshape_op', + 'test_reshape_bf16_op', 'test_retinanet_detection_output', 'test_reverse_op', 'test_rmsprop_op', @@ -582,6 +583,7 @@ STATIC_MODE_TESTING_LIST = [ 'test_var_conv_2d', 'test_batch_norm_mkldnn_op', 'test_concat_int8_mkldnn_op', + 'test_concat_bf16_mkldnn_op', 'test_concat_mkldnn_op', 'test_conv2d_bf16_mkldnn_op', 'test_conv2d_int8_mkldnn_op', @@ -606,6 +608,7 @@ STATIC_MODE_TESTING_LIST = [ 'test_multi_gru_fuse_pass', 'test_multi_gru_seq_fuse_pass', 'test_pool2d_int8_mkldnn_op', + 'test_pool2d_bf16_mkldnn_op', 'test_pool2d_mkldnn_op', 'test_quantize_mkldnn_op', 'test_requantize_mkldnn_op', @@ -614,6 +617,7 @@ STATIC_MODE_TESTING_LIST = [ 'test_sum_mkldnn_op', 'test_sum_bf16_mkldnn_op', 'test_transpose_int8_mkldnn_op', + 'test_transpose_bf16_mkldnn_op', 'test_transpose_mkldnn_op', 'test_mkldnn_conv_activation_fuse_pass', 'test_mkldnn_conv_concat_relu_mkldnn_fuse_pass',