From 571a63e7ecc1f385360230cfc5811e8ffc8defe6 Mon Sep 17 00:00:00 2001
From: "joanna.wozna.intel"
Date: Thu, 29 Oct 2020 04:09:54 +0100
Subject: [PATCH] Add bf16 transpose2, reshape2, concat ops (#28195)

---
 .../framework/ir/graph_pattern_detector.cc    |   3 +-
 .../cpu_bfloat16_placement_pass_tester.cc     |  15 ++-
 .../operators/mkldnn/concat_mkldnn_op.cc      |   1 +
 .../operators/mkldnn/transpose_mkldnn_op.cc   |   5 +
 paddle/fluid/operators/reshape_op.cc          |   4 +-
 .../mkldnn/test_concat_bf16_mkldnn_op.py      | 110 ++++++++++++++++++
 .../unittests/mkldnn/test_reshape_bf16_op.py  |  62 ++++++++++
 .../mkldnn/test_transpose_bf16_mkldnn_op.py   |  66 +++++++++++
 8 files changed, 260 insertions(+), 6 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py
 create mode 100644 python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py
 create mode 100644 python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py

diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc
index 5ffaf28fe9..20da74eca4 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.cc
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -2101,7 +2101,8 @@ PDNode *patterns::QuantizePlacement::operator()(
 PDNode *patterns::Bfloat16Placement::operator()(
     const std::unordered_set<std::string> &bfloat16_enabled_op_types) {
   std::unordered_set<std::string> supported_op_types =
-      std::unordered_set<std::string>({"conv2d", "fusion_gru"});
+      std::unordered_set<std::string>(
+          {"concat", "conv2d", "fusion_gru", "reshape2", "transpose2"});
   if (!bfloat16_enabled_op_types.empty()) {
     supported_op_types = bfloat16_enabled_op_types;
   }
diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass_tester.cc
index b9797a4bfc..146e29249b 100644
--- a/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass_tester.cc
+++ b/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass_tester.cc
@@ -40,6 +40,10 @@ void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
     op->SetInput("X", {inputs[0], inputs[1]});
   } else if (type == "pool2d") {
     op->SetInput("X", {inputs[0]});
+  } else if (type == "transpose2") {
+    op->SetInput("X", {inputs[0]});
+  } else if (type == "reshape2") {
+    op->SetInput("X", {inputs[0]});
   } else {
     FAIL() << "Unexpected operator type.";
   }
@@ -57,8 +61,8 @@
 ProgramDesc BuildProgramDesc() {
   ProgramDesc prog;
 
-  for (auto& v :
-       std::vector<std::string>({"a", "b", "c", "f", "g", "h", "k", "l"})) {
+  for (auto& v : std::vector<std::string>(
+           {"a", "b", "c", "f", "g", "h", "k", "l", "m", "n", "o", "p"})) {
     prog.MutableBlock(0)->Var(v);
   }
 
@@ -68,6 +72,9 @@ ProgramDesc BuildProgramDesc() {
   SetOp(&prog, "pool2d", "pool1", {"g"}, {"h"});
   SetOp(&prog, "conv2d", "conv2", {"h"}, {"k"});
   SetOp(&prog, "pool2d", "pool2", {"k"}, {"l"});
+  SetOp(&prog, "concat", "concat2", {"l", "m"}, {"n"});
+  SetOp(&prog, "transpose2", "transpose", {"n"}, {"o"});
+  SetOp(&prog, "reshape2", "reshape", {"o"}, {"p"});
 
   return prog;
 }
@@ -115,7 +122,7 @@ void DefaultAttrTest(unsigned expected_bfloat16_data_type_count) {
 }
 
 TEST(Bfloat16PlacementPass, enable_all) {
-  MainTest({"conv2d", "pool2d", "relu", "concat"}, 6);
+  MainTest({"conv2d", "pool2d", "relu", "concat"}, 7);
 }
 
 TEST(Bfloat16PlacementPass, enabled_conv_and_pool) {
@@ -123,7 +130,7 @@ TEST(Bfloat16PlacementPass, enabled_conv_and_pool) {
   MainTest({"conv2d", "pool2d"}, 3);
 }
 
-TEST(Bfloat16PlacementPass, default_attr_value) { DefaultAttrTest(0); }
+TEST(Bfloat16PlacementPass, default_attr_value) { DefaultAttrTest(5); }
 
 }  // namespace ir
 }  // namespace framework
diff --git a/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc
index b2815cbdc6..bb475b4e54 100644
--- a/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc
@@ -221,5 +221,6 @@ namespace ops = paddle::operators;
 
 REGISTER_OP_KERNEL(concat, MKLDNN, ::paddle::platform::CPUPlace,
                    ops::ConcatMKLDNNOpKernel<float>,
+                   ops::ConcatMKLDNNOpKernel<paddle::platform::bfloat16>,
                    ops::ConcatMKLDNNOpKernel<int8_t>,
                    ops::ConcatMKLDNNOpKernel<uint8_t>);
diff --git a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
index 398bdb01b5..28cdd8413a 100644
--- a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
@@ -142,6 +142,11 @@ REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(transpose2, MKLDNN,
                                     ops::kTransposeMKLDNNINT8,
                                     ops::TransposeMKLDNNOpKernel<int8_t>);
 
+REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(
+    transpose2, MKLDNN, ::paddle::platform::CPUPlace, BF16,
+    ops::kTransposeMKLDNNFP32,
+    ops::TransposeMKLDNNOpKernel<paddle::platform::bfloat16>);
+
 REGISTER_OP_KERNEL(transpose, MKLDNN, ::paddle::platform::CPUPlace,
                    ops::TransposeMKLDNNOpKernel<float>);
 
diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc
index aa8e390370..7cf85420c5 100644
--- a/paddle/fluid/operators/reshape_op.cc
+++ b/paddle/fluid/operators/reshape_op.cc
@@ -622,7 +622,9 @@ REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double,
                                ops::ReshapeKernel, int8_t, ops::ReshapeKernel,
                                uint8_t, ops::ReshapeKernel, int,
                                ops::ReshapeKernel, int64_t, ops::ReshapeKernel,
-                               bool, ops::ReshapeKernel);
+                               bool, ops::ReshapeKernel,
+                               paddle::platform::bfloat16, ops::ReshapeKernel);
+
 REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel,
                                double, ops::ReshapeGradKernel, int,
                                ops::ReshapeGradKernel, int64_t,
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py
new file mode 100644
index 0000000000..1179556f91
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+import struct
+
+import paddle.fluid.core as core
+from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
+from paddle import enable_static
+
+
+@unittest.skipIf(not core.supports_bfloat16(),
+                 "place does not support BF16 evaluation")
+class TestConcatBf16Op(OpTest):
+    def setUp(self):
+        enable_static()
+        self.op_type = "concat"
+        self.use_mkldnn = True
+        self.mkldnn_data_type = "bfloat16"
+        self.init_axis()
+        self.init_shape()
+        self.init_test_data()
+        self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]}
+        self.attrs = {
+            'axis': self.axis,
+            'use_mkldnn': True,
+            'mkldnn_data_type': self.mkldnn_data_type
+        }
+
+        self.output = np.concatenate(
+            (self.x0, self.x1, self.x2), axis=self.axis).astype(np.uint16)
+        self.outputs = {'Out': self.output}
+
+    def test_check_output(self):
+        self.check_output_with_place(core.CPUPlace())
+
+# --------------------test concat bf16 with axis 0--------------------
+
+    def init_test_data(self):
+        self.x0 = convert_float_to_uint16(
+            np.random.random(self.x0_shape).astype(np.float32))
+        self.x1 = convert_float_to_uint16(
+            np.random.random(self.x1_shape).astype(np.float32))
+        self.x2 = convert_float_to_uint16(
+            np.random.random(self.x2_shape).astype(np.float32))
+
+    def init_axis(self):
+        self.axis = 0
+
+    def init_shape(self):
+        self.x0_shape = [2, 2, 1, 2]
+        self.x1_shape = [1, 2, 1, 2]
+        self.x2_shape = [3, 2, 1, 2]
+
+
+# --------------------test concat bf16 with axis 1--------------------
+
+
+class TestAxis1Case(TestConcatBf16Op):
+    def init_axis(self):
+        self.axis = 1
+
+    def init_shape(self):
+        self.x0_shape = [1, 1, 5, 5]
+        self.x1_shape = [1, 2, 5, 5]
+        self.x2_shape = [1, 3, 5, 5]
+
+
+# --------------------test concat bf16 with axis 2--------------------
+
+
+class TestAxis2Case(TestConcatBf16Op):
+    def init_axis(self):
+        self.axis = 2
+
+    def init_shape(self):
+        self.x0_shape = [2, 3, 4, 5]
+        self.x1_shape = [2, 3, 5, 5]
+        self.x2_shape = [2, 3, 6, 5]
+
+
+# --------------------test concat bf16 with axis 3--------------------
+
+
+class TestAxis3Case(TestConcatBf16Op):
+    def init_axis(self):
+        self.axis = 3
+
+    def init_shape(self):
+        self.x0_shape = [2, 3, 5, 5]
+        self.x1_shape = [2, 3, 5, 6]
+        self.x2_shape = [2, 3, 5, 7]
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py
new file mode 100644
index 0000000000..854ddb17fb
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+import struct
+
+import paddle.fluid.core as core
+from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
+from paddle import enable_static
+
+
+@unittest.skipIf(not core.supports_bfloat16(),
+                 "place does not support BF16 evaluation")
+class TestReshapeBf16Op(OpTest):
+    def setUp(self):
+        enable_static()
+        self.op_type = "reshape2"
+        self.use_mkldnn = True
+        self.mkldnn_data_type = "bfloat16"
+        self.init_data()
+        self.init_input_data()
+
+        self.inputs = {'X': self.input_data}
+        self.attrs = {
+            'shape': self.new_shape,
+            'use_mkldnn': self.use_mkldnn,
+            'mkldnn_data_type': self.mkldnn_data_type
+        }
+        self.outputs = {
+            "Out": self.inputs["X"].reshape(self.inferred_shape),
+            'XShape': np.random.random(self.ori_shape).astype(np.float32)
+        }
+
+    def init_data(self):
+        self.ori_shape = (10, 2, 6)
+        self.new_shape = (10, 0, 3, -1)
+        self.inferred_shape = (10, 2, 3, -1)
+
+    def init_input_data(self):
+        self.input_data = convert_float_to_uint16(
+            np.random.random(self.ori_shape).astype(np.float32))
+
+    def test_check_output(self):
+        self.check_output_with_place(core.CPUPlace(), no_check_set=['XShape'])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py
new file mode 100644
index 0000000000..de04cecbf4
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+import paddle.fluid.core as core
+from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
+from paddle import enable_static
+
+
+@unittest.skipIf(not core.supports_bfloat16(),
+                 "place does not support BF16 evaluation")
+class TestTransposeOp(OpTest):
+    def setUp(self):
+        enable_static()
+        self.op_type = "transpose2"
+        self.use_mkldnn = True
+        self.mkldnn_data_type = "bfloat16"
+        self.init_test_case()
+        self.init_test_data()
+        self.axis = (0, 2, 3, 1)
+
+        self.inputs = {'X': self.input_data}
+
+        self.attrs = {
+            'axis': list(self.axis),
+            'use_mkldnn': self.use_mkldnn,
+            'mkldnn_data_type': self.mkldnn_data_type
+        }
+
+        self.outputs = {
+            'XShape': np.random.random(self.shape).astype(np.uint16),
+            'Out': self.inputs['X'].transpose(self.axis)
+        }
+
+    def test_check_output(self):
+        self.check_output_with_place(core.CPUPlace(), no_check_set=['XShape'])
+
+    def init_test_case(self):
+        self.shape = (2, 3, 4, 5)
+
+    def init_test_data(self):
+        self.input_data = convert_float_to_uint16(
+            np.random.random(self.shape).astype(np.float32))
+
+
+class TestBF16Case(TestTransposeOp):
+    def init_test_case(self):
+        self.shape = (2, 4, 6, 8)
+
+
+if __name__ == '__main__':
+    unittest.main()
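
Editor's note: numpy has no native bfloat16 dtype, so the tests above pass bf16 tensors
around as np.uint16 arrays holding raw bit patterns; convert_float_to_uint16 produces
exactly such a carrier, which is why the expected concat output is cast to np.uint16.
The sketch below shows the underlying bit trick in plain numpy: a bfloat16 value is the
high 16 bits of the IEEE-754 float32 pattern. The function names here are illustrative
only, and the sketch truncates for brevity; whether Paddle's helper truncates or rounds
to nearest even is left as an assumption.

    import numpy as np

    def float32_to_bf16_bits(x):
        # View the float32 bits as uint32 and keep the top 16 bits:
        # a bfloat16 value stored in a uint16 carrier (truncation, no rounding).
        x = np.ascontiguousarray(x, dtype=np.float32)
        return (x.view(np.uint32) >> 16).astype(np.uint16)

    def bf16_bits_to_float32(bits):
        # Widen back to 32 bits; the dropped mantissa bits become zero.
        bits = np.ascontiguousarray(bits, dtype=np.uint16)
        return (bits.astype(np.uint32) << 16).view(np.float32)

    x = np.random.random((2, 3, 4, 5)).astype(np.float32)
    bits = float32_to_bf16_bits(x)      # what the bf16 kernels consume/produce
    back = bf16_bits_to_float32(bits)   # bf16 keeps ~2-3 decimal digits
    assert np.allclose(x, back, atol=1e-2)

Because reshape2 and transpose2 only move or relabel data, registering them for
bfloat16 needs no new arithmetic: the uint16 bit patterns are copied as-is, which is
why the tests can compare against numpy reshape/transpose of the carrier arrays.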