From 2c1d494edb3324d1a8a2c7ac2163653cfbd7bd7d Mon Sep 17 00:00:00 2001 From: Xinyu Chen Date: Mon, 27 Mar 2023 15:38:07 +0800 Subject: [PATCH] elementwise: onednn: support zero dimension inputs (#51656) --- paddle/fluid/operators/transfer_layout_op.h | 5 +- paddle/phi/backends/onednn/onednn_reuse.h | 12 +++-- .../kernels/onednn/elementwise_grad_kernel.cc | 6 ++- .../mkldnn/test_elementwise_add_mkldnn_op.py | 21 ++++++++ .../mkldnn/test_elementwise_div_mkldnn_op.py | 39 +++++++++++++++ .../mkldnn/test_elementwise_mul_mkldnn_op.py | 48 +++++++++++++++++++ .../mkldnn/test_elementwise_sub_mkldnn_op.py | 48 +++++++++++++++++++ 7 files changed, 172 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/operators/transfer_layout_op.h b/paddle/fluid/operators/transfer_layout_op.h index 72e16776463..b44378d0c4e 100644 --- a/paddle/fluid/operators/transfer_layout_op.h +++ b/paddle/fluid/operators/transfer_layout_op.h @@ -91,8 +91,9 @@ class TransferLayoutFunctor { phi::funcs::MatchShapeToLayout(&out_tensor, in_layout, out_layout); phi::OneDNNContext::tls().set_cur_paddle_data_layout(in_layout); } - - auto out_tz = phi::vectorize(out_tensor.dims()); + auto out_tz = out_tensor.dims().size() == 0 + ? std::vector<int64_t>{1} + : phi::vectorize(out_tensor.dims()); dnnl::memory::data_type in_type = phi::funcs::ToOneDNNDataType(in_tensor.dtype()); diff --git a/paddle/phi/backends/onednn/onednn_reuse.h b/paddle/phi/backends/onednn/onednn_reuse.h index 19e0175ff9d..8e0eefb8495 100644 --- a/paddle/phi/backends/onednn/onednn_reuse.h +++ b/paddle/phi/backends/onednn/onednn_reuse.h @@ -936,8 +936,13 @@ class BinaryOneDNNHandler : public OneDNNHandlerNoCachingT<T, dnnl::binary> { // if output tensor(z) is nullptr then we are computing into oneDNN // managed buffer auto rankdiff = x->dims().size() - y->dims().size(); - auto dst_tz = (out == nullptr) ? (rankdiff > 0 ? src_x_tz : src_y_tz) : vectorize(out->dims()); + auto dst_tz = + (out == nullptr) + ? (rankdiff > 0 ? src_x_tz + : (y->dims().size() == 0 ? 
std::vector<int64_t>{1} + : src_x_tz)) : (out->dims().size() == 0 ? std::vector<int64_t>{1} + : vectorize(out->dims())); auto src0_md = x->mem_desc(); auto src1_md = y->mem_desc(); @@ -1074,7 +1079,8 @@ class BroadcastDataOneDNNHandler float scale_y, const std::vector<int64_t>& extended_x_dims) : OneDNNHandlerNoCachingT<T, dnnl::binary>(engine, cpu_place) { - const auto src0_tz = vectorize(out->dims()); + const auto src0_tz = out->dims().size() == 0 ? std::vector<int64_t>{1} : vectorize(out->dims()); const auto src0_md = dnnl::memory::desc( src0_tz, OneDNNGetDataType<T>(), GetPlainOneDNNFormat(src0_tz.size())); const auto src1_md = x->mem_desc().reshape(extended_x_dims); diff --git a/paddle/phi/kernels/onednn/elementwise_grad_kernel.cc b/paddle/phi/kernels/onednn/elementwise_grad_kernel.cc index 83d6f8aa597..ad9c9d87e43 100644 --- a/paddle/phi/kernels/onednn/elementwise_grad_kernel.cc +++ b/paddle/phi/kernels/onednn/elementwise_grad_kernel.cc @@ -97,8 +97,10 @@ inline void BroadcastReduction(const Place& place, {DNNL_ARG_DST, *dst_memory}, }); astream.wait(); - grad_tensor->set_mem_desc(dst_memory->get_desc().reshape( - phi::vectorize(grad_tensor->dims()))); + auto grad_shape = grad_tensor->dims().size() == 0 + ? 
std::vector<int64_t>{1} + : phi::vectorize(grad_tensor->dims()); + grad_tensor->set_mem_desc(dst_memory->get_desc().reshape(grad_shape)); } } // namespace funcs diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py index a50289de1ee..20d79a17dfb 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py @@ -103,6 +103,27 @@ class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestMKLDNNElementwiseAddOp): pass +class TestMKLDNNElementwiseAddOpZeroDim(TestMKLDNNElementwiseAddOp): + def init_input_output(self): + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.array(3.0).astype(self.dtype) + self.out = np.add(self.x, self.y) + +class TestMKLDNNElementwiseAddOpZeroDim2(TestMKLDNNElementwiseAddOp): + def init_input_output(self): + self.x = np.array(3.0).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) + self.out = np.add(self.x, self.y) + +class TestMKLDNNElementwiseAddOpZeroDim3(TestMKLDNNElementwiseAddOp): + def init_input_output(self): + self.x = np.array(3.0).astype(self.dtype) + self.y = np.array(3.0).astype(self.dtype) + self.out = np.add(self.x, self.y) + + ''' INT8 Tests ''' diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py index c7f4daa6d79..3df9f009c83 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py @@ -111,6 +111,45 @@ class TestMKLDNNElementwiseDivOp5(TestMKLDNNElementwiseDivOp): pass +class TestMKLDNNElementwiseDivOpZeroDim(TestMKLDNNElementwiseDivOp): + def init_input_output(self): + self.x = np.random.uniform(0.1, 1, [100]).astype(self.dtype) + self.y = 
np.array(3.0).astype(self.dtype) + self.out = np.divide(self.x, self.y) + + def test_check_grad_normal(self): + pass + + def test_check_grad_ignore_x(self): + pass + + +class TestMKLDNNElementwiseDivOpZeroDim2(TestMKLDNNElementwiseDivOp): + def init_input_output(self): + self.x = np.array(3.0).astype(self.dtype) + self.y = np.random.uniform(0.1, 1, [100]).astype(self.dtype) + self.out = np.divide(self.x, self.y) + + def test_check_grad_normal(self): + pass + + def test_check_grad_ignore_x(self): + pass + + +class TestMKLDNNElementwiseDivOpZeroDim3(TestMKLDNNElementwiseDivOp): + def init_input_output(self): + self.x = np.array(3.0).astype(self.dtype) + self.y = np.array(3.0).astype(self.dtype) + self.out = np.divide(self.x, self.y) + + def test_check_grad_normal(self): + pass + + def test_check_grad_ignore_x(self): + pass + + @OpTestTool.skip_if_not_cpu_bf16() class TestBf16(TestMKLDNNElementwiseDivOp): def setUp(self): diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py index a558b05196d..5dd77e414b7 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py @@ -76,6 +76,54 @@ class TestMKLDNNElementwiseMulOp5(TestMKLDNNElementwiseMulOp): pass +class TestMKLDNNElementwiseMulOpZeroDim(TestMKLDNNElementwiseMulOp): + def init_input_output(self): + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.array(3.0).astype(self.dtype) + self.out = np.multiply(self.x, self.y) + + def test_check_grad_normal(self): + pass + + def test_check_grad_ingore_y(self): + pass + + def test_check_grad_ingore_x(self): + pass + + +class TestMKLDNNElementwiseMulOpZeroDim2(TestMKLDNNElementwiseMulOp): + def init_input_output(self): + self.x = np.array(3.0).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) + self.out = 
np.multiply(self.x, self.y) + + def test_check_grad_normal(self): + pass + + def test_check_grad_ingore_y(self): + pass + + def test_check_grad_ingore_x(self): + pass + + +class TestMKLDNNElementwiseMulOpZeroDim3(TestMKLDNNElementwiseMulOp): + def init_input_output(self): + self.x = np.array(3.0).astype(self.dtype) + self.y = np.array(3.0).astype(self.dtype) + self.out = np.multiply(self.x, self.y) + + def test_check_grad_normal(self): + pass + + def test_check_grad_ingore_y(self): + pass + + def test_check_grad_ingore_x(self): + pass + + ''' INT8 Tests ''' diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py index c18becac7f6..a3623f163d1 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py @@ -133,6 +133,54 @@ class TestElementwiseSubOp_xsize_lessthan_ysize_sub(TestMKLDNNElementwiseSubOp): self.axis = 2 +class TestMKLDNNElementwiseSubOpZeroDim(TestMKLDNNElementwiseSubOp): + def init_input_output(self): + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.array(3.0).astype(self.dtype) + self.out = np.subtract(self.x, self.y) + + def test_check_grad_normal(self): + pass + + def test_check_grad_ignore_x(self): + pass + + def test_check_grad_ignore_y(self): + pass + + +class TestMKLDNNElementwiseSubOpZeroDim2(TestMKLDNNElementwiseSubOp): + def init_input_output(self): + self.x = np.array(3.0).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) + self.out = np.subtract(self.x, self.y) + + def test_check_grad_normal(self): + pass + + def test_check_grad_ignore_x(self): + pass + + def test_check_grad_ignore_y(self): + pass + + +class TestMKLDNNElementwiseSubOpZeroDim3(TestMKLDNNElementwiseSubOp): + def init_input_output(self): + self.x = np.array(3.0).astype(self.dtype) + self.y = 
np.array(3.0).astype(self.dtype) + self.out = np.subtract(self.x, self.y) + + def test_check_grad_normal(self): + pass + + def test_check_grad_ignore_x(self): + pass + + def test_check_grad_ignore_y(self): + pass + + @OpTestTool.skip_if_not_cpu_bf16() class TestBf16(TestMKLDNNElementwiseSubOp): def setUp(self): -- GitLab