diff --git a/paddle/fluid/operators/transfer_layout_op.h b/paddle/fluid/operators/transfer_layout_op.h
index 72e1677646383b3add7394babbde398035f54bb1..b44378d0c4e1578b96de0e7d4f3b5656b3150f39 100644
--- a/paddle/fluid/operators/transfer_layout_op.h
+++ b/paddle/fluid/operators/transfer_layout_op.h
@@ -91,8 +91,9 @@ class TransferLayoutFunctor {
             phi::funcs::MatchShapeToLayout(&out_tensor, in_layout, out_layout);
             phi::OneDNNContext::tls().set_cur_paddle_data_layout(in_layout);
           }
-
-          auto out_tz = phi::vectorize(out_tensor.dims());
+          auto out_tz = out_tensor.dims().size() == 0
+                            ? std::vector<int64_t>{1}
+                            : phi::vectorize(out_tensor.dims());
 
           dnnl::memory::data_type in_type =
               phi::funcs::ToOneDNNDataType(in_tensor.dtype());
diff --git a/paddle/phi/backends/onednn/onednn_reuse.h b/paddle/phi/backends/onednn/onednn_reuse.h
index 19e0175ff9dd604e72097fa429a8a2607d5e2479..8e0eefb8495f218b6952d6a7e85e06ce73d1cfd3 100644
--- a/paddle/phi/backends/onednn/onednn_reuse.h
+++ b/paddle/phi/backends/onednn/onednn_reuse.h
@@ -936,8 +936,13 @@ class BinaryOneDNNHandler : public OneDNNHandlerNoCachingT<T, dnnl::binary> {
     // if output tensor(z) is nullptr then we are computing into oneDNN
     // managed buffer
     auto rankdiff = x->dims().size() - y->dims().size();
-    auto dst_tz = (out == nullptr) ? (rankdiff > 0 ? src_x_tz : src_y_tz)
-                                   : vectorize(out->dims());
+    auto dst_tz =
+        (out == nullptr)
+            ? (rankdiff > 0 ? src_x_tz
+                            : (y->dims().size() == 0 ? std::vector<int64_t>{1}
+                                                     : src_x_tz))
+            : (out->dims().size() == 0 ? std::vector<int64_t>{1}
+                                       : vectorize(out->dims()));
 
     auto src0_md = x->mem_desc();
     auto src1_md = y->mem_desc();
@@ -1074,7 +1079,8 @@ class BroadcastDataOneDNNHandler
                              float scale_y,
                              const std::vector<int64_t>& extended_x_dims)
       : OneDNNHandlerNoCachingT<T, dnnl::binary>(engine, cpu_place) {
-    const auto src0_tz = vectorize(out->dims());
+    const auto src0_tz = out->dims().size() == 0 ? std::vector<int64_t>{1}
+                                                 : vectorize(out->dims());
     const auto src0_md = dnnl::memory::desc(
         src0_tz, OneDNNGetDataType<T>(), GetPlainOneDNNFormat(src0_tz.size()));
     const auto src1_md = x->mem_desc().reshape(extended_x_dims);
diff --git a/paddle/phi/kernels/onednn/elementwise_grad_kernel.cc b/paddle/phi/kernels/onednn/elementwise_grad_kernel.cc
index 83d6f8aa59777dbf8a9892ba90f83d5e126d4384..ad9c9d87e4330bccb4074d2a7329fe12ab8213b8 100644
--- a/paddle/phi/kernels/onednn/elementwise_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/elementwise_grad_kernel.cc
@@ -97,8 +97,10 @@ inline void BroadcastReduction(const Place& place,
                            {DNNL_ARG_DST, *dst_memory},
                        });
   astream.wait();
-  grad_tensor->set_mem_desc(dst_memory->get_desc().reshape(
-      phi::vectorize(grad_tensor->dims())));
+  auto grad_shape = grad_tensor->dims().size() == 0
+                        ? std::vector<int64_t>{1}
+                        : phi::vectorize(grad_tensor->dims());
+  grad_tensor->set_mem_desc(dst_memory->get_desc().reshape(grad_shape));
 }
 
 }  // namespace funcs
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py
index a50289de1eebfef4841413152cac76cfff61f454..20d79a17dfb138936ad6e207d046c8a0c2f865f1 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py
@@ -103,6 +103,27 @@ class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestMKLDNNElementwiseAddOp):
         pass
 
 
+class TestMKLDNNElementwiseAddOpZeroDim(TestMKLDNNElementwiseAddOp):
+    def init_input_output(self):
+        self.x = np.random.random((100,)).astype(self.dtype)
+        self.y = np.array(3.0).astype(self.dtype)
+        self.out = np.add(self.x, self.y)
+
+
+class TestMKLDNNElementwiseAddOpZeroDim2(TestMKLDNNElementwiseAddOp):
+    def init_input_output(self):
+        self.x = np.array(3.0).astype(self.dtype)
+        self.y = np.random.random((100,)).astype(self.dtype)
+        self.out = np.add(self.x, self.y)
+
+
+class TestMKLDNNElementwiseAddOpZeroDim3(TestMKLDNNElementwiseAddOp):
+    def init_input_output(self):
+        self.x = np.array(3.0).astype(self.dtype)
+        self.y = np.array(3.0).astype(self.dtype)
+        self.out = np.add(self.x, self.y)
+
+
 ''' INT8 Tests '''
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py
index c7f4daa6d7950a9d1f9fc49b1a04b11fd1ccd0f2..3df9f009c8311b48ff9a2fa4b0b94fa99ca4c83d 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py
@@ -111,6 +111,45 @@ class TestMKLDNNElementwiseDivOp5(TestMKLDNNElementwiseDivOp):
         pass
 
 
+class TestMKLDNNElementwiseDivOpZeroDim(TestMKLDNNElementwiseDivOp):
+    def init_input_output(self):
+        self.x = np.random.uniform(0.1, 1, [100]).astype(self.dtype)
+        self.y = np.array(3.0).astype(self.dtype)
+        self.out = np.divide(self.x, self.y)
+
+    def test_check_grad_normal(self):
+        pass
+
+    def test_check_grad_ignore_x(self):
+        pass
+
+
+class TestMKLDNNElementwiseDivOpZeroDim2(TestMKLDNNElementwiseDivOp):
+    def init_input_output(self):
+        self.x = np.array(3.0).astype(self.dtype)
+        self.y = np.random.uniform(0.1, 1, [100]).astype(self.dtype)
+        self.out = np.divide(self.x, self.y)
+
+    def test_check_grad_normal(self):
+        pass
+
+    def test_check_grad_ignore_x(self):
+        pass
+
+
+class TestMKLDNNElementwiseDivOpZeroDim3(TestMKLDNNElementwiseDivOp):
+    def init_input_output(self):
+        self.x = np.array(3.0).astype(self.dtype)
+        self.y = np.array(3.0).astype(self.dtype)
+        self.out = np.divide(self.x, self.y)
+
+    def test_check_grad_normal(self):
+        pass
+
+    def test_check_grad_ignore_x(self):
+        pass
+
+
 @OpTestTool.skip_if_not_cpu_bf16()
 class TestBf16(TestMKLDNNElementwiseDivOp):
     def setUp(self):
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py
index a558b05196dcf1e5510c3d59c98885d4fa26d94c..5dd77e414b74de3ef6e8fea047f31da41e9eaaee 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py
@@ -76,6 +76,54 @@ class TestMKLDNNElementwiseMulOp5(TestMKLDNNElementwiseMulOp):
         pass
 
 
+class TestMKLDNNElementwiseMulOpZeroDim(TestMKLDNNElementwiseMulOp):
+    def init_input_output(self):
+        self.x = np.random.random((100,)).astype(self.dtype)
+        self.y = np.array(3.0).astype(self.dtype)
+        self.out = np.multiply(self.x, self.y)
+
+    def test_check_grad_normal(self):
+        pass
+
+    def test_check_grad_ingore_y(self):
+        pass
+
+    def test_check_grad_ingore_x(self):
+        pass
+
+
+class TestMKLDNNElementwiseMulOpZeroDim2(TestMKLDNNElementwiseMulOp):
+    def init_input_output(self):
+        self.x = np.array(3.0).astype(self.dtype)
+        self.y = np.random.random((100,)).astype(self.dtype)
+        self.out = np.multiply(self.x, self.y)
+
+    def test_check_grad_normal(self):
+        pass
+
+    def test_check_grad_ingore_y(self):
+        pass
+
+    def test_check_grad_ingore_x(self):
+        pass
+
+
+class TestMKLDNNElementwiseMulOpZeroDim3(TestMKLDNNElementwiseMulOp):
+    def init_input_output(self):
+        self.x = np.array(3.0).astype(self.dtype)
+        self.y = np.array(3.0).astype(self.dtype)
+        self.out = np.multiply(self.x, self.y)
+
+    def test_check_grad_normal(self):
+        pass
+
+    def test_check_grad_ingore_y(self):
+        pass
+
+    def test_check_grad_ingore_x(self):
+        pass
+
+
 ''' INT8 Tests '''
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py
index c18becac7f6937c63359fcf510ab5e3cc6b06d93..a3623f163d195a6bee07be491e93911234faf39c 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py
@@ -133,6 +133,54 @@ class TestElementwiseSubOp_xsize_lessthan_ysize_sub(TestMKLDNNElementwiseSubOp):
         self.axis = 2
 
 
+class TestMKLDNNElementwiseSubOpZeroDim(TestMKLDNNElementwiseSubOp):
+    def init_input_output(self):
+        self.x = np.random.random((100,)).astype(self.dtype)
+        self.y = np.array(3.0).astype(self.dtype)
+        self.out = np.subtract(self.x, self.y)
+
+    def test_check_grad_normal(self):
+        pass
+
+    def test_check_grad_ignore_x(self):
+        pass
+
+    def test_check_grad_ignore_y(self):
+        pass
+
+
+class TestMKLDNNElementwiseSubOpZeroDim2(TestMKLDNNElementwiseSubOp):
+    def init_input_output(self):
+        self.x = np.array(3.0).astype(self.dtype)
+        self.y = np.random.random((100,)).astype(self.dtype)
+        self.out = np.subtract(self.x, self.y)
+
+    def test_check_grad_normal(self):
+        pass
+
+    def test_check_grad_ignore_x(self):
+        pass
+
+    def test_check_grad_ignore_y(self):
+        pass
+
+
+class TestMKLDNNElementwiseSubOpZeroDim3(TestMKLDNNElementwiseSubOp):
+    def init_input_output(self):
+        self.x = np.array(3.0).astype(self.dtype)
+        self.y = np.array(3.0).astype(self.dtype)
+        self.out = np.subtract(self.x, self.y)
+
+    def test_check_grad_normal(self):
+        pass
+
+    def test_check_grad_ignore_x(self):
+        pass
+
+    def test_check_grad_ignore_y(self):
+        pass
+
+
 @OpTestTool.skip_if_not_cpu_bf16()
 class TestBf16(TestMKLDNNElementwiseSubOp):
     def setUp(self):
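
Note: as a quick sanity check of what the new ZeroDim test classes above exercise, here is a minimal sketch (not part of the patch) that adds a 1-D tensor and a 0-D scalar tensor on CPU. It assumes a Paddle build with oneDNN compiled in, a Paddle version where paddle.to_tensor of a 0-D numpy array yields a true 0-D tensor, and that the FLAGS_use_mkldnn environment flag routes the eager elementwise ops through the oneDNN kernels touched in this diff.

import os

# Assumption: the flag must be set before paddle is imported to take effect;
# it is the switch commonly used to enable the oneDNN (MKL-DNN) CPU kernels.
os.environ["FLAGS_use_mkldnn"] = "1"

import numpy as np
import paddle

# Same input pattern as TestMKLDNNElementwiseAddOpZeroDim: 1-D tensor + 0-D scalar.
x_np = np.random.random((100,)).astype("float32")
y_np = np.array(3.0).astype("float32")  # shape == (), i.e. a 0-D ndarray

x = paddle.to_tensor(x_np)
y = paddle.to_tensor(y_np)  # expected to be a 0-D tensor (shape [])

out = paddle.add(x, y)
np.testing.assert_allclose(out.numpy(), np.add(x_np, y_np), rtol=1e-05)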