diff --git a/paddle/phi/kernels/cpu/scale_kernel.cc b/paddle/phi/kernels/cpu/scale_kernel.cc
index 358d89197edb2ee0e096ec5b270f9fe33e73502e..53a1391b0cf4d244ac5ee8ddb2fff8f0edf4d1e2 100644
--- a/paddle/phi/kernels/cpu/scale_kernel.cc
+++ b/paddle/phi/kernels/cpu/scale_kernel.cc
@@ -40,6 +40,9 @@ void ScaleKernel(const Context& dev_ctx,
   // TODO(chenweihang): now the eigen function here need the dtype of scale,
   // eigen_x, bias should be same, so here need cast for two scalar arg,
   // maybe we declare that the type of scale and bias is T?
+  if (x.numel() <= 0 || (!x.IsInitialized())) {
+    return;
+  }
   paddle::operators::EigenScale<std::decay_t<decltype(dev)>, T>::Eval(
       dev,
       eigen_out,
diff --git a/paddle/phi/kernels/gpu/scale_kernel.cu b/paddle/phi/kernels/gpu/scale_kernel.cu
index 1a574c05494fdaa14f3d8ca0f148d2f60c3964f5..0efcd0b7063f34360f2f5e4e175a6714d3ab6a6e 100644
--- a/paddle/phi/kernels/gpu/scale_kernel.cu
+++ b/paddle/phi/kernels/gpu/scale_kernel.cu
@@ -53,6 +53,9 @@ void ScaleKernel(const Context& dev_ctx,
   inputs.emplace_back(&x);
   outputs.emplace_back(out);
   dev_ctx.template Alloc<T>(out);
+  if (x.numel() <= 0 || (!x.IsInitialized())) {
+    return;
+  }
   phi::funcs::ElementwiseKernel<T>(
       dev_ctx,
       inputs,
diff --git a/python/paddle/fluid/tests/unittests/test_scale_op.py b/python/paddle/fluid/tests/unittests/test_scale_op.py
index 4fec89905373f1cc0b6ce0e9c4644e3657cdbe96..07be7620a93a1174dc8cd8a63dd6708bcc730341 100644
--- a/python/paddle/fluid/tests/unittests/test_scale_op.py
+++ b/python/paddle/fluid/tests/unittests/test_scale_op.py
@@ -299,5 +299,19 @@ class TestScaleTripleGradCheck(unittest.TestCase):
             self.func(p)
 
 
+class TestScaleOpZeroNumelVariable(unittest.TestCase):
+    def test_check_zero_numel_cpu(self):
+        paddle.set_device('cpu')
+        data = paddle.ones([0, 1])
+        out = paddle.scale(data, 2)
+        self.assertEqual(out, data)
+
+        if paddle.is_compiled_with_cuda():
+            paddle.set_device('gpu')
+            data = paddle.ones([0, 1])
+            out = paddle.scale(data, 2)
+            self.assertEqual(out, data)
+
+
 if __name__ == "__main__":
     unittest.main()
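
Note: the guard added above makes both the CPU and GPU kernels return early when the input is empty or uninitialized, so the Eigen/elementwise path is never launched on zero elements. A minimal usage sketch of the behavior the new unit test exercises; the [0, 1] shape mirrors the test and is otherwise arbitrary:

    import paddle

    # An empty tensor: shape [0, 1] holds zero elements.
    paddle.set_device('cpu')
    data = paddle.ones([0, 1])
    # With the early return in place, scale() simply produces an
    # equally empty output instead of dispatching the compute kernel.
    out = paddle.scale(data, 2)
    print(out.shape)  # [0, 1]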