Unverified · Commit 38ba5f2e authored by FlyingQianMM, committed by GitHub

fix ScaleKernel configuration error where input numel is 0 (#47111)

* fix scale kernel configuration error where input numel is 0

* fix code style

* add unit test case for scale op when numel of input x is zero

* fix ci codestyle check

* add cpu and gpu unit test case for scale op when numel of input x is zero

* add uninitialized judgment for input of scale
Parent cdd7b956
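For context on the change below: the commit adds an early-return guard to ScaleKernel so that a zero-numel (or uninitialized) input is treated as a no-op instead of reaching the element-wise computation. A minimal sketch of the scenario (not part of the commit, assuming a build that already contains this fix), mirroring the CPU branch of the unit test added later in this diff:

    import paddle

    # Shape [0, 1] gives a tensor with numel() == 0.
    paddle.set_device('cpu')
    x = paddle.ones([0, 1])
    out = paddle.scale(x, scale=2.0)  # with the guard, this is a no-op
    print(out.shape)                  # [0, 1], still empty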
@@ -40,6 +40,9 @@ void ScaleKernel(const Context& dev_ctx,
  // TODO(chenweihang): now the eigen function here need the dtype of scale,
  // eigen_x, bias should be same, so here need cast for two scalar arg,
  // maybe we declare that the type of scale and bias is T?
  if (x.numel() <= 0 || (!x.IsInitialized())) {
    return;
  }
  paddle::operators::EigenScale<std::decay_t<decltype(dev)>, T>::Eval(
      dev,
      eigen_out,
......
@@ -53,6 +53,9 @@ void ScaleKernel(const Context& dev_ctx,
  inputs.emplace_back(&x);
  outputs.emplace_back(out);
  dev_ctx.template Alloc<T>(out);
  if (x.numel() <= 0 || (!x.IsInitialized())) {
    return;
  }
  phi::funcs::ElementwiseKernel<T>(
      dev_ctx,
      inputs,
......
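Note on the GPU path above: the guard is placed after `dev_ctx.template Alloc<T>(out)` and before the `ElementwiseKernel` call, so the output keeps its (empty) shape while the kernel launch — whose configuration would presumably be derived from a zero element count, the "configuration error" named in the commit title — is skipped. The same scenario on GPU, sketched under the assumption of a CUDA-enabled build and mirroring the GPU branch of the unit test below:

    import paddle

    if paddle.is_compiled_with_cuda():
        paddle.set_device('gpu')
        x = paddle.ones([0, 1])   # numel() == 0
        out = paddle.scale(x, 2)  # guard skips the GPU kernel launch
        print(out.shape)          # [0, 1]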
@@ -299,5 +299,19 @@ class TestScaleTripleGradCheck(unittest.TestCase):
            self.func(p)


class TestScaleOpZeroNumelVariable(unittest.TestCase):
    def test_check_zero_numel_cpu(self):
        paddle.set_device('cpu')
        data = paddle.ones([0, 1])
        out = paddle.scale(data, 2)
        self.assertEqual(out, data)
        if paddle.is_compiled_with_cuda():
            paddle.set_device('gpu')
            data = paddle.ones([0, 1])
            out = paddle.scale(data, 2)
            self.assertEqual(out, data)


if __name__ == "__main__":
    unittest.main()