Unverified commit 5dfddaea, authored by G gouzil, committed by GitHub

[Divide by 0 Error] add norm check (#49966)

* [Divide by 0 Error] add norm check

* [Divide by 0 Error] fix x AttributeError

* [Divide by 0 Error] norm check migrate to c++
Parent: bdae5481
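For context (not part of the commit): the scenario guarded against is a p-norm over a zero-size input, which previously reached the kernel and could divide by zero. A minimal Python sketch, mirroring the unit test added below:

import numpy as np
import paddle

# Zero-size input: every dim is 0, so the norm reduction would divide by 0.
x = paddle.to_tensor(np.zeros([0, 0], dtype=np.float32))
try:
    paddle.linalg.norm(x, axis=0)
except ValueError as e:
    # With this patch the kernels reject the input up front.
    print("rejected zero-size input:", e)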
@@ -61,6 +61,13 @@ void PNormKernel(const Context& dev_ctx,
   int pre, n, post;
   GetDims(xdim, axis, &pre, &n, &post, asvector);
+  for (int i = 0; i < xdim.size(); i++) {
+    PADDLE_ENFORCE_LT(0,
+                      xdim[i],
+                      errors::InvalidArgument(
+                          "The dims of Input(X) should be greater than 0."));
+  }
   auto* place = dev_ctx.eigen_device();
   Eigen::DSizes<int, 3> shape(pre, n, post);
@@ -105,6 +105,13 @@ void PNormKernel(const Context& dev_ctx,
   std::vector<int> reduce_axis =
       funcs::details::GetReduceDim(axis_dims, xdim.size(), asvector);
+  for (int i = 0; i < xdim.size(); i++) {
+    PADDLE_ENFORCE_LT(0,
+                      xdim[i],
+                      errors::InvalidArgument(
+                          "The dims of Input(X) should be greater than 0."));
+  }
   using MT = typename dtype::MPTypeTrait<T>::Type;
   if (porder == 0) {
     phi::funcs::ReduceKernel<T, T, kps::AddFunctor, NonzeroFunctor<T>>(
@@ -55,6 +55,14 @@ void PNormKernel(const Context& dev_ctx,
   int n = 1;
   int t = 1;
   GetDims(xdim, axis, &m, &t, &n, asvector);
+  for (int i = 0; i < xdim.size(); i++) {
+    PADDLE_ENFORCE_LT(0,
+                      xdim[i],
+                      errors::InvalidArgument(
+                          "The dims of Input(X) should be greater than 0."));
+  }
   x_dim.push_back(m);
   x_dim.push_back(t);
   x_dim.push_back(n);
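All three kernel hunks above add the same guard before the reduction runs; PADDLE_ENFORCE_LT(a, b, msg) enforces a < b. In Python terms the logic is roughly the following (illustrative sketch only, not Paddle code; the helper name is made up):

def check_nonzero_dims(shape):
    # Mirrors PADDLE_ENFORCE_LT(0, xdim[i], ...): every dim must be > 0.
    for i, d in enumerate(shape):
        if not 0 < d:
            raise ValueError(
                "The dims of Input(X) should be greater than 0."
            )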
@@ -655,6 +655,15 @@ class API_NormTest(unittest.TestCase):
                 ValueError, paddle.norm, data, p='unspport', axis=[-3, -2, -1]
             )
+        with fluid.dygraph.guard():
+            # The size of input in Norm should not be 0.
+            def test_0_size():
+                array = np.array([], dtype=np.float32)
+                x = paddle.to_tensor(np.reshape(array, [0, 0]), dtype='float32')
+                paddle.linalg.norm(x, axis=0)
+
+            self.assertRaises(ValueError, test_0_size)
 
 if __name__ == '__main__':
     paddle.enable_static()