From f2068eecc4de6b7e5ec1dcce959540f44584de1a Mon Sep 17 00:00:00 2001 From: xiaoting <31891223+tink2123@users.noreply.github.com> Date: Tue, 6 Jul 2021 13:14:23 +0800 Subject: [PATCH] Enhance error message for interpolate_v2 (#33941) * fix interpolate for shape[i]=0, test=develop * fix test_trilinear_interp_v2 random failure, test=develop --- paddle/fluid/operators/interpolate_v2_op.cc | 21 ++++++++++++++++++- .../unittests/test_bicubic_interp_v2_op.py | 6 ++++++ .../unittests/test_trilinear_interp_v2_op.py | 2 ++ 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/operators/interpolate_v2_op.cc b/paddle/fluid/operators/interpolate_v2_op.cc index a4353420c84..97e39e71a55 100644 --- a/paddle/fluid/operators/interpolate_v2_op.cc +++ b/paddle/fluid/operators/interpolate_v2_op.cc @@ -35,7 +35,12 @@ static void Interpolate1DInferShapeCheck(framework::InferShapeContext* ctx) { interp_method)); const DataLayout data_layout = framework::StringToDataLayout( ctx->Attrs().Get("data_layout")); - + for (int i = 0; i < dim_x.size(); ++i) { + PADDLE_ENFORCE_NE(dim_x[i], 0, platform::errors::InvalidArgument( + "The shape of input(x) should be larger " + "than 0, but received shape[%d] is %d ", + i, dim_x[i])); + } if (ctx->HasInputs("SizeTensor")) { // top prority size auto inputs_name = ctx->Inputs("SizeTensor"); @@ -134,6 +139,13 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) { const DataLayout data_layout = framework::StringToDataLayout( ctx->Attrs().Get("data_layout")); + for (int i = 0; i < dim_x.size(); ++i) { + PADDLE_ENFORCE_NE(dim_x[i], 0, platform::errors::InvalidArgument( + "The shape of input(x) should be larger " + "than 0, but received shape[%d] is %d ", + i, dim_x[i])); + } + if (ctx->HasInputs("SizeTensor")) { // top prority size auto inputs_name = ctx->Inputs("SizeTensor"); @@ -246,6 +258,13 @@ static void Interpolate3DInferShapeCheck(framework::InferShapeContext* ctx) { const DataLayout data_layout = 
framework::StringToDataLayout( ctx->Attrs().Get("data_layout")); + for (int i = 0; i < dim_x.size(); ++i) { + PADDLE_ENFORCE_NE(dim_x[i], 0, platform::errors::InvalidArgument( + "The shape of input(x) should be larger " + "than 0, but received shape[%d] is %d ", + i, dim_x[i])); + } + if (ctx->HasInputs("SizeTensor")) { // top prority size auto inputs_name = ctx->Inputs("SizeTensor"); diff --git a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py index b1ec7441198..58d8d0f53d0 100644 --- a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py @@ -517,6 +517,11 @@ class TestBicubicOpError(unittest.TestCase): out = interpolate( x, size={2, 2}, mode='bicubic', align_corners=False) + def test_input_shape(): + x = fluid.data(name="x", shape=[2, 1, 0, 0], dtype="float32") + out = interpolate( + x, size=[3, 3], mode="bicubic", align_corners=False) + self.assertRaises(ValueError, test_mode_type) self.assertRaises(ValueError, test_input_shape) self.assertRaises(TypeError, test_align_corcers) @@ -534,6 +539,7 @@ class TestBicubicOpError(unittest.TestCase): self.assertRaises(ValueError, test_size_and_scale) self.assertRaises(ValueError, test_size_and_scale2) self.assertRaises(TypeError, test_size_type) + self.assertRaises(ValueError, test_input_shape) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py index 1f8ff4963ec..9f46b539a04 100755 --- a/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py @@ -21,6 +21,8 @@ import paddle.fluid.core as core import paddle.fluid as fluid from paddle.nn.functional import interpolate +np.random.seed(123) + def trilinear_interp_np(input, out_d, -- GitLab