From b822f74cd4a044dbc6efaacf6cd361641bed9bea Mon Sep 17 00:00:00 2001
From: wawltor
Date: Sun, 12 Apr 2020 14:52:33 +0800
Subject: [PATCH] Add the error raise for some operators, add some test cases

Add the error raise for those cases:
assign, isfinite, linspace, ones_like, zeros_like, zeros, ones
---
 paddle/fluid/operators/assign_op.cc           |  6 ++--
 paddle/fluid/operators/assign_value_op.cc     |  5 +--
 paddle/fluid/operators/fill_any_like_op.cc    |  6 ++--
 paddle/fluid/operators/fill_any_like_op.h     | 15 +++++---
 paddle/fluid/operators/fill_zeros_like_op.cc  |  6 ++--
 paddle/fluid/operators/linspace_op.cc         | 34 ++++++++++--------
 python/paddle/fluid/layers/tensor.py          | 36 +++++++++++++++++--
 .../tests/unittests/test_fill_any_like_op.py  | 28 +++++++++++++++
 .../tests/unittests/test_fill_constant_op.py  | 24 +++++++++++++
 .../unittests/test_fill_zeros_like2_op.py     | 32 +++++++++++++++++
 .../fluid/tests/unittests/test_isfinite_op.py | 16 +++++++++
 .../fluid/tests/unittests/test_linspace.py    | 33 +++++++++++++++++
 12 files changed, 206 insertions(+), 35 deletions(-)

diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc
index 0f769a6aa00..549f44250c8 100644
--- a/paddle/fluid/operators/assign_op.cc
+++ b/paddle/fluid/operators/assign_op.cc
@@ -75,10 +75,10 @@ class AssignKernel {
     if (x == nullptr) {
       return;
     }
+    PADDLE_ENFORCE_EQ(
+        ctx.HasOutput("Out"), true,
+        platform::errors::NotFound("Output(Out) of assign_op is not found."));
     auto *out = ctx.OutputVar("Out");
-    PADDLE_ENFORCE(
-        out != nullptr,
-        "The Output(Out) should not be null if the Input(X) is set.");
     platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
     auto &dev_ctx = *pool.Get(ctx.GetPlace());
diff --git a/paddle/fluid/operators/assign_value_op.cc b/paddle/fluid/operators/assign_value_op.cc
index 8bb78ad7da6..d5b73fc2f65 100644
--- a/paddle/fluid/operators/assign_value_op.cc
+++ b/paddle/fluid/operators/assign_value_op.cc
@@ -28,8 +28,9 @@ class AssignValueOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of AssignValueOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::NotFound("Output(Out) of assign_op is not found."));
     auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
     ctx->SetOutputDim("Out", framework::make_ddim(shape));
   }
diff --git a/paddle/fluid/operators/fill_any_like_op.cc b/paddle/fluid/operators/fill_any_like_op.cc
index 613caca374f..85e0789b0c0 100644
--- a/paddle/fluid/operators/fill_any_like_op.cc
+++ b/paddle/fluid/operators/fill_any_like_op.cc
@@ -23,10 +23,8 @@ class FillAnyLikeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of FillAnyLikeOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of FillAnyLikeOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "fill_any_like");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "fill_any_like");
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->ShareLoD("X", /*->*/ "Out");
   }
diff --git a/paddle/fluid/operators/fill_any_like_op.h b/paddle/fluid/operators/fill_any_like_op.h
index f0b6bcb16e2..9c514ed3aaa 100644
--- a/paddle/fluid/operators/fill_any_like_op.h
+++ b/paddle/fluid/operators/fill_any_like_op.h
@@ -40,15 +40,20 @@ class FillAnyLikeKernel : public framework::OpKernel<T> {
 
     auto common_type_value = static_cast<CommonType>(value);
 
-    PADDLE_ENFORCE(
+    PADDLE_ENFORCE_EQ(
         (common_type_value >=
          static_cast<CommonType>(std::numeric_limits<T>::lowest())) &&
             (common_type_value <=
              static_cast<CommonType>(std::numeric_limits<T>::max())),
-        "filled value is out of range for targeted type in fill_any_like "
-        "kernel");
-
-    PADDLE_ENFORCE(!std::isnan(value), "filled value is NaN");
+        true, platform::errors::InvalidArgument(
+                  "filled value is out of range for"
+                  " targeted type in fill_any_like, your kernel type is %s"
+                  ", please check value you set.",
+                  typeid(T).name()));
+    PADDLE_ENFORCE_EQ(
+        std::isnan(value), false,
+        platform::errors::InvalidArgument("filled value should not be NaN,"
+                                          " but received NaN"));
 
     math::SetConstant<DeviceContext, T> setter;
     setter(context.template device_context<DeviceContext>(), out,
diff --git a/paddle/fluid/operators/fill_zeros_like_op.cc b/paddle/fluid/operators/fill_zeros_like_op.cc
index be910810b14..af81c8c32ec 100644
--- a/paddle/fluid/operators/fill_zeros_like_op.cc
+++ b/paddle/fluid/operators/fill_zeros_like_op.cc
@@ -22,10 +22,8 @@ class FillZerosLikeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of FillZerosLikeOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of FillZerosLikeOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "fill_zeros_like");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "fill_zeros_like");
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->ShareLoD("X", /*->*/ "Out");
   }
diff --git a/paddle/fluid/operators/linspace_op.cc b/paddle/fluid/operators/linspace_op.cc
index 7ea3b06e02e..0a7146be83d 100644
--- a/paddle/fluid/operators/linspace_op.cc
+++ b/paddle/fluid/operators/linspace_op.cc
@@ -24,25 +24,29 @@ class LinspaceOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Start"),
                    "Input(Start) of LinspaceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Stop"),
-                   "Input(Stop) of LinspaceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Num"),
-                   "Input(Num) of LinspaceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(OUt) of LinspaceOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Start"), "Input", "Start", "linspace");
+    OP_INOUT_CHECK(ctx->HasInput("Stop"), "Input", "Stop", "linspace");
+    OP_INOUT_CHECK(ctx->HasInput("Num"), "Input", "Num", "linspace");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "linspace");
 
     auto s_dims = ctx->GetInputDim("Start");
-    PADDLE_ENFORCE((s_dims.size() == 1) && (s_dims[0] == 1),
-                   "The shape of Input(Start) should be [1].");
-
+    PADDLE_ENFORCE_EQ((s_dims.size() == 1) && (s_dims[0] == 1), true,
+                      platform::errors::InvalidArgument(
+                          "The shape of Input(Start) must be [1],"
+                          "but received input shape is [%s].",
+                          s_dims));
     auto e_dims = ctx->GetInputDim("Stop");
-    PADDLE_ENFORCE((e_dims.size() == 1) && (e_dims[0] == 1),
-                   "The shape of Input(Stop) should be [1].");
-
+    PADDLE_ENFORCE_EQ((e_dims.size() == 1) && (e_dims[0] == 1), true,
+                      platform::errors::InvalidArgument(
+                          "The shape of Input(Stop) must be [1],"
+                          "but received input shape is [%s].",
+                          e_dims));
     auto step_dims = ctx->GetInputDim("Num");
-    PADDLE_ENFORCE((step_dims.size() == 1) && (step_dims[0] == 1),
-                   "The shape of Input(Num) should be [1].");
-
+    PADDLE_ENFORCE_EQ(
+        (step_dims.size() == 1) && (step_dims[0] == 1), true,
+        platform::errors::InvalidArgument("The shape of Input(Num) must be [1],"
+                                          "but received input shape is [%s].",
+                                          step_dims));
     ctx->SetOutputDim("Out", {-1});
   }
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index c4a553d5038..e238efda8cd 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -961,8 +961,10 @@ def ones(shape, dtype, force_cpu=False):
           import paddle.fluid as fluid
           data = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]
     """
-    assert isinstance(shape, list) or isinstance(
-        shape, tuple), "The shape's type should be list or tuple."
+    check_type(shape, 'shape', (list, tuple), 'ones')
+    check_dtype(dtype, 'create data type',
+                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+                'ones')
     assert reduce(lambda x, y: x * y,
                   shape) > 0, "The shape is invalid: %s." % (str(shape))
     return fill_constant(value=1.0, **locals())
@@ -990,6 +992,7 @@ def zeros(shape, dtype, force_cpu=False):
           import paddle.fluid as fluid
           data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
     """
+    check_type(shape, 'shape', (list, tuple), 'zeros')
     check_dtype(dtype, 'create data type',
                 ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                 'zeros')
@@ -1174,7 +1177,10 @@ def isfinite(x):
                                       dtype="float32")
             out = fluid.layers.isfinite(var)
     """
+    check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
+                             "isfinite")
     helper = LayerHelper("isfinite", **locals())
+
     out = helper.create_variable_for_type_inference(dtype='bool')
     helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
     return out
@@ -1273,12 +1279,25 @@ def linspace(start, stop, num, dtype):
 
     """
     helper = LayerHelper("linspace", **locals())
+    check_type(start, 'start', (Variable, float, int), linspace)
+    check_type(stop, 'stop', (Variable, float, int), linspace)
+    check_type(num, 'num', (Variable, float, int), linspace)
+
     if not isinstance(start, Variable):
         start = fill_constant([1], dtype, start)
+    else:
+        check_variable_and_dtype(start, "start", ["float32", "float64"],
+                                 "linspace")
+
     if not isinstance(stop, Variable):
         stop = fill_constant([1], dtype, stop)
+    else:
+        check_variable_and_dtype(stop, "stop", ["float32", "float64"],
+                                 "linspace")
     if not isinstance(num, Variable):
         num = fill_constant([1], 'int32', num)
+    else:
+        check_variable_and_dtype(num, "num", ["int32"], "linspace")
 
     out = helper.create_variable_for_type_inference(dtype=start.dtype)
@@ -1315,9 +1334,16 @@ def zeros_like(x, out=None):
 
     """
 
+    check_variable_and_dtype(
+        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'ones_like')
     helper = LayerHelper("zeros_like", **locals())
     if out is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
+        check_variable_and_dtype(
+            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
+            'ones_like')
+
     helper.append_op(
         type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
     out.stop_gradient = True
@@ -1462,10 +1488,16 @@ def ones_like(x, out=None):
         data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]
 
     """
+    check_variable_and_dtype(
+        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'ones_like')
     helper = LayerHelper("ones_like", **locals())
 
     if out is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
+        check_variable_and_dtype(
+            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
+            'ones_like')
     helper.append_op(
         type='fill_any_like',
         inputs={'X': [x]},
diff --git a/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py b/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
index b9b20304837..cd902b7e00e 100644
--- a/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
@@ -247,6 +247,34 @@ class TestOnesZerosError(unittest.TestCase):
 
         self.assertRaises(ValueError, test_device_error4)
 
+        def test_ones_like_type_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                fluid.layers.ones_like([10], dtype="float")
+
+        self.assertRaises(TypeError, test_ones_like_type_error)
+
+        def test_ones_like_dtype_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float16")
+                fluid.layers.ones_like(data, dtype="float32")
+
+        self.assertRaises(TypeError, test_ones_like_dtype_error)
+
+        def test_ones_like_out_type_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float32")
+                fluid.layers.ones_like(data, dtype="float32", out=[10])
+
+        self.assertRaises(TypeError, test_ones_like_out_type_error)
+
+        def test_ones_like_out_dtype_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float32")
+                out = fluid.data(name="out", shape=[10], dtype="float16")
+                fluid.layers.ones_like(data, dtype="float32", out=out)
+
+        self.assertRaises(TypeError, test_ones_like_out_dtype_error)
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py
index ae7639f29ca..37d4e3e7abb 100644
--- a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py
@@ -457,6 +457,30 @@ class ApiOnesZerosError(unittest.TestCase):
 
         self.assertRaises(ValueError, test_error2)
 
+        def test_error3():
+            with fluid.program_guard(fluid.Program()):
+                ones = fluid.layers.ones(shape=10, dtype="int64")
+
+        self.assertRaises(TypeError, test_error3)
+
+        def test_error4():
+            with fluid.program_guard(fluid.Program()):
+                ones = fluid.layers.ones(shape=[10], dtype="int8")
+
+        self.assertRaises(TypeError, test_error4)
+
+        def test_error5():
+            with fluid.program_guard(fluid.Program()):
+                ones = fluid.layers.zeros(shape=10, dtype="int64")
+
+        self.assertRaises(TypeError, test_error5)
+
+        def test_error6():
+            with fluid.program_guard(fluid.Program()):
+                ones = fluid.layers.zeros(shape=[10], dtype="int8")
+
+        self.assertRaises(TypeError, test_error6)
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py b/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py
index 935653b07a6..46590bf187a 100644
--- a/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py
@@ -16,6 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
+import paddle.fluid as fluid
 from paddle.fluid.framework import convert_np_dtype_to_dtype_
 from op_test import OpTest
 
@@ -46,5 +47,36 @@ class TestFillZerosLike2OpFp64(TestFillZerosLike2Op):
         self.dtype = np.float64
 
 
+class TestZerosError(unittest.TestCase):
+    def test_errors(self):
+        def test_zeros_like_type_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                fluid.layers.zeros_like([10], dtype="float")
+
+        self.assertRaises(TypeError, test_zeros_like_type_error)
+
+        def test_zeros_like_dtype_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float16")
+                fluid.layers.zeros_like(data, dtype="float32")
+
+        self.assertRaises(TypeError, test_zeros_like_dtype_error)
+
+        def test_zeros_like_out_type_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float32")
+                fluid.layers.zeros_like(data, dtype="float32", out=[10])
+
+        self.assertRaises(TypeError, test_zeros_like_out_type_error)
+
+        def test_zeros_like_out_dtype_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float32")
+                out = fluid.data(name="out", shape=[10], dtype="float16")
+                fluid.layers.zeros_like(data, dtype="float32", out=out)
+
+        self.assertRaises(TypeError, test_zeros_like_out_dtype_error)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
index 22bd3bcf4a1..9b682bb62ec 100644
--- a/python/paddle/fluid/tests/unittests/test_isfinite_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
@@ -14,8 +14,10 @@
 
 import unittest
 import numpy as np
+import paddle.fluid as fluid
 import paddle.fluid.core as core
 from op_test import OpTest
+import unittest
 
 
 class TestInf(OpTest):
@@ -38,6 +40,20 @@ class TestInf(OpTest):
         self.check_output()
 
 
+class TestRaiseError(unittest.TestCase):
+    def test_errors(self):
+        def test_type():
+            fluid.layers.isfinite([10])
+
+        self.assertRaises(TypeError, test_type)
+
+        def test_dtype():
+            data = fluid.data(shape=[10], dtype="float16", name="input")
+            fluid.layers.isfinite(data)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
 class TestFP16Inf(TestInf):
diff --git a/python/paddle/fluid/tests/unittests/test_linspace.py b/python/paddle/fluid/tests/unittests/test_linspace.py
index 9e88541fe13..7d034d224dd 100644
--- a/python/paddle/fluid/tests/unittests/test_linspace.py
+++ b/python/paddle/fluid/tests/unittests/test_linspace.py
@@ -99,6 +99,39 @@ class TestLinspaceOpError(unittest.TestCase):
 
         self.assertRaises(ValueError, test_device_value)
 
+        def test_start_type():
+            fluid.layers.linspace([0], 10, 1, dtype="float32")
+
+        self.assertRaises(TypeError, test_start_type)
+
+        def test_end_dtype():
+            fluid.layers.linspace(0, [10], 1, dtype="float32")
+
+        self.assertRaises(TypeError, test_end_dtype)
+
+        def test_step_dtype():
+            fluid.layers.linspace(0, 10, [0], dtype="float32")
+
+        self.assertRaises(TypeError, test_step_dtype)
+
+        def test_start_dtype():
+            start = fluid.data(shape=[1], type="int32", name="start")
+            fluid.layers.linspace(start, 10, 1, dtype="float32")
+
+        self.assertRaises(TypeError, test_start_dtype)
+
+        def test_end_dtype():
+            end = fluid.data(shape=[1], type="int32", name="end")
+            fluid.layers.linspace(0, end, 1, dtype="float32")
+
+        self.assertRaises(TypeError, test_end_dtype)
+
+        def test_step_dtype():
+            step = fluid.data(shape=[1], type="int32", name="step")
+            fluid.layers.linspace(0, 10, step, dtype="float32")
+
+        self.assertRaises(TypeError, test_step_dtype)
+
 
 if __name__ == "__main__":
     unittest.main()
--
GitLab
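
For reference, a minimal usage sketch (outside the patch itself) of how the new Python-side checks surface to callers, assuming a build of paddle.fluid that includes the changes above; it mirrors the TypeError cases exercised by the new unit tests:

    # Illustrative only -- not part of the patch; mirrors the added tests.
    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program(), fluid.Program()):
        # Valid call: shape is a list/tuple and dtype is in the supported set.
        ok = fluid.layers.ones(shape=[2, 4], dtype='float32')

        # check_type(shape, 'shape', (list, tuple), 'ones') now raises
        # TypeError for a non-list/tuple shape instead of tripping a bare assert.
        try:
            fluid.layers.ones(shape=10, dtype='float32')
        except TypeError as e:
            print("ones rejected a non-list shape:", e)

        # check_dtype(...) raises TypeError for an unsupported dtype.
        try:
            fluid.layers.zeros(shape=[10], dtype='int8')
        except TypeError as e:
            print("zeros rejected dtype int8:", e)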