diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc
index 0f769a6aa00304e2d0e774616a01f0b96e4617ef..549f44250c88c3675fb55fcb1c6f5ffa31189a9b 100644
--- a/paddle/fluid/operators/assign_op.cc
+++ b/paddle/fluid/operators/assign_op.cc
@@ -75,10 +75,10 @@ class AssignKernel {
     if (x == nullptr) {
       return;
     }
+    PADDLE_ENFORCE_EQ(
+        ctx.HasOutput("Out"), true,
+        platform::errors::NotFound("Output(Out) of assign_op is not found."));
     auto *out = ctx.OutputVar("Out");
-    PADDLE_ENFORCE(
-        out != nullptr,
-        "The Output(Out) should not be null if the Input(X) is set.");
     platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
     auto &dev_ctx = *pool.Get(ctx.GetPlace());
 
diff --git a/paddle/fluid/operators/assign_value_op.cc b/paddle/fluid/operators/assign_value_op.cc
index 8bb78ad7da60e4c6d3abf02a8a9e99f043ea8769..d5b73fc2f65666305eb0498dd5299d7639663b2c 100644
--- a/paddle/fluid/operators/assign_value_op.cc
+++ b/paddle/fluid/operators/assign_value_op.cc
@@ -28,8 +28,9 @@ class AssignValueOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of AssignValueOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      platform::errors::NotFound(
+                          "Output(Out) of assign_value_op is not found."));
     auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
     ctx->SetOutputDim("Out", framework::make_ddim(shape));
   }
diff --git a/paddle/fluid/operators/fill_any_like_op.cc b/paddle/fluid/operators/fill_any_like_op.cc
index 613caca374ffa50c98c7576f7936976bafdef577..85e0789b0c04433220f41cf7b5c995b7a2b25822 100644
--- a/paddle/fluid/operators/fill_any_like_op.cc
+++ b/paddle/fluid/operators/fill_any_like_op.cc
@@ -23,10 +23,8 @@ class FillAnyLikeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of FillAnyLikeOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of FillAnyLikeOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "fill_any_like");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "fill_any_like");
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->ShareLoD("X", /*->*/ "Out");
   }
diff --git a/paddle/fluid/operators/fill_any_like_op.h b/paddle/fluid/operators/fill_any_like_op.h
index f0b6bcb16e20e26512e04c8cc2216d441057db52..9c514ed3aaa38fc42a7a5ad28a50c7c7267084df 100644
--- a/paddle/fluid/operators/fill_any_like_op.h
+++ b/paddle/fluid/operators/fill_any_like_op.h
@@ -40,15 +40,20 @@ class FillAnyLikeKernel : public framework::OpKernel<T> {
     auto common_type_value = static_cast<CommonType>(value);
 
-    PADDLE_ENFORCE(
+    PADDLE_ENFORCE_EQ(
         (common_type_value >=
          static_cast<CommonType>(std::numeric_limits<T>::lowest())) &&
             (common_type_value <=
              static_cast<CommonType>(std::numeric_limits<T>::max())),
-        "filled value is out of range for targeted type in fill_any_like "
-        "kernel");
-
-    PADDLE_ENFORCE(!std::isnan(value), "filled value is NaN");
+        true,
+        platform::errors::InvalidArgument(
+            "filled value is out of range for targeted type in "
+            "fill_any_like, your kernel type is %s, please check the value "
+            "you set.",
+            typeid(T).name()));
+    PADDLE_ENFORCE_EQ(
+        std::isnan(value), false,
+        platform::errors::InvalidArgument(
+            "filled value should not be NaN, but received NaN."));
 
     math::SetConstant<DeviceContext, T> setter;
     setter(context.template device_context<DeviceContext>(), out,
            static_cast<T>(value));
diff --git a/paddle/fluid/operators/fill_zeros_like_op.cc b/paddle/fluid/operators/fill_zeros_like_op.cc
index be910810b145cbbb7848fdf7b0c75074987c0b26..af81c8c32ece1b4020350b0c66ef111c4d1d0bcf 100644
--- a/paddle/fluid/operators/fill_zeros_like_op.cc
+++ b/paddle/fluid/operators/fill_zeros_like_op.cc
@@ -22,10 +22,8 @@ class FillZerosLikeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of FillZerosLikeOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of FillZerosLikeOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "fill_zeros_like");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "fill_zeros_like");
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->ShareLoD("X", /*->*/ "Out");
   }
diff --git a/paddle/fluid/operators/linspace_op.cc b/paddle/fluid/operators/linspace_op.cc
index 7ea3b06e02ed3a50b22463693b7df3372587dc0c..0a7146be83dcb673573f1fdcb94ed2d2c57bd2c3 100644
--- a/paddle/fluid/operators/linspace_op.cc
+++ b/paddle/fluid/operators/linspace_op.cc
@@ -24,25 +24,27 @@ class LinspaceOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Start"),
-                   "Input(Start) of LinspaceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Stop"),
-                   "Input(Stop) of LinspaceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Num"),
-                   "Input(Num) of LinspaceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(OUt) of LinspaceOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Start"), "Input", "Start", "linspace");
+    OP_INOUT_CHECK(ctx->HasInput("Stop"), "Input", "Stop", "linspace");
+    OP_INOUT_CHECK(ctx->HasInput("Num"), "Input", "Num", "linspace");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "linspace");
 
     auto s_dims = ctx->GetInputDim("Start");
-    PADDLE_ENFORCE((s_dims.size() == 1) && (s_dims[0] == 1),
-                   "The shape of Input(Start) should be [1].");
-
+    PADDLE_ENFORCE_EQ((s_dims.size() == 1) && (s_dims[0] == 1), true,
+                      platform::errors::InvalidArgument(
+                          "The shape of Input(Start) must be [1], "
+                          "but received input shape is [%s].",
+                          s_dims));
     auto e_dims = ctx->GetInputDim("Stop");
-    PADDLE_ENFORCE((e_dims.size() == 1) && (e_dims[0] == 1),
-                   "The shape of Input(Stop) should be [1].");
-
+    PADDLE_ENFORCE_EQ((e_dims.size() == 1) && (e_dims[0] == 1), true,
+                      platform::errors::InvalidArgument(
+                          "The shape of Input(Stop) must be [1], "
+                          "but received input shape is [%s].",
+                          e_dims));
     auto step_dims = ctx->GetInputDim("Num");
-    PADDLE_ENFORCE((step_dims.size() == 1) && (step_dims[0] == 1),
-                   "The shape of Input(Num) should be [1].");
-
+    PADDLE_ENFORCE_EQ((step_dims.size() == 1) && (step_dims[0] == 1), true,
+                      platform::errors::InvalidArgument(
+                          "The shape of Input(Num) must be [1], "
+                          "but received input shape is [%s].",
+                          step_dims));
     ctx->SetOutputDim("Out", {-1});
   }
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index c4a553d503842ec9f135cf0d5cd7418f28e58f9c..e238efda8cdc45664c6b453cd06425a6ffb6b54d 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -961,8 +961,10 @@ def ones(shape, dtype, force_cpu=False):
           import paddle.fluid as fluid
           data = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]
     """
-    assert isinstance(shape, list) or isinstance(
-        shape, tuple), "The shape's type should be list or tuple."
+    check_type(shape, 'shape', (list, tuple), 'ones')
+    check_dtype(dtype, 'create data type',
+                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+                'ones')
     assert reduce(lambda x, y: x * y,
                   shape) > 0, "The shape is invalid: %s." % (str(shape))
     return fill_constant(value=1.0, **locals())
@@ -990,6 +992,7 @@ def zeros(shape, dtype, force_cpu=False):
           import paddle.fluid as fluid
           data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
     """
+    check_type(shape, 'shape', (list, tuple), 'zeros')
     check_dtype(dtype, 'create data type',
                 ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                 'zeros')
@@ -1174,7 +1177,10 @@ def isfinite(x):
                                          dtype="float32")
             out = fluid.layers.isfinite(var)
     """
+    check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
+                             "isfinite")
     helper = LayerHelper("isfinite", **locals())
+
     out = helper.create_variable_for_type_inference(dtype='bool')
     helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
     return out
@@ -1273,12 +1279,25 @@ def linspace(start, stop, num, dtype):
     """
     helper = LayerHelper("linspace", **locals())
 
+    check_type(start, 'start', (Variable, float, int), 'linspace')
+    check_type(stop, 'stop', (Variable, float, int), 'linspace')
+    check_type(num, 'num', (Variable, float, int), 'linspace')
+
     if not isinstance(start, Variable):
         start = fill_constant([1], dtype, start)
+    else:
+        check_variable_and_dtype(start, "start", ["float32", "float64"],
+                                 "linspace")
     if not isinstance(stop, Variable):
         stop = fill_constant([1], dtype, stop)
+    else:
+        check_variable_and_dtype(stop, "stop", ["float32", "float64"],
+                                 "linspace")
     if not isinstance(num, Variable):
         num = fill_constant([1], 'int32', num)
+    else:
+        check_variable_and_dtype(num, "num", ["int32"], "linspace")
+
     out = helper.create_variable_for_type_inference(dtype=start.dtype)
 
     helper.append_op(
@@ -1315,9 +1334,16 @@ def zeros_like(x, out=None):
 
     """
+    check_variable_and_dtype(
+        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'zeros_like')
     helper = LayerHelper("zeros_like", **locals())
     if out is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
+        check_variable_and_dtype(
+            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
+            'zeros_like')
+
     helper.append_op(
         type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
     out.stop_gradient = True
     return out
@@ -1462,10 +1488,16 @@ def ones_like(x, out=None):
           data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]
 
     """
+    check_variable_and_dtype(
+        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'ones_like')
     helper = LayerHelper("ones_like", **locals())
 
     if out is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
+        check_variable_and_dtype(
+            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
+            'ones_like')
     helper.append_op(
         type='fill_any_like',
         inputs={'X': [x]},
diff --git a/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py b/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
index b9b203048377b924d910684243b43b7b78f8828c..cd902b7e00eea41bfbc026bc08a47a30a34db7dd 100644
--- a/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
@@ -247,5 +247,33 @@ class TestOnesZerosError(unittest.TestCase):
 
         self.assertRaises(ValueError, test_device_error4)
 
+        def test_ones_like_type_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                fluid.layers.ones_like([10])
+
+        self.assertRaises(TypeError, test_ones_like_type_error)
+
+        def test_ones_like_dtype_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float16")
+                fluid.layers.ones_like(data)
+
+        self.assertRaises(TypeError, test_ones_like_dtype_error)
+
+        def test_ones_like_out_type_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float32")
+                fluid.layers.ones_like(data, out=[10])
+
+        self.assertRaises(TypeError, test_ones_like_out_type_error)
+
+        def test_ones_like_out_dtype_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float32")
+                out = fluid.data(name="out", shape=[10], dtype="float16")
+                fluid.layers.ones_like(data, out=out)
+
+        self.assertRaises(TypeError, test_ones_like_out_dtype_error)
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py
index ae7639f29ca5d99488e65e7dbf57930f9255a65f..37d4e3e7abb0d7b1dd878a5df20ede401a114606 100644
--- a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py
@@ -457,5 +457,30 @@ class ApiOnesZerosError(unittest.TestCase):
 
         self.assertRaises(ValueError, test_error2)
 
+        def test_error3():
+            with fluid.program_guard(fluid.Program()):
+                ones = fluid.layers.ones(shape=10, dtype="int64")
+
+        self.assertRaises(TypeError, test_error3)
+
+        def test_error4():
+            with fluid.program_guard(fluid.Program()):
+                ones = fluid.layers.ones(shape=[10], dtype="int8")
+
+        self.assertRaises(TypeError, test_error4)
+
+        def test_error5():
+            with fluid.program_guard(fluid.Program()):
+                zeros = fluid.layers.zeros(shape=10, dtype="int64")
+
+        self.assertRaises(TypeError, test_error5)
+
+        def test_error6():
+            with fluid.program_guard(fluid.Program()):
+                zeros = fluid.layers.zeros(shape=[10], dtype="int8")
+
+        self.assertRaises(TypeError, test_error6)
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py b/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py
index 935653b07a6a4e1d344e8040fa4a0ed72b9b164d..46590bf187a86d6c86c09370321f1b1f52817d76 100644
--- a/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py
@@ -16,6 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
+import paddle.fluid as fluid
 from paddle.fluid.framework import convert_np_dtype_to_dtype_
 from op_test import OpTest
 
@@ -46,5 +47,36 @@ class TestFillZerosLike2OpFp64(TestFillZerosLike2Op):
         self.dtype = np.float64
 
 
+class TestZerosError(unittest.TestCase):
+    def test_errors(self):
+        def test_zeros_like_type_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                fluid.layers.zeros_like([10])
+
+        self.assertRaises(TypeError, test_zeros_like_type_error)
+
+        def test_zeros_like_dtype_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float16")
+                fluid.layers.zeros_like(data)
+
+        self.assertRaises(TypeError, test_zeros_like_dtype_error)
+
+        def test_zeros_like_out_type_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float32")
+                fluid.layers.zeros_like(data, out=[10])
+
+        self.assertRaises(TypeError, test_zeros_like_out_type_error)
+
+        def test_zeros_like_out_dtype_error():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data = fluid.data(name="data", shape=[10], dtype="float32")
+                out = fluid.data(name="out", shape=[10], dtype="float16")
+                fluid.layers.zeros_like(data, out=out)
+
+        self.assertRaises(TypeError, test_zeros_like_out_dtype_error)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
index 22bd3bcf4a1e2a292bfddf01f4bc206b86b9cc0e..9b682bb62ec5310f2dd3061179cc4d81669b1225 100644
--- a/python/paddle/fluid/tests/unittests/test_isfinite_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
@@ -14,8 +14,9 @@
 
 import unittest
 import numpy as np
+import paddle.fluid as fluid
 import paddle.fluid.core as core
 from op_test import OpTest
 
 
 class TestInf(OpTest):
@@ -38,6 +39,20 @@ class TestInf(OpTest):
         self.check_output()
 
 
+class TestRaiseError(unittest.TestCase):
+    def test_errors(self):
+        def test_type():
+            fluid.layers.isfinite([10])
+
+        self.assertRaises(TypeError, test_type)
+
+        def test_dtype():
+            data = fluid.data(shape=[10], dtype="float16", name="input")
+            fluid.layers.isfinite(data)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
 class TestFP16Inf(TestInf):
diff --git a/python/paddle/fluid/tests/unittests/test_linspace.py b/python/paddle/fluid/tests/unittests/test_linspace.py
index 9e88541fe13a26cb737fcbb9d9cf980d8e194c58..7d034d224ddc8f4ac486d9792e0233c64901c1e2 100644
--- a/python/paddle/fluid/tests/unittests/test_linspace.py
+++ b/python/paddle/fluid/tests/unittests/test_linspace.py
@@ -99,6 +99,39 @@ class TestLinspaceOpError(unittest.TestCase):
 
         self.assertRaises(ValueError, test_device_value)
 
+        def test_start_type():
+            fluid.layers.linspace([0], 10, 1, dtype="float32")
+
+        self.assertRaises(TypeError, test_start_type)
+
+        def test_stop_type():
+            fluid.layers.linspace(0, [10], 1, dtype="float32")
+
+        self.assertRaises(TypeError, test_stop_type)
+
+        def test_num_type():
+            fluid.layers.linspace(0, 10, [0], dtype="float32")
+
+        self.assertRaises(TypeError, test_num_type)
+
+        def test_start_dtype():
+            start = fluid.data(shape=[1], dtype="int32", name="start")
+            fluid.layers.linspace(start, 10, 1, dtype="float32")
+
+        self.assertRaises(TypeError, test_start_dtype)
+
+        def test_stop_dtype():
+            stop = fluid.data(shape=[1], dtype="int32", name="stop")
+            fluid.layers.linspace(0, stop, 1, dtype="float32")
+
+        self.assertRaises(TypeError, test_stop_dtype)
+
+        def test_num_dtype():
+            num = fluid.data(shape=[1], dtype="float32", name="step")
+            fluid.layers.linspace(0, 10, num, dtype="float32")
+
+        self.assertRaises(TypeError, test_num_dtype)
+
 if __name__ == "__main__":
     unittest.main()