diff --git a/paddle/fluid/operators/arg_min_max_op_base.h b/paddle/fluid/operators/arg_min_max_op_base.h
index 76c8426123c783db404babc9ece220c83255767e..0fc7b47c62ea9d7da805b797fcf5e4db4e39328d 100644
--- a/paddle/fluid/operators/arg_min_max_op_base.h
+++ b/paddle/fluid/operators/arg_min_max_op_base.h
@@ -136,14 +136,21 @@ class ArgMinMaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "arg_min_max");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "arg_min_max");
     const auto& x_dims = ctx->GetInputDim("X");
     int64_t axis = ctx->Attrs().Get<int64_t>("axis");
     bool keepdims = ctx->Attrs().Get<bool>("keepdims");
 
-    PADDLE_ENFORCE(axis >= -x_dims.size() && axis < x_dims.size(),
-                   "'axis' must be inside [-Rank(X), Rank(X))");
+    PADDLE_ENFORCE_GE(axis, -x_dims.size(),
+                      platform::errors::InvalidArgument(
+                          "'axis'(%d) must be greater than or equal to"
+                          " -Rank(X)(%d).",
+                          axis, -x_dims.size()));
+    PADDLE_ENFORCE_LT(
+        axis, x_dims.size(),
+        platform::errors::InvalidArgument(
+            "'axis'(%d) must be less than Rank(X)(%d).", axis, x_dims.size()));
 
     auto x_rank = x_dims.size();
     if (axis < 0) axis += x_rank;
diff --git a/paddle/fluid/operators/argsort_op.cc b/paddle/fluid/operators/argsort_op.cc
index 867699f32abb61818b538d689e510ef4209ea94a..6fc2c1d35f70358a7531aada7ff82bdcb6f50074 100644
--- a/paddle/fluid/operators/argsort_op.cc
+++ b/paddle/fluid/operators/argsort_op.cc
@@ -23,25 +23,23 @@ class ArgsortOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of ArgsortOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of ArgsortOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Indices"),
-                   "Output(Indices) of ArgsortOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "argsort");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "argsort");
+    OP_INOUT_CHECK(ctx->HasOutput("Indices"), "Output", "Indices", "argsort");
 
     auto in_dims = ctx->GetInputDim("X");
     int axis = ctx->Attrs().Get<int>("axis");
 
     auto num_dims = in_dims.size();
-    PADDLE_ENFORCE(axis < num_dims,
-                   "Attr(axis) %d of ArgsortOp is out of bounds for Input(X)'s "
-                   "rank %d.",
-                   axis, num_dims);
-    PADDLE_ENFORCE(axis >= -num_dims,
-                   "Attr(axis) %d of ArgsortOp must be not less than "
-                   "-rank(Input(X)) (%d).",
-                   axis, num_dims);
+    PADDLE_ENFORCE_GE(axis, -num_dims,
+                      platform::errors::InvalidArgument(
+                          "'axis'(%d) must be greater than or equal to"
+                          " -num_dims(%d).",
+                          axis, -num_dims));
+    PADDLE_ENFORCE_LT(
+        axis, num_dims,
+        platform::errors::InvalidArgument(
+            "'axis'(%d) must be less than num_dims(%d).", axis, num_dims));
 
     ctx->ShareDim("X", "Out");
     ctx->ShareDim("X", "Indices");
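With the two hunks above, an out-of-range 'axis' attribute now fails fast in InferShape with a targeted InvalidArgument message instead of a generic PADDLE_ENFORCE failure. Below is a minimal sketch of how the new check surfaces to Python users; it assumes a fluid 1.x static-graph setup, and the variable names are illustrative:

    import paddle.fluid as fluid

    # shape=[2, 3] plus the implicit batch dimension yields a rank-3
    # variable, so valid axes lie in [-3, 3).
    x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32')

    # axis=5 is out of range; appending the op runs compile-time shape
    # inference, which should now fail with the new message
    # "'axis'(5) must be less than num_dims(3)."
    out = fluid.layers.argsort(input=x, axis=5)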
diff --git a/paddle/fluid/operators/cast_op.cc b/paddle/fluid/operators/cast_op.cc
index 457b641a234beab672a25cefb333a29f16582175..933d959d587be90a14d8a4943b9cc9119e9e5b9c 100644
--- a/paddle/fluid/operators/cast_op.cc
+++ b/paddle/fluid/operators/cast_op.cc
@@ -59,12 +59,8 @@ class CastOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE_EQ(
-        context->HasInput("X"), true,
-        platform::errors::NotFound("The input(X) of cast op must be set"));
-    PADDLE_ENFORCE_EQ(
-        context->HasOutput("Out"), true,
-        platform::errors::NotFound("The output of cast op must be set"));
+    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "cast");
+    OP_INOUT_CHECK(context->HasOutput("Out"), "Output", "Out", "cast");
     context->SetOutputDim("Out", context->GetInputDim("X"));
     context->ShareLoD("X", "Out");
   }
diff --git a/paddle/fluid/operators/diag_op.cc b/paddle/fluid/operators/diag_op.cc
index 1a3dd006c7e6388277d74d21313f085a10b23019..ecbddc346a9409e0601781bc1e7e6a4af83ac4de 100644
--- a/paddle/fluid/operators/diag_op.cc
+++ b/paddle/fluid/operators/diag_op.cc
@@ -22,15 +22,16 @@ class DiagOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Diagonal"),
-                   "Input(Diagonal) of DiagOp should not be null.");
-
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of DiagOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Diagonal"), "Input", "Diagonal", "diag");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "diag");
 
     auto s_dims = ctx->GetInputDim("Diagonal");
-    PADDLE_ENFORCE(s_dims.size() == 1,
-                   "The rank of Input(Diagonal) should only be 1.");
+
+    PADDLE_ENFORCE_EQ(
+        s_dims.size(), 1UL,
+        platform::errors::InvalidArgument(
+            "The dimension of 'diagonal' must be 1, but now it is %d.",
+            s_dims.size()));
 
     ctx->SetOutputDim("Out", {s_dims[0], s_dims[0]});
   }
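The diag change follows the same OP_INOUT_CHECK pattern and, for the rank check, now reports the offending rank in the message. A hedged sketch of the failure mode (again fluid 1.x, names illustrative): a rank-2 Variable passes the new Python-side type checks but reaches this InferShape enforcement:

    import paddle.fluid as fluid

    # A 3x3 variable; append_batch_size=False keeps it rank 2.
    mat = fluid.layers.data(
        name='mat', shape=[3, 3], append_batch_size=False, dtype='float32')

    # Input(Diagonal) must be rank 1, so this should now fail with
    # "The dimension of 'diagonal' must be 1, but now it is 2."
    out = fluid.layers.diag(mat)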
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index e705534c99e76bc2c03229dff8393ec963770681..68da9c49ba04e1bfcae25e295b5bf768907c6511 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -196,11 +196,16 @@ def cast(x, dtype):
             #  [[ 1 -2]
             #   [ 0  4]] int32
     """
-    helper = LayerHelper('cast', **locals())
     check_variable_and_dtype(
         x, 'x',
         ['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
         'cast')
+    check_dtype(dtype, 'dtype', [
+        'bool', 'float16', 'float32', 'float64', 'int8', 'int32', 'int64',
+        'uint8'
+    ], 'cast')
+
+    helper = LayerHelper('cast', **locals())
     out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type='cast',
@@ -768,6 +773,9 @@ def argmin(x, axis=0):
             # [[0 0 2]
             #  [1 0 2]]
     """
+    check_variable_and_dtype(
+        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
+        'argmin')
     helper = LayerHelper("arg_min", **locals())
     out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
     helper.append_op(
@@ -828,6 +836,9 @@ def argmax(x, axis=0):
             # [[2 3 1]
             #  [0 3 1]]
     """
+    check_variable_and_dtype(
+        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
+        'argmax')
     helper = LayerHelper("arg_max", **locals())
     out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
     helper.append_op(
@@ -909,6 +920,9 @@ def argsort(input, axis=-1, descending=False, name=None):
             #                 [4. 7. 4. 6.]
             #                 [5. 7. 7. 9.]]]
     """
+    check_variable_and_dtype(
+        input, 'input',
+        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], 'argsort')
     helper = LayerHelper("argsort", **locals())
     out = helper.create_variable_for_type_inference(
         dtype=input.dtype, stop_gradient=True)
@@ -1106,6 +1120,7 @@ def has_inf(x):
           res = fluid.layers.has_inf(data)
 
     """
+    # check_type(x, 'x', (Variable), 'has_inf')
     helper = LayerHelper("isinf", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
@@ -1130,6 +1145,7 @@ def has_nan(x):
           res = fluid.layers.has_nan(data)
 
     """
+    # check_type(x, 'x', (Variable), 'has_nan')
     helper = LayerHelper("isnan", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
@@ -1333,7 +1349,9 @@ def diag(diagonal):
           # diagonal.shape=(3,) data.shape=(3, 3)
 
     """
-
+    check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
+    check_dtype(diagonal.dtype, 'diagonal',
+                ['float32', 'float64', 'int32', 'int64'], 'diag')
     helper = LayerHelper("diag", **locals())
 
     if not isinstance(diagonal, Variable):
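These Python-side additions reject bad inputs before any op is appended to the program: cast now validates both the input variable and the target dtype, and argmin/argmax/argsort/diag validate their inputs. A small sketch mirroring the updated cast test below (assuming fluid 1.x):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[4], dtype='int32')

    # 'int16' is not among cast's supported target dtypes, so check_dtype
    # should raise TypeError here, before any cast op is created.
    y = fluid.layers.cast(x=x, dtype='int16')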
diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
index aab789bf6399cdb435e6bfa896b9b23aed3dfe3c..1957ad6e2b1e864c9122a828c5ac9ab821f26ac0 100644
--- a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
@@ -173,7 +173,7 @@ class FusionGroupPassCastTest(FusionGroupPassTest):
             self.feed_vars = self._prepare_feed_vars([2, 2], dtype, 2)
 
             tmp_0 = layers.elementwise_add(self.feed_vars[0], self.feed_vars[1])
-            tmp_1 = layers.cast(tmp_0, dtype="double")
+            tmp_1 = layers.cast(tmp_0, dtype="float64")
             tmp_2 = layers.cast(tmp_1, dtype="float32")
 
         self.append_gradients(tmp_2)
diff --git a/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py b/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py
index 3741adf8bbd9db8452401d0ef285e4f51066a246..0201f0635a5afeb285cdbca3e8d526a1ff5032f2 100644
--- a/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py
@@ -20,6 +20,7 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+from paddle.fluid import Program, program_guard
 
 
 class BaseTestCase(OpTest):
@@ -285,5 +286,22 @@ class APT_ArgMaxTest(unittest.TestCase):
         self.assertRaises(TypeError, test_dtype2)
 
 
+class TestArgMinMaxOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+
+            def test_argmax_x_type():
+                x1 = [1, 2, 3]
+                output = fluid.layers.argmax(x=x1)
+
+            self.assertRaises(TypeError, test_argmax_x_type)
+
+            def test_argmin_x_type():
+                x2 = [1, 2, 3]
+                output = fluid.layers.argmin(x=x2)
+
+            self.assertRaises(TypeError, test_argmin_x_type)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_argsort_op.py b/python/paddle/fluid/tests/unittests/test_argsort_op.py
index 758338743625eda4ea632aca590cbe3a7a43de36..e53a0e83dbc7a3cd18f70c3935a707873acf0d3f 100644
--- a/python/paddle/fluid/tests/unittests/test_argsort_op.py
+++ b/python/paddle/fluid/tests/unittests/test_argsort_op.py
@@ -351,5 +351,28 @@ class TestSortOnGPU(TestSortOnCPU):
             self.place = core.CPUPlace()
 
 
+class TestArgsortErrorOnCPU(unittest.TestCase):
+    def init_place(self):
+        self.place = core.CPUPlace()
+
+    def test_error(self):
+        self.init_place()
+        with fluid.program_guard(fluid.Program()):
+
+            def test_input_type():
+                x = [1]
+                output = fluid.layers.argsort(input=x)
+
+            self.assertRaises(TypeError, test_input_type)
+
+
+class TestArgsortErrorOnGPU(TestArgsortErrorOnCPU):
+    def init_place(self):
+        if core.is_compiled_with_cuda():
+            self.place = core.CUDAPlace(0)
+        else:
+            self.place = core.CPUPlace()
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_cast_op.py b/python/paddle/fluid/tests/unittests/test_cast_op.py
index 284d7101aa0156ffd581235fa509535c01fa3bac..084efc945592dc771e41d07ac8fd6e3c2800f23c 100644
--- a/python/paddle/fluid/tests/unittests/test_cast_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cast_op.py
@@ -78,10 +78,14 @@ class TestCastOpError(unittest.TestCase):
                 np.array([[-1]]), [[1]], fluid.CPUPlace())
             self.assertRaises(TypeError, fluid.layers.cast, x1, 'int32')
             # The input dtype of cast_op must be bool, float16, float32, float64, int32, int64, uint8.
-            x2 = fluid.layers.data(name='x2', shape=[4], dtype='int8')
+            x2 = fluid.layers.data(name='x2', shape=[4], dtype='int16')
             self.assertRaises(TypeError, fluid.layers.cast, x2, 'int32')
-            x3 = fluid.layers.data(name='x3', shape=[4], dtype='int16')
-            self.assertRaises(TypeError, fluid.layers.cast, x3, 'int32')
+
+            def test_dtype_type():
+                x4 = fluid.layers.data(name='x4', shape=[4], dtype='int32')
+                output = fluid.layers.cast(x=x4, dtype='int16')
+
+            self.assertRaises(TypeError, test_dtype_type)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_diag.py b/python/paddle/fluid/tests/unittests/test_diag.py
index eed8b91f0e3dc5a0552e9d912e2b63d724c4d6d9..b6566676d2533aad5272fe61dbedbc1d55ea213b 100644
--- a/python/paddle/fluid/tests/unittests/test_diag.py
+++ b/python/paddle/fluid/tests/unittests/test_diag.py
@@ -17,6 +17,9 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
+from paddle.fluid import core
+from paddle.fluid import Program, program_guard
 
 
 class TestDiagOp(OpTest):
@@ -39,5 +42,16 @@ class TestDiagOpCase1(TestDiagOp):
         self.case = np.array([3], dtype='int32')
 
 
+class TestDiagError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+
+            def test_diag_type():
+                x = [1, 2, 3]
+                output = fluid.layers.diag(diagonal=x)
+
+            self.assertRaises(TypeError, test_diag_type)
+
+
 if __name__ == "__main__":
     unittest.main()
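For reference, the new error tests above all share one pattern: build inside a scratch Program, call the layer with a non-Variable input, and assert that the Python-side check raises TypeError. A condensed, hedged template (the class name and the sample input are illustrative):

    import unittest

    import paddle.fluid as fluid
    from paddle.fluid import Program, program_guard


    class TestLayerInputError(unittest.TestCase):
        def test_errors(self):
            with program_guard(Program(), Program()):
                # A plain Python list is not a Variable, so the new
                # check_variable_and_dtype call rejects it.
                self.assertRaises(TypeError, fluid.layers.argsort, [1])


    if __name__ == '__main__':
        unittest.main()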