diff --git a/paddle/fluid/operators/controlflow/compare_op.cc b/paddle/fluid/operators/controlflow/compare_op.cc
index c1730b10671d08ff3f5d2dff8c1e0210eed6eebb..579ff7148044fb23c7169f12753e6fa95d4f8a06 100644
--- a/paddle/fluid/operators/controlflow/compare_op.cc
+++ b/paddle/fluid/operators/controlflow/compare_op.cc
@@ -81,7 +81,7 @@ class CompareOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* context) const override {
     OpComment comment;
     OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", comment.type);
-    OP_INOUT_CHECK(context->HasInput("Y"), "Output", "Y", comment.type);
+    OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", comment.type);
 
     auto dim_x = context->GetInputDim("X");
     auto dim_y = context->GetInputDim("Y");
diff --git a/paddle/fluid/operators/controlflow/get_places_op.cc b/paddle/fluid/operators/controlflow/get_places_op.cc
index eff88f54ade6e4bc71e8d80771b3f757819354a9..d138d0874fed27a071509bcf58f2640bdd932b89 100644
--- a/paddle/fluid/operators/controlflow/get_places_op.cc
+++ b/paddle/fluid/operators/controlflow/get_places_op.cc
@@ -51,8 +51,9 @@ class GetPlacesOp : public framework::OperatorBase {
       device_count =
           is_gpu ? CUDADevCount() : std::thread::hardware_concurrency();
     }
-    PADDLE_ENFORCE_NE(device_count, 0UL, "Cannot indicate %s device count",
-                      is_gpu ? "GPU" : "CPU");
+    PADDLE_ENFORCE_NE(device_count, 0UL, platform::errors::InvalidArgument(
+                                             "Cannot indicate %s device count",
+                                             is_gpu ? "GPU" : "CPU"));
 
     auto out_var_name = Output("Out");
     auto &places = *(GET_DATA_SAFELY(scope.FindVar(out_var_name), "Output",
@@ -61,8 +62,9 @@ class GetPlacesOp : public framework::OperatorBase {
                          .GetMutable<platform::PlaceList>());
     places.reserve(device_count);
     if (is_gpu) {
       PADDLE_ENFORCE_LE(device_count, CUDADevCount(),
-                        "Only %d CUDA devices found, cannot set to %d",
-                        CUDADevCount(), device_count);
+                        platform::errors::InvalidArgument(
+                            "Only %d CUDA devices found, cannot set to %d",
+                            CUDADevCount(), device_count));
       for (size_t i = 0; i < device_count; ++i) {
         places.emplace_back(platform::CUDAPlace(static_cast<int>(i)));
       }
diff --git a/paddle/fluid/operators/shrink_rnn_memory_op.cc b/paddle/fluid/operators/shrink_rnn_memory_op.cc
index af0d4f17b02f8badf0ba135903166a2a277380da..3c66cd0dadab8d6bed7b60f847ed17aeeb0ab347 100644
--- a/paddle/fluid/operators/shrink_rnn_memory_op.cc
+++ b/paddle/fluid/operators/shrink_rnn_memory_op.cc
@@ -31,11 +31,16 @@ class ShrinkRNNMemoryOp : public ArrayOp {
   void RunImpl(const framework::Scope &scope,
                const platform::Place &place) const override {
     auto *x_var = scope.FindVar(Input("X"));
-    PADDLE_ENFORCE(x_var != nullptr, "Input X must be set");
+    PADDLE_ENFORCE_NOT_NULL(x_var,
+                            platform::errors::NotFound(
+                                "Input(X) of ShrinkRNNMemoryOp is not found."));
     auto &x_tensor = x_var->Get<framework::LoDTensor>();
     size_t offset = this->GetOffset(scope, place);
     auto *rank_table_var = scope.FindVar(Input("RankTable"));
-    PADDLE_ENFORCE(rank_table_var != nullptr, "RankTable must be set");
+    PADDLE_ENFORCE_NOT_NULL(
+        rank_table_var,
+        platform::errors::NotFound(
+            "Input(RankTable) of ShrinkRNNMemoryOp is not found."));
     auto &rank_table = rank_table_var->Get<framework::LoDRankTable>();
     auto &rank_items = rank_table.items();
 
@@ -46,7 +51,9 @@ class ShrinkRNNMemoryOp : public ArrayOp {
                        rank_items.begin();
 
     auto *out_var = scope.FindVar(Output("Out"));
-    PADDLE_ENFORCE(out_var != nullptr, "Output(Out) must be set.");
+    PADDLE_ENFORCE_NOT_NULL(
+        out_var, platform::errors::NotFound(
+                     "Output(Out) of ShrinkRNNMemoryOp is not found."));
     auto &out_tensor = *out_var->GetMutable<framework::LoDTensor>();
 
     size_t height = dst_num_rows;
@@ -96,9 +103,10 @@ batch size for the next time step.
 class ShrinkRNNMemoryInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("X"));
-    PADDLE_ENFORCE(context->HasInput("I"));
-    PADDLE_ENFORCE(context->HasInput("RankTable"));
+    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "ShrinkRNNMemory");
+    OP_INOUT_CHECK(context->HasInput("I"), "Input", "I", "ShrinkRNNMemory");
+    OP_INOUT_CHECK(context->HasInput("RankTable"), "Input", "RankTable",
+                   "ShrinkRNNMemory");
     context->SetOutputDim("Out", context->GetInputDim("X"));
     // For runtime, output's lod is computed according to input's lod, but
     // remove the finished sequence. It is set in detail kernel implementation.
@@ -121,10 +129,13 @@ class ShrinkRNNMemoryGradOp : public ArrayOp {
                const platform::Place &place) const override {
     auto *dout_var = scope.FindVar(Input(framework::GradVarName("Out")));
     auto *dx_var = scope.FindVar(Output(framework::GradVarName("X")));
-    PADDLE_ENFORCE(dx_var != nullptr, "Input Gradient should not be nullptr");
+    PADDLE_ENFORCE_NOT_NULL(
+        dx_var, platform::errors::NotFound(
+                    "Input(X@GRAD) of ShrinkRNNMemoryGradOp is not found."));
     auto *x_var = scope.FindVar(Input("X"));
-    PADDLE_ENFORCE(x_var != nullptr);
-
+    PADDLE_ENFORCE_NOT_NULL(
+        x_var, platform::errors::NotFound(
+                   "Input(X) of ShrinkRNNMemoryGradOp is not found."));
     auto &x_tensor = x_var->Get<framework::LoDTensor>();
     auto &dx_tensor = *dx_var->GetMutable<framework::LoDTensor>();
     dx_tensor.Resize(x_tensor.dims());
@@ -154,8 +165,9 @@ class ShrinkRNNMemoryGradOp : public ArrayOp {
 class ShrinkRNNMemoryGradInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("X"));
-    PADDLE_ENFORCE(context->HasOutput(framework::GradVarName("X")));
+    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "ShrinkRNNMemoryGrad");
+    OP_INOUT_CHECK(context->HasOutput(framework::GradVarName("X")), "Output",
+                   "X", "ShrinkRNNMemoryGrad");
     context->ShareDim("X", /*->*/ framework::GradVarName("X"));
     context->ShareLoD("X", /*->*/ framework::GradVarName("X"));
 
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 9e2d3b1c5a6a9425f7bc62979b5385ce71695e7e..753b869202ca025c74de04c309876606ee8aec1d 100755
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -1816,6 +1816,9 @@ def shrink_memory(x, i, table):
     usage.
     """
     helper = LayerHelper('shrink_memory', **locals())
+    check_type(x, 'x', Variable, 'shrink_memory')
+    check_type(i, 'i', Variable, 'shrink_memory')
+    check_type(table, 'table', Variable, 'shrink_memory')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='shrink_rnn_memory',
diff --git a/python/paddle/fluid/tests/unittests/test_get_places_op.py b/python/paddle/fluid/tests/unittests/test_get_places_op.py
index e6be3a3a3e5b6ae7570d2ebdf2836e48345f5734..a6deeab457c090d63746cce3fa1107acf71e0a5e 100644
--- a/python/paddle/fluid/tests/unittests/test_get_places_op.py
+++ b/python/paddle/fluid/tests/unittests/test_get_places_op.py
@@ -15,6 +15,7 @@
 from __future__ import print_function
 
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 from paddle.fluid.layers.device import get_places
 from decorator_helper import prog_scope
 import unittest
@@ -22,13 +23,26 @@ import unittest
 
 class TestGetPlaces(unittest.TestCase):
     @prog_scope()
-    def test_get_places(self):
+    def check_get_cpu_places(self):
         places = get_places()
         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
         exe.run(fluid.default_main_program())
         self.assertEqual(places.type, fluid.core.VarDesc.VarType.PLACE_LIST)
 
+    @prog_scope()
+    def check_get_gpu_places(self):
+        places = get_places(device_type='CUDA')
+        gpu = fluid.CUDAPlace(0)
+        exe = fluid.Executor(gpu)
+        exe.run(fluid.default_main_program())
+        self.assertEqual(places.type, fluid.core.VarDesc.VarType.PLACE_LIST)
+
+    def test_main(self):
+        if core.is_compiled_with_cuda():
+            self.check_get_gpu_places()
+        self.check_get_cpu_places()
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py
index 97f79f9421d498723da4c7992551f1210d3f6003..6e1099e5a391c38bf951165195ade0762b6d788f 100644
--- a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py
+++ b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py
@@ -20,7 +20,7 @@ from paddle.fluid.executor import Executor
 import paddle.fluid.layers as layers
 from paddle.fluid.backward import append_backward
 from paddle.fluid.framework import default_main_program, switch_main_program
-from paddle.fluid.framework import Program
+from paddle.fluid.framework import Program, program_guard
 import numpy as np
 from paddle.fluid.layers.control_flow import shrink_memory
 
@@ -104,5 +104,36 @@ class TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase):
         self.assertAlmostEqual(1.0, self.sum_lodtensor(outs[3]), delta=0.01)
 
 
+class TestShrinkRNNMemoryOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            x = layers.zeros(dtype='int64', shape=[3, 100])
+            i = layers.zeros(dtype='int64', shape=[1])
+            rank_table_tensor = core.LoDTensor()
+            rank_table_tensor.set_recursive_sequence_lengths([[1, 2, 3]])
+            rank_table_tensor.set(
+                np.random.random(size=(6, 1)).astype('float32'),
+                core.CPUPlace())
+            rank_table = np.random.random(size=(6, 1)).astype('float32')
+
+            # The type of x in shrink_rnn_memory must be Variable.
+            def test_x_type():
+                out = shrink_memory(x=1, i=i, table=rank_table_tensor)
+
+            self.assertRaises(TypeError, test_x_type)
+
+            # The type of i in shrink_rnn_memory must be Variable.
+            def test_i_type():
+                out = shrink_memory(x=x, i=0, table=rank_table_tensor)
+
+            self.assertRaises(TypeError, test_i_type)
+
+            # The type of table in shrink_rnn_memory must be Variable.
+            def test_table_type():
+                out = shrink_memory(x=x, i=i, table=rank_table)
+
+            self.assertRaises(TypeError, test_table_type)
+
+
 if __name__ == '__main__':
     unittest.main()