Unverified commit af149f25, authored by gfwm0502, committed by GitHub

OP(compare/get_places/shrink_rnn_memory) error message enhancement (#23780)

As the title says: replace the bare `PADDLE_ENFORCE` checks in these three operators with `OP_INOUT_CHECK`/`PADDLE_ENFORCE_NOT_NULL` plus typed `platform::errors` messages, and add Python-side `check_type` guards with matching unit tests.
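The pattern repeated throughout the diff is easiest to see in the `ShrinkRNNMemoryOp` hunk; a minimal before/after sketch taken from it:

```cpp
// Before: a bare truthiness check with a terse, uncategorized message.
PADDLE_ENFORCE(x_var != nullptr, "Input X must be set");

// After: an explicit null check carrying a typed error category, so the
// failure names what is missing (Input(X)), in which op (ShrinkRNNMemoryOp),
// and why (NotFound).
PADDLE_ENFORCE_NOT_NULL(x_var,
                        platform::errors::NotFound(
                            "Input(X) of ShrinkRNNMemoryOp is not found."));
```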
Parent 47629418
@@ -81,7 +81,7 @@ class CompareOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* context) const override {
     OpComment comment;
     OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", comment.type);
-    OP_INOUT_CHECK(context->HasInput("Y"), "Output", "Y", comment.type);
+    OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", comment.type);
     auto dim_x = context->GetInputDim("X");
     auto dim_y = context->GetInputDim("Y");
......
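`OP_INOUT_CHECK` is the framework's shorthand for these InferShape-time presence checks; its second argument ("Input"/"Output") is interpolated into the user-facing message, which is why the "Output" to "Input" fix above matters. As a rough sketch only (the exact macro body and wording live in the framework headers and are an assumption here), it behaves like:

```cpp
// Approximate behavior of OP_INOUT_CHECK(expr, type, name, op_type); the
// real definition is provided by the Paddle framework and may differ.
#define OP_INOUT_CHECK(expr, type, name, op_type)             \
  do {                                                        \
    PADDLE_ENFORCE_EQ(expr, true,                             \
                      platform::errors::NotFound(             \
                          "No %s(%s) found for %s operator.", \
                          type, name, op_type));              \
  } while (0)
```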
@@ -51,8 +51,9 @@ class GetPlacesOp : public framework::OperatorBase {
       device_count =
           is_gpu ? CUDADevCount() : std::thread::hardware_concurrency();
     }
-    PADDLE_ENFORCE_NE(device_count, 0UL, "Cannot indicate %s device count",
-                      is_gpu ? "GPU" : "CPU");
+    PADDLE_ENFORCE_NE(device_count, 0UL, platform::errors::InvalidArgument(
+                                             "Cannot indicate %s device count",
+                                             is_gpu ? "GPU" : "CPU"));
     auto out_var_name = Output("Out");
     auto &places = *(GET_DATA_SAFELY(scope.FindVar(out_var_name), "Output",
@@ -61,8 +62,9 @@ class GetPlacesOp : public framework::OperatorBase {
     places.reserve(device_count);
     if (is_gpu) {
       PADDLE_ENFORCE_LE(device_count, CUDADevCount(),
+                        platform::errors::InvalidArgument(
                         "Only %d CUDA devices found, cannot set to %d",
-                        CUDADevCount(), device_count);
+                        CUDADevCount(), device_count));
       for (size_t i = 0; i < device_count; ++i) {
         places.emplace_back(platform::CUDAPlace(static_cast<int>(i)));
       }
......
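Note the error-type choice in the two `get_places` checks above: `platform::errors::InvalidArgument` for an impossible device count, versus the `platform::errors::NotFound` used for missing variables elsewhere in this PR. The comparison macros take the two operands followed by the error object to raise on failure; annotated from the hunks above:

```cpp
// Fires when device_count == 0, i.e. neither a CPU nor a GPU count
// could be determined.
PADDLE_ENFORCE_NE(device_count, 0UL,
                  platform::errors::InvalidArgument(
                      "Cannot indicate %s device count",
                      is_gpu ? "GPU" : "CPU"));

// Fires when more CUDA places are requested than devices are visible.
PADDLE_ENFORCE_LE(device_count, CUDADevCount(),
                  platform::errors::InvalidArgument(
                      "Only %d CUDA devices found, cannot set to %d",
                      CUDADevCount(), device_count));
```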
@@ -31,11 +31,16 @@ class ShrinkRNNMemoryOp : public ArrayOp {
   void RunImpl(const framework::Scope &scope,
                const platform::Place &place) const override {
     auto *x_var = scope.FindVar(Input("X"));
-    PADDLE_ENFORCE(x_var != nullptr, "Input X must be set");
+    PADDLE_ENFORCE_NOT_NULL(x_var,
+                            platform::errors::NotFound(
+                                "Input(X) of ShrinkRNNMemoryOp is not found."));
     auto &x_tensor = x_var->Get<framework::LoDTensor>();
     size_t offset = this->GetOffset(scope, place);
     auto *rank_table_var = scope.FindVar(Input("RankTable"));
-    PADDLE_ENFORCE(rank_table_var != nullptr, "RankTable must be set");
+    PADDLE_ENFORCE_NOT_NULL(
+        rank_table_var,
+        platform::errors::NotFound(
+            "Input(RankTable) of ShrinkRNNMemoryOp is not found."));
     auto &rank_table = rank_table_var->Get<framework::LoDRankTable>();
     auto &rank_items = rank_table.items();
@@ -46,7 +51,9 @@ class ShrinkRNNMemoryOp : public ArrayOp {
                         rank_items.begin();
     auto *out_var = scope.FindVar(Output("Out"));
-    PADDLE_ENFORCE(out_var != nullptr, "Output(Out) must be set.");
+    PADDLE_ENFORCE_NOT_NULL(
+        out_var, platform::errors::NotFound(
+                     "Output(Out) of ShrinkRNNMemoryOp is not found."));
     auto &out_tensor = *out_var->GetMutable<framework::LoDTensor>();
     size_t height = dst_num_rows;
@@ -96,9 +103,10 @@ batch size for the next time step.
 class ShrinkRNNMemoryInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("X"));
-    PADDLE_ENFORCE(context->HasInput("I"));
-    PADDLE_ENFORCE(context->HasInput("RankTable"));
+    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "ShrinkRNNMemory");
+    OP_INOUT_CHECK(context->HasInput("I"), "Input", "I", "ShrinkRNNMemory");
+    OP_INOUT_CHECK(context->HasInput("RankTable"), "Input", "RankTable",
+                   "ShrinkRNNMemory");
     context->SetOutputDim("Out", context->GetInputDim("X"));
     // For runtime, output's lod is computed according to input's lod, but
     // remove the finished sequence. It is set in detail kernel implementation.
@@ -121,10 +129,13 @@ class ShrinkRNNMemoryGradOp : public ArrayOp {
                const platform::Place &place) const override {
     auto *dout_var = scope.FindVar(Input(framework::GradVarName("Out")));
     auto *dx_var = scope.FindVar(Output(framework::GradVarName("X")));
-    PADDLE_ENFORCE(dx_var != nullptr, "Input Gradient should not be nullptr");
+    PADDLE_ENFORCE_NOT_NULL(
+        dx_var, platform::errors::NotFound(
+                    "Output(X@GRAD) of ShrinkRNNMemoryGradOp is not found."));
     auto *x_var = scope.FindVar(Input("X"));
-    PADDLE_ENFORCE(x_var != nullptr);
+    PADDLE_ENFORCE_NOT_NULL(
+        x_var, platform::errors::NotFound(
+                   "Input(X) of ShrinkRNNMemoryGradOp is not found."));
     auto &x_tensor = x_var->Get<framework::LoDTensor>();
     auto &dx_tensor = *dx_var->GetMutable<framework::LoDTensor>();
     dx_tensor.Resize(x_tensor.dims());
@@ -154,8 +165,9 @@ class ShrinkRNNMemoryGradOp : public ArrayOp {
 class ShrinkRNNMemoryGradInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("X"));
-    PADDLE_ENFORCE(context->HasOutput(framework::GradVarName("X")));
+    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "ShrinkRNNMemoryGrad");
+    OP_INOUT_CHECK(context->HasOutput(framework::GradVarName("X")), "Output",
+                   "X", "ShrinkRNNMemoryGrad");
     context->ShareDim("X", /*->*/ framework::GradVarName("X"));
     context->ShareLoD("X", /*->*/ framework::GradVarName("X"));
......
@@ -1816,6 +1816,9 @@ def shrink_memory(x, i, table):
         usage.
     """
     helper = LayerHelper('shrink_memory', **locals())
+    check_type(x, 'x', Variable, 'shrink_memory')
+    check_type(i, 'i', Variable, 'shrink_memory')
+    check_type(table, 'table', Variable, 'shrink_memory')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='shrink_rnn_memory',
......
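On the Python side, the new `check_type` guards reject non-`Variable` arguments while the program is being built, so misuse surfaces as an immediate `TypeError` rather than a C++ enforce failure at run time. A minimal usage sketch, assuming the same imports the updated unit test uses (the numpy `table` is deliberately the wrong type):

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.layers.control_flow import shrink_memory

with fluid.program_guard(fluid.Program(), fluid.Program()):
    x = fluid.layers.zeros(dtype='int64', shape=[3, 100])
    i = fluid.layers.zeros(dtype='int64', shape=[1])
    table = np.zeros((6, 1), dtype='float32')  # not a Variable
    try:
        shrink_memory(x=x, i=i, table=table)
    except TypeError:
        pass  # check_type raised before the op was appended to the program
```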
@@ -15,6 +15,7 @@
 from __future__ import print_function
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+from paddle.fluid.layers.device import get_places
 from decorator_helper import prog_scope
 import unittest
@@ -22,13 +23,26 @@ import unittest
 class TestGetPlaces(unittest.TestCase):
     @prog_scope()
-    def test_get_places(self):
+    def check_get_cpu_places(self):
         places = get_places()
         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
         exe.run(fluid.default_main_program())
         self.assertEqual(places.type, fluid.core.VarDesc.VarType.PLACE_LIST)

+    @prog_scope()
+    def check_get_gpu_places(self):
+        places = get_places(device_type='CUDA')
+        gpu = fluid.CUDAPlace(0)
+        exe = fluid.Executor(gpu)
+        exe.run(fluid.default_main_program())
+        self.assertEqual(places.type, fluid.core.VarDesc.VarType.PLACE_LIST)
+
+    def test_main(self):
+        if core.is_compiled_with_cuda():
+            self.check_get_gpu_places()
+        self.check_get_cpu_places()

 if __name__ == '__main__':
     unittest.main()
@@ -20,7 +20,7 @@ from paddle.fluid.executor import Executor
 import paddle.fluid.layers as layers
 from paddle.fluid.backward import append_backward
 from paddle.fluid.framework import default_main_program, switch_main_program
-from paddle.fluid.framework import Program
+from paddle.fluid.framework import Program, program_guard
 import numpy as np
 from paddle.fluid.layers.control_flow import shrink_memory
@@ -104,5 +104,36 @@ class TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase):
         self.assertAlmostEqual(1.0, self.sum_lodtensor(outs[3]), delta=0.01)

+class TestShrinkRNNMemoryOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            x = layers.zeros(dtype='int64', shape=[3, 100])
+            i = layers.zeros(dtype='int64', shape=[1])
+            rank_table_tensor = core.LoDTensor()
+            rank_table_tensor.set_recursive_sequence_lengths([[1, 2, 3]])
+            rank_table_tensor.set(
+                np.random.random(size=(6, 1)).astype('float32'),
+                core.CPUPlace())
+            rank_table = np.random.random(size=(6, 1)).astype('float32')
+
+            # The type of x in shrink_rnn_memory must be Variable.
+            def test_x_type():
+                out = shrink_memory(x=1, i=i, table=rank_table_tensor)
+
+            self.assertRaises(TypeError, test_x_type)
+
+            # The type of i in shrink_rnn_memory must be Variable.
+            def test_i_type():
+                out = shrink_memory(x=x, i=0, table=rank_table_tensor)
+
+            self.assertRaises(TypeError, test_i_type)
+
+            # The type of table in shrink_rnn_memory must be Variable.
+            def test_table_type():
+                out = shrink_memory(x=x, i=i, table=rank_table)
+
+            self.assertRaises(TypeError, test_table_type)

 if __name__ == '__main__':
     unittest.main()