Unverified commit 8987946f authored by liym27, committed by GitHub

Api/Op (select_input/select_output) error message enhancement. (#23445)

Parent fab9464f
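For context, the pattern throughout this patch is to replace bare-string PADDLE_ENFORCE messages with typed platform::errors objects that also report the offending values. A minimal sketch of the user-visible effect; this is a hypothetical session, assuming the fluid API on this branch (the control_flow import path mirrors the test file in the last hunk):

import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.layers.control_flow import select_input

main_program = Program()
with program_guard(main_program):
    x1 = layers.data(name='x1', shape=[2], dtype='float32')
    x2 = layers.data(name='x2', shape=[2], dtype='float32')
    mask = layers.data(name='mask', shape=[1], dtype='int32')
    y = select_input([x1, x2], mask)

exe = Executor(fluid.CPUPlace())
feed = {
    'x1': np.zeros((1, 2), dtype=np.float32),
    'x2': np.ones((1, 2), dtype=np.float32),
    # Only two branches exist, so mask = 5 is out of range.
    'mask': np.asarray([5], dtype=np.int32),
}
try:
    exe.run(main_program, feed=feed, fetch_list=[y])
except Exception as e:
    # Pre-patch message: "Selected branch number is greater than actual
    # branch num in SelectInputOp". Post-patch: an InvalidArgument error
    # that includes the received values, e.g.
    # "But received Mask = 5, X's size = 2."
    print(e)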
...
@@ -40,9 +40,13 @@ class SelectInputOp : public framework::OperatorBase {
     size_t output_branch = static_cast<size_t>(GetBranchNumber(mask));
     const std::vector<std::string> &x_names = Inputs("X");
-    PADDLE_ENFORCE_LT(output_branch, x_names.size(),
-                      "Selected branch number is greater than actual branch "
-                      "num in SelectInputOp");
+    PADDLE_ENFORCE_LT(
+        output_branch, x_names.size(),
+        platform::errors::InvalidArgument(
+            "Input 'Mask' in SelectInputOp is invalid. "
+            "'Mask' must be less than the size of input vector 'X'. "
+            "But received Mask = %d, X's size = %d.",
+            output_branch, x_names.size()));
     const framework::Variable *selected_x =
         scope.FindVar(x_names[output_branch]);
...
...
@@ -27,7 +27,11 @@ namespace operators {
 // selected branch number.
 inline int GetBranchNumber(const framework::LoDTensor &mask) {
-  PADDLE_ENFORCE_EQ(mask.numel(), 1,
-                    "Mask in SelectOutputOp must have numel 1.");
+  PADDLE_ENFORCE_EQ(mask.numel(), 1,
+                    platform::errors::InvalidArgument(
+                        "The numel of Input(Mask) in SelectInputOp or "
+                        "SelectOutputOp must be 1. "
+                        "But received %d, and its shape is [%s].",
+                        mask.numel(), mask.dims()));
   if (platform::is_cpu_place(mask.place())) {
     return mask.data<int>()[0];
   }
...
@@ -36,9 +40,10 @@ inline int GetBranchNumber(const framework::LoDTensor &mask) {
 #ifdef PADDLE_WITH_CUDA
     framework::TensorCopySync(mask, platform::CPUPlace(), cpu_mask.get());
 #else
-    PADDLE_THROW(
-        "This version of PaddlePaddle doen NOT support GPU but got GPU tensor "
-        "Mask in SelectOutputOp. Please compile WITH_GPU option");
+    PADDLE_THROW(platform::errors::PreconditionNotMet(
+        "This version of PaddlePaddle does NOT support GPU, "
+        "but got GPU tensor 'Mask' in SelectInputOp or SelectOutputOp. "
+        "Please compile PaddlePaddle WITH_GPU first."));
 #endif
     return cpu_mask->data<int>()[0];
 }
...
...
@@ -41,9 +41,13 @@ class SelectOutputOp : public framework::OperatorBase {
     size_t output_branch = static_cast<size_t>(GetBranchNumber(mask));
     const std::vector<std::string> &out_names = Outputs("Out");
-    PADDLE_ENFORCE_LT(output_branch, out_names.size(),
-                      "Selected branch number is greater than actual branch "
-                      "num in SelectOutputOp");
+    PADDLE_ENFORCE_LT(
+        output_branch, out_names.size(),
+        platform::errors::InvalidArgument(
+            "Input 'Mask' in SelectOutputOp is invalid. "
+            "'Mask' must be less than the size of output vector 'Out'. "
+            "But received Mask = %d, Out's size = %d.",
+            output_branch, out_names.size()));
     const framework::Variable *x = scope.FindVar(Input("X"));
     framework::Variable *selected_out = scope.FindVar(out_names[output_branch]);
...
...
@@ -56,6 +56,10 @@ def select_output(input, outputs, mask):
         Variable: The outputs variables
     """
     helper = LayerHelper('select_output', **locals())
+    check_type(input, 'input', (Variable), 'select_output')
+    check_variable_and_dtype(mask, 'mask', ['int32'], 'select_output')
+    check_type(outputs, 'outputs', (list, tuple), 'select_output')
+
     helper.append_op(
         type='select_output',
         inputs={'X': input,
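These checks run while the program is being built, so a bad argument now fails fast as a Python TypeError instead of surfacing later from the C++ op. A hedged sketch of the effect (hypothetical session; imports mirror the test file in the last hunk):

import paddle.fluid.layers as layers
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.layers.control_flow import select_output

with program_guard(Program(), Program()):
    x = layers.data(name='x', shape=[1], dtype='int32')
    out = layers.data(name='out', shape=[1], dtype='int32')
    bad_mask = layers.data(name='bad_mask', shape=[1], dtype='float32')
    try:
        select_output(x, [out], bad_mask)  # mask dtype must be int32
    except TypeError as e:
        print(e)  # raised by check_variable_and_dtype at build time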
...
@@ -80,14 +84,12 @@ def select_input(inputs, mask):
         Variable: The selected input variable
     """
     helper = LayerHelper('select_input', **locals())
-    if isinstance(inputs, list) or isinstance(inputs, tuple):
-        input_dtype = inputs[0].dtype
-        input_shape = inputs[0].shape
-        input_type = inputs[0].type
-    else:
-        input_dtype = inputs.dtype
-        input_shape = inputs.shape
-        input_type = inputs.type
+    check_type(inputs, 'inputs', (list, tuple), 'select_input')
+    check_variable_and_dtype(mask, 'mask', ['int32'], 'select_input')
+
+    input_dtype = inputs[0].dtype
+    input_shape = inputs[0].shape
+    input_type = inputs[0].type
+
     out = helper.create_variable(
         dtype=input_dtype, shape=input_shape, type=input_type)
...
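One behavioral note: with the else branch gone, select_input now only accepts a list or tuple, which is why the single-tensor test (which called select_input(out, mask) on a bare Variable) is deleted in the test diff below. A hedged sketch of adapting such a call site, assuming the same fluid API:

import paddle.fluid.layers as layers
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.layers.control_flow import select_input

with program_guard(Program(), Program()):
    x = layers.data(name='x', shape=[2], dtype='float32')
    mask = layers.data(name='mask', shape=[1], dtype='int32')
    y = select_input([x], mask)  # wrap the single Variable in a list
    # select_input(x, mask)      # pre-patch form; now raises TypeError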
...
@@ -60,34 +60,67 @@ class TestSplitMergeSelectedVarOps(unittest.TestCase):
             self.assertTrue(np.allclose(np.asarray(ret[0]), feed_x))
             self.assertTrue(np.allclose(np.asarray(ret[1]), x_grad))
 
-    def test_forward_backward_single_tensor_output(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[2], dtype='float32')
-            x.stop_gradient = False  # For test gradient
-            mask = layers.data(name='mask', shape=[1], dtype='int32')
-
-            out = program.current_block().create_var(
-                dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR)
-            select_output(x, out, mask)
-            y = select_input(out, mask)
-            mean = layers.mean(y)
-            append_backward(mean)
-
-        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
-        ) else fluid.CPUPlace()
-        exe = Executor(place)
-
-        feed_x = np.asarray([1.3, -1.4]).astype(np.float32)
-        feed_mask = np.asarray([0]).astype(np.int32)
-        ret = exe.run(program,
-                      feed={'x': feed_x,
-                            'mask': feed_mask},
-                      fetch_list=[y.name, x.grad_name])
-        x_grad = np.asarray([0.5, 0.5]).astype(np.float32)
-        self.assertTrue(np.allclose(np.asarray(ret[0]), feed_x))
-        self.assertTrue(np.allclose(np.asarray(ret[1]), x_grad))
+
+class TestSelectInputOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            mask = layers.data(name='mask', shape=[1], dtype='int32')
+            in1 = layers.data(name='in1', shape=[1], dtype='int32')
+
+            # 1. The type of inputs in select_input must be list or tuple.
+            def test_inputs_type():
+                select_input(1, mask)
+
+            self.assertRaises(TypeError, test_inputs_type)
+
+            # 2. The type of mask in select_input must be Variable.
+            def test_mask_type():
+                select_input([in1], mask=1)
+
+            self.assertRaises(TypeError, test_mask_type)
+
+            # 3. The dtype of mask in select_input must be int32 or int64.
+            def test_mask_dtype():
+                mask = layers.data(name='mask2', shape=[1], dtype='float32')
+                select_input([in1], mask)
+
+            self.assertRaises(TypeError, test_mask_dtype)
+
+
+class TestSelectOutput_Error(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            in1 = layers.data(name='in1', shape=[1], dtype='int32')
+            mask_int32 = layers.data(
+                name='mask_int32', shape=[1], dtype='int32')
+            mask_float32 = layers.data(
+                name='mask_float32', shape=[1], dtype='float32')
+            out1 = layers.data(name='out1', shape=[1], dtype='int32')
+
+            # 1. The type of input in select_output must be Variable.
+            def test_input_type():
+                select_output(1, [out1], mask_int32)
+
+            self.assertRaises(TypeError, test_input_type)
+
+            # 2. The type of mask in select_output must be Variable.
+            def test_mask_type():
+                select_output(in1, [out1], mask=1)
+
+            self.assertRaises(TypeError, test_mask_type)
+
+            # 3. The dtype of mask in select_output must be int32 or int64.
+            def test_mask_dtype():
+                select_output(in1, [out1], mask=mask_float32)
+
+            self.assertRaises(TypeError, test_mask_dtype)
+
+            # 4. The type of outputs in select_output must be list or tuple.
+            def test_outputs_type():
+                select_output(in1, out1, mask=mask_int32)
+
+            self.assertRaises(TypeError, test_outputs_type)
 
 
 if __name__ == '__main__':
...