Unverified · Commit 0be4b04d authored by GaoWei8, committed by GitHub

Api (lod_append) error message enhancement (#23541)

Parent 81c4def9
@@ -47,13 +47,13 @@ static inline framework::DDim ComputeAndCheckShape(
           is_runtime || (out_dims[j] > 0 && inputs_dims[i][j] > 0);
       if (check_shape) {
         // check all shape in run time
-        PADDLE_ENFORCE_EQ(
-            inputs_dims[0][j], inputs_dims[i][j],
-            platform::errors::InvalidArgument(
-                "The shape of input[%d] must be equal to input[0]. "
-                "But received input[0]'s shape = "
-                "[%s], input[%d]'s shape = [%s].",
-                i, inputs_dims[0], i, inputs_dims[i]));
+        PADDLE_ENFORCE_EQ(inputs_dims[0][j], inputs_dims[i][j],
+                          platform::errors::InvalidArgument(
+                              "The %d-th dimension of input[0] and input[%d] "
+                              "is expected to be equal. "
+                              "But received input[0]'s shape = "
+                              "[%s], input[%d]'s shape = [%s].",
+                              j, i, inputs_dims[0], i, inputs_dims[i]));
       }
     }
   }

@@ -79,9 +79,9 @@ class ConcatKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto ins = ctx.MultiInput<framework::LoDTensor>("X");
     framework::LoDTensor* out = ctx.Output<framework::LoDTensor>("Out");
-    PADDLE_ENFORCE_NOT_NULL(
-        ins[0], platform::errors::NotFound(
-                    " The first input of concat should not be null."));
+    PADDLE_ENFORCE_NOT_NULL(ins[0],
+                            platform::errors::NotFound(
+                                "The first input tensor is not initialized."));
     auto axis = ctx.Attr<int>("axis");
     bool need_resize_out_dims = false;
     if (ctx.HasInput("AxisTensor")) {

@@ -116,7 +116,9 @@ class ConcatKernel : public framework::OpKernel<T> {
               platform::errors::Unimplemented(
                   "The lod level of all input LoDTensors should be same. "
                   "Maybe different lod level of input LoDTensors can concat,"
-                  " it is not supported currently."));
+                  "it is not supported currently. The lod level of %dth input "
+                  "is %d and first input is %d.",
+                  i, ins[i]->lod().size(), lod_size_0));
         } else {
           lod_size = 0;
           break;

@@ -181,9 +183,9 @@ class ConcatGradKernel : public framework::OpKernel<T> {
         }
       }
     }
-    PADDLE_ENFORCE_NOT_NULL(
-        ins[0], platform::errors::NotFound(
-                    "The first input of concat should not be null."));
+    PADDLE_ENFORCE_NOT_NULL(ins[0],
+                            platform::errors::NotFound(
+                                "The first input tensor is not initialized."));
     auto axis = ctx.Attr<int>("axis");
     if (ctx.HasInput("AxisTensor")) {
...
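For context, a minimal Python sketch (not part of the commit; the tensor names and shapes are illustrative) of the misuse the reworded shape check reports: concat inputs whose non-axis dimensions disagree.

import paddle.fluid as fluid

# 'a' has static shape [-1, 3, 4] and 'b' has [-1, 3, 5] (batch dim prepended).
a = fluid.layers.data(name='a', shape=[3, 4], dtype='float32')
b = fluid.layers.data(name='b', shape=[3, 5], dtype='float32')
# Concatenating along axis 1 requires every other dimension to match; the last
# dimension is 4 vs. 5, so ComputeAndCheckShape raises InvalidArgument, and the
# message now names the mismatched dimension index and both input shapes.
out = fluid.layers.concat(input=[a, b], axis=1)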
@@ -32,9 +32,9 @@ class LoDResetOp : public framework::OperatorWithKernel {
       PADDLE_ENFORCE_GT(
           static_cast<int64_t>(level0.size()), 0,
           platform::errors::InvalidArgument(
-              "If Input(Y) not provided, the target lod should be "
-              "specified by attribute `target_lod`. But the size of "
-              "`target_lod` is 0."));
+              "If Input(Y) is not provided, the output's LoD should be "
+              "specified by attribute 'target_lod'. But the size of "
+              "'target_lod' is 0."));
     } else if (ctx->IsRuntime()) {
       ctx->ShareLoD("Y", "Out");
     }
...
@@ -41,10 +41,10 @@ class LoDResetKernel : public framework::OpKernel<T> {
       PADDLE_ENFORCE_EQ(
           static_cast<int64_t>(last_level.back()), in->dims()[0],
           platform::errors::InvalidArgument(
-              "The last value of `Y`'s last level LoD should be equal "
-              "to the first dimension of `X`. But received the last value of "
-              "`Y`'s last level LoD is %d, the first dimension of `X` is "
-              "%d. ",
+              "The last value of Input(Y)'s last level LoD should be equal "
+              "to the first dimension of Input(X). But received the last "
+              "value of Input(Y)'s last level LoD is %d, the first dimension "
+              "of Input(X) is %d.",
               static_cast<int64_t>(last_level.back()), in->dims()[0]));
       out->set_lod(y_lod);
       return;  // early return, since lod already set

@@ -75,19 +75,16 @@ class LoDResetKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(
         static_cast<int64_t>(level0.back()), in->dims()[0],
         platform::errors::InvalidArgument(
-            "The last value of `Target LoD`'s last level LoD should be equal "
-            "to the first dimension of `X`. But received the last value of "
-            "`Target LoD`'s last level LoD is %d, the first dimension of `X` "
-            "is "
-            "%d. ",
-            static_cast<int64_t>(level0.back()), in->dims()[0]));
+            "The last value of 'Target LoD''s last level LoD should be equal "
+            "to the first dimension of Input(X). But received the 'Target LoD' "
+            "is %s, Input(X)'s shape is %s.",
+            framework::make_ddim(level0), in->dims()));
     for (size_t i = 0; i < level0.size() - 1; ++i) {
-      PADDLE_ENFORCE_GE(
-          level0[i + 1], level0[i],
-          platform::errors::InvalidArgument(
-              "Target LoD should be an ascending vector. But the %s element is "
-              "%s and the %s element of Target LoD is %s.",
-              i + 1, level0[i + 1], i, level0[i]));
+      PADDLE_ENFORCE_GE(level0[i + 1], level0[i],
+                        platform::errors::InvalidArgument(
+                            "'Target LoD' should be an ascending "
+                            "vector. But received the Target LoD is %s.",
+                            framework::make_ddim(level0)));
     }
     // cast level0 to size_t
...
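For context, a minimal Python sketch (not part of the commit; shapes and values are illustrative) of the run-time constraints these checks enforce on the target_lod path of lod_reset: the values must be ascending and the last one must equal the first dimension of Input(X).

import numpy as np
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[1], dtype='float32', lod_level=0)
# Valid: [0, 2, 6] is ascending and its last value (6) matches the number of
# rows fed for x below, so no InvalidArgument is raised.
out = fluid.layers.lod_reset(x=x, target_lod=[0, 2, 6])

exe = fluid.Executor(fluid.CPUPlace())
x_np = np.ones([6, 1]).astype('float32')
res, = exe.run(fluid.default_main_program(), feed={'x': x_np},
               fetch_list=[out], return_numpy=False)
print(res.recursive_sequence_lengths())  # expected: [[2, 4]]
# target_lod=[0, 4, 2] (descending) or [0, 2, 5] (last value != 6) would hit
# the InvalidArgument messages rewritten above.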
@@ -6265,11 +6265,9 @@ def lod_reset(x, y=None, target_lod=None):
     helper = LayerHelper("lod_reset", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     if y is not None:
-        if y.lod_level > 0:
-            check_variable_and_dtype(
-                y, 'y', ['float32', 'float64', 'int32', 'int64'], 'lod_reset')
-        else:
-            check_variable_and_dtype(y, 'y', ['int32', 'int64'], 'lod_reset')
+        check_type(y, 'y', (Variable), 'lod_reset')
+        if y.lod_level == 0:
+            check_variable_and_dtype(y, 'y', ['int32'], 'lod_reset')
         helper.append_op(
             type="lod_reset", inputs={'X': x,
                                       'Y': y}, outputs={'Out': out})
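Likewise, a short sketch (again not part of the commit; names are illustrative) of what the tightened Python-side checks above now accept for Input(Y):

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[1], dtype='float32', lod_level=1)
y = fluid.layers.data(name='y', shape=[3], dtype='int32')  # lod_level == 0
out = fluid.layers.lod_reset(x=x, y=y)   # passes check_type and the dtype check
# A float32 dense y, or a plain numpy array passed as y, now raises TypeError
# from check_type / check_variable_and_dtype instead of failing later in C++.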
@@ -6327,6 +6325,9 @@ def lod_append(x, level):
     if (not isinstance(level, Iterable)) and (not isinstance(level, Variable)):
         raise ValueError("Input(level) must be list, tuple or Variable.")
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'lod_append')
     helper = LayerHelper("lod_append", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)

@@ -6335,6 +6336,8 @@ def lod_append(x, level):
     if isinstance(level, Variable):
         inputs['Y'] = level
+        if level.lod_level == 0:
+            check_variable_and_dtype(level, 'level', ['int32'], 'lod_append')
     else:
         attrs['target_lod'] = level
     helper.append_op(
...
@@ -3033,7 +3033,7 @@ class TestBook(LayerTest):
             z = layers.lod_reset(x=x, y=y)
             self.assertTrue(z.lod_level == 2)
             # case 2
-            lod_tensor_in = layers.data(name='lod_in', shape=[1], dtype='int64')
+            lod_tensor_in = layers.data(name='lod_in', shape=[1], dtype='int32')
             z = layers.lod_reset(x=x, y=lod_tensor_in)
             self.assertTrue(z.lod_level == 1)
             # case 3
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.op import Operator
from paddle.fluid.backward import append_backward

class TestLoDAppendAPI(unittest.TestCase):
    def test_api(self, use_cuda=False):
        main_program = Program()
        with fluid.program_guard(main_program):
            x = fluid.layers.data(name='x', shape=[6], dtype='float32')
            result = fluid.layers.lod_append(x, [0, 2, 6])

            x_i = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]).astype("float32")
            for use_cuda in [False, True]:
                if use_cuda and not fluid.core.is_compiled_with_cuda():
                    return
                place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
                exe = fluid.Executor(place)
                # Run the program the layers were built into, not the global
                # default program.
                [out] = exe.run(main_program,
                                feed={'x': x_i},
                                fetch_list=[result],
                                return_numpy=False)
                self.assertEqual(out.recursive_sequence_lengths(), [[2, 4]])

class TestLodAppendOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):

            def test_x_Variable():
                # The input(x) must be a Variable.
                x1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64")
                level1 = [0, 2, 4]
                self.assertRaises(TypeError, fluid.layers.lod_append, x1,
                                  level1)

            def test_level_Variable():
                # The input(level) must be a Variable, list or tuple; a plain
                # int makes lod_append raise ValueError.
                x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32')
                level2 = 2
                self.assertRaises(ValueError, fluid.layers.lod_append, x2,
                                  level2)

            def test_x_dtype():
                # The dtype of input(x) must be float32, float64, int32 or int64.
                for dtype in ["bool", "float16"]:
                    x3 = fluid.layers.data(
                        name='x3_' + dtype, shape=[4], dtype=dtype)
                    level3 = fluid.layers.data(
                        name='level3', shape=[4], dtype='int32', lod_level=2)
                    self.assertRaises(TypeError, fluid.layers.lod_append, x3,
                                      level3)

            def test_level_dtype():
                # A dense (lod_level == 0) input(level) must be int32. Use a
                # distinct variable name per dtype so the repeated layers.data
                # calls do not clash.
                for dtype in ["bool", "float16", "float32", "float64", "int64"]:
                    x4 = fluid.layers.data(
                        name='x4_' + dtype, shape=[4], dtype='float32')
                    level4 = fluid.layers.data(
                        name='level4_' + dtype, shape=[4], dtype=dtype,
                        lod_level=0)
                    self.assertRaises(TypeError, fluid.layers.lod_append, x4,
                                      level4)

            test_x_Variable()
            test_level_Variable()
            test_x_dtype()
            test_level_dtype()


if __name__ == "__main__":
    unittest.main()