Unverified commit 66dc4ac7, authored by WeiXin and committed by GitHub

modify error message based on comments (#30189)

* modify error message based on comments

* edit code according to review.

* Correct spelling according to review.
Parent 8700a7bd
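All of the hunks below apply the same error-message convention requested in review: a failed check should state what was expected and what was actually received, with the runtime values formatted into the message. The following is a minimal, self-contained sketch of that convention only; the `CheckRankEq` helper and its names are hypothetical illustrations, not Paddle's `PADDLE_ENFORCE_EQ` macro.

```cpp
// Hypothetical stand-in for the "Expected ... But received ..." message style
// used throughout this commit; it is not part of Paddle.
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>

// Throws std::invalid_argument with a message that reports both the
// expected rank and the rank actually received.
void CheckRankEq(const std::string& name, int received, int expected) {
  if (received != expected) {
    std::ostringstream msg;
    msg << "Expected input(" << name << ")'s dimension is " << expected
        << ". But received " << received << ".";
    throw std::invalid_argument(msg.str());
  }
}

int main() {
  try {
    CheckRankEq("X", 2, 2);   // passes: rank matches
    CheckRankEq("H0", 3, 2);  // fails: message carries both values
  } catch (const std::invalid_argument& e) {
    // Prints: Expected input(H0)'s dimension is 2. But received 3.
    std::cerr << e.what() << std::endl;
  }
  return 0;
}
```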
@@ -197,9 +197,9 @@ class Pool2dOpConverter : public OpConverter {
           engine_, Padding, *const_cast<nvinfer1::ITensor *>(input1), pre_pad,
           post_pad);
       PADDLE_ENFORCE_NOT_NULL(
-          pad_layer,
-          platform::errors::Fatal(
-              "pad layer in poolOp converter could not be created."));
+          pad_layer, platform::errors::Fatal(
+                         "Pad layer in poolOp converter could not be "
+                         "created. The pointer to pad layer is `NULL`."));
       input1 = pad_layer->getOutput(0);
     }
     auto *pool_layer = TRT_ENGINE_ADD_LAYER(
......
@@ -44,14 +44,18 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
   auto x_dims = ctx->GetInputDim("X");
   const int M = x_dims[1];
-  PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
-                                          "Input(X)'s rank must be 2."));
+  PADDLE_ENFORCE_EQ(x_dims.size(), 2,
+                    platform::errors::InvalidArgument(
+                        "Expected input(X)'s dimension is 2. But received %d.",
+                        x_dims.size()));
   auto w_dims = ctx->GetInputDim("LSTMWeight");
   const int D = w_dims[1] / 4;
   PADDLE_ENFORCE_EQ(
       w_dims.size(), 2,
-      platform::errors::InvalidArgument("Input(LSTMWeight)'s rank must be 2."));
+      platform::errors::InvalidArgument(
+          "Expected input(LSTMWeight)'s dimension is 2.But received %d.",
+          w_dims.size()));
   PADDLE_ENFORCE_EQ(
       w_dims[0], D + M,
       platform::errors::InvalidArgument(
@@ -77,8 +81,11 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
   if (ctx->HasInput("H0")) {
     auto h_dims = ctx->GetInputDim("H0");
-    PADDLE_ENFORCE_EQ(h_dims.size(), 2UL, platform::errors::InvalidArgument(
-                                              "Input(H0)'s rank must be 2."));
+    PADDLE_ENFORCE_EQ(
+        h_dims.size(), 2UL,
+        platform::errors::InvalidArgument(
+            "Expected input(H0)'s dimension is 2. But received %d.",
+            h_dims.size()));
     if (ctx->IsRuntime() ||
         (framework::product(c_dims) > 0 && framework::product(h_dims) > 0)) {
       PADDLE_ENFORCE_EQ(h_dims, c_dims,
@@ -94,7 +101,9 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
                         "Input(AttentionWeight)'s rank must be 2."));
   PADDLE_ENFORCE_EQ(atten_w_dims[0], M + D,
                     platform::errors::InvalidArgument(
-                        "AttentionWeight shapes must be (%d + %d) * 1.", M, D));
+                        "Expected `AttentionWeight` shape is [(%d + %d), 1]. "
+                        "But received shape = [%d, 1], shape[0] is not %d.",
+                        M, D, atten_w_dims[0], M + D));
   PADDLE_ENFORCE_EQ(atten_w_dims[1], 1,
                     platform::errors::InvalidArgument(
                         "AttentionWeight shapes must be (%d + %d) * 1.", M, D));
......
@@ -50,9 +50,11 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         y_dims.size(), 2UL,
         platform::errors::InvalidArgument("The input(Y) must be a 2D Tensor."));
-    PADDLE_ENFORCE_EQ(weight_dims.size(), 3UL,
-                      platform::errors::InvalidArgument(
-                          "The input(Weight) must be a 3D tensor."));
+    PADDLE_ENFORCE_EQ(
+        weight_dims.size(), 3UL,
+        platform::errors::InvalidArgument("Expected the input(Weight) is a 3D "
+                                          "tensor. But received %dD tensor.",
+                                          weight_dims.size()));
     if (ctx->IsRuntime() || (x_dims[0] > 0 && y_dims[0] > 0)) {
       PADDLE_ENFORCE_EQ(
           x_dims[0], y_dims[0],
......
@@ -43,7 +43,9 @@ class TargetAssignOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         in_dims.size(), 3,
-        platform::errors::InvalidArgument("The rank of Input(X) must be 3."));
+        platform::errors::InvalidArgument(
+            "Expected the rank of Input(X) is 3. But received %d.",
+            in_dims.size()));
     PADDLE_ENFORCE_EQ(mi_dims.size(), 2,
                       platform::errors::InvalidArgument(
                           "The rank of Input(MatchIndices) must be 2."));
......
@@ -54,9 +54,14 @@ class BatchNormMKLDNNHandler
     const float epsilon = ctx.Attr<float>("epsilon");
     const bool fuse_with_relu = ctx.Attr<bool>("fuse_with_relu");
+    std::vector<std::string> DataLayout_error_msg = {"kNHWC", "kNCHW",
+                                                     "kAnyLayout", "kMKLDNN"};
     PADDLE_ENFORCE_EQ(
         x->layout(), DataLayout::kMKLDNN,
-        platform::errors::InvalidArgument("Wrong layout set for X tensor"));
+        platform::errors::InvalidArgument(
+            "Wrong layout set for X tensor. Expected layout is `kMKLDNN`, "
+            "But received %s.",
+            DataLayout_error_msg[static_cast<int>(DataLayout::kMKLDNN)]));
     PADDLE_ENFORCE_NE(
         x->format(), MKLDNNMemoryFormat::undef,
         platform::errors::InvalidArgument("Wrong format set for X tensor"));
......
@@ -374,9 +374,12 @@ class DNNLMatMulKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     if (ctx.HasAttr("head_number")) {
-      PADDLE_ENFORCE_EQ(ctx.Attr<int>("head_number"), 1,
-                        platform::errors::Unimplemented(
-                            "DNNL matmul doesn't support multiple heads."));
+      PADDLE_ENFORCE_EQ(
+          ctx.Attr<int>("head_number"), 1,
+          platform::errors::Unimplemented(
+              "DNNL matmul doesn't support multiple heads. Expected "
+              "head_number=1. But received `head_number` is %d",
+              ctx.Attr<int>("head_number")));
     }
     platform::MKLDNNDeviceContext::tls().log_lib_version();
     ExecuteMatMul<T, T>(ctx);
......
@@ -54,7 +54,9 @@ class BlockingQueue {
     PADDLE_ENFORCE_LT(
         queue_.size(), capacity_,
         platform::errors::PermissionDenied(
-            "The queue size cannot exceed the set queue capacity."));
+            "The queue size cannot exceed the set queue capacity. Expected "
+            "queue size is less than %d. But received %d",
+            capacity_, queue_.size()));
     queue_.push_back(elem);
     receive_cv_.notify_one();
     return true;
@@ -73,7 +75,9 @@ class BlockingQueue {
     PADDLE_ENFORCE_LT(
         queue_.size(), capacity_,
         platform::errors::PermissionDenied(
-            "The queue size cannot exceed the set queue capacity."));
+            "The queue size cannot exceed the set queue capacity. Expected "
+            "queue size is less than %d. But received %d",
+            capacity_, queue_.size()));
     queue_.emplace_back(std::move(elem));
     receive_cv_.notify_one();
     return true;
......
@@ -122,10 +122,13 @@ class ReadOp : public framework::OperatorBase {
     const std::vector<framework::proto::VarType::Type>& var_types =
         reader->VarTypes();
     const std::vector<bool>& need_check_feed = reader->NeedCheckFeed();
-    PADDLE_ENFORCE_EQ(out_arg_names.size(), need_check_feed.size(),
-                      platform::errors::InvalidArgument(
-                          "output size of read_op and the number of fed "
-                          "variables of reader do not match"));
+    PADDLE_ENFORCE_EQ(
+        out_arg_names.size(), need_check_feed.size(),
+        platform::errors::InvalidArgument(
+            "Output size of read_op and the number of fed "
+            "variables of reader do not match. Received size of output is %d, "
+            "number of fed variables of reader is %d",
+            out_arg_names.size(), need_check_feed.size()));
     for (size_t i = 0; i < out_arg_names.size(); ++i) {
       auto* out =
......