Unverified · Commit 66dc4ac7 authored by WeiXin, committed by GitHub

Modify error message based on comments (#30189)

* Modify error message based on comments.

* Edit code according to review.

* Correct spelling according to review.
Parent: 8700a7bd
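Every hunk below applies the same convention: when an enforcement check fails, the message should state both the expected value and the value actually received, instead of a bare "must be" statement. The following is a minimal standalone sketch of that convention in plain C++; EnforceEq is a hypothetical helper written for illustration only, not Paddle's actual PADDLE_ENFORCE_EQ macro or platform::errors API.

    // Minimal standalone sketch of the error-message convention this commit
    // applies: report the expected value and the value actually received.
    // Illustration only; EnforceEq is a hypothetical helper, not Paddle's API.
    #include <cstdio>
    #include <stdexcept>
    #include <string>

    // Throws with an "Expected ... But received ..." message when the check fails.
    void EnforceEq(int actual, int expected, const std::string& name) {
      if (actual != expected) {
        char buf[128];
        std::snprintf(buf, sizeof(buf),
                      "Expected %s's dimension is %d. But received %d.",
                      name.c_str(), expected, actual);
        throw std::invalid_argument(buf);
      }
    }

    int main() {
      try {
        EnforceEq(/*actual=*/3, /*expected=*/2, "input(X)");
      } catch (const std::invalid_argument& e) {
        // Prints: Expected input(X)'s dimension is 2. But received 3.
        std::printf("%s\n", e.what());
      }
      return 0;
    }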
@@ -197,9 +197,9 @@ class Pool2dOpConverter : public OpConverter {
           engine_, Padding, *const_cast<nvinfer1::ITensor *>(input1), pre_pad,
           post_pad);
       PADDLE_ENFORCE_NOT_NULL(
-          pad_layer,
-          platform::errors::Fatal(
-              "pad layer in poolOp converter could not be created."));
+          pad_layer, platform::errors::Fatal(
+                         "Pad layer in poolOp converter could not be "
+                         "created. The pointer to pad layer is `NULL`."));
       input1 = pad_layer->getOutput(0);
     }
     auto *pool_layer = TRT_ENGINE_ADD_LAYER(
...
@@ -44,14 +44,18 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
   auto x_dims = ctx->GetInputDim("X");
   const int M = x_dims[1];
-  PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
-                                          "Input(X)'s rank must be 2."));
+  PADDLE_ENFORCE_EQ(x_dims.size(), 2,
+                    platform::errors::InvalidArgument(
+                        "Expected input(X)'s dimension is 2. But received %d.",
+                        x_dims.size()));
   auto w_dims = ctx->GetInputDim("LSTMWeight");
   const int D = w_dims[1] / 4;
   PADDLE_ENFORCE_EQ(
       w_dims.size(), 2,
-      platform::errors::InvalidArgument("Input(LSTMWeight)'s rank must be 2."));
+      platform::errors::InvalidArgument(
+          "Expected input(LSTMWeight)'s dimension is 2.But received %d.",
+          w_dims.size()));
   PADDLE_ENFORCE_EQ(
       w_dims[0], D + M,
       platform::errors::InvalidArgument(
@@ -77,8 +81,11 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
   if (ctx->HasInput("H0")) {
     auto h_dims = ctx->GetInputDim("H0");
-    PADDLE_ENFORCE_EQ(h_dims.size(), 2UL, platform::errors::InvalidArgument(
-                                              "Input(H0)'s rank must be 2."));
+    PADDLE_ENFORCE_EQ(
+        h_dims.size(), 2UL,
+        platform::errors::InvalidArgument(
+            "Expected input(H0)'s dimension is 2. But received %d.",
+            h_dims.size()));
     if (ctx->IsRuntime() ||
         (framework::product(c_dims) > 0 && framework::product(h_dims) > 0)) {
       PADDLE_ENFORCE_EQ(h_dims, c_dims,
@@ -94,7 +101,9 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
                         "Input(AttentionWeight)'s rank must be 2."));
   PADDLE_ENFORCE_EQ(atten_w_dims[0], M + D,
                     platform::errors::InvalidArgument(
-                        "AttentionWeight shapes must be (%d + %d) * 1.", M, D));
+                        "Expected `AttentionWeight` shape is [(%d + %d), 1]. "
+                        "But received shape = [%d, 1], shape[0] is not %d.",
+                        M, D, atten_w_dims[0], M + D));
   PADDLE_ENFORCE_EQ(atten_w_dims[1], 1,
                     platform::errors::InvalidArgument(
                         "AttentionWeight shapes must be (%d + %d) * 1.", M, D));
...
@@ -50,9 +50,11 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         y_dims.size(), 2UL,
         platform::errors::InvalidArgument("The input(Y) must be a 2D Tensor."));
-    PADDLE_ENFORCE_EQ(weight_dims.size(), 3UL,
-                      platform::errors::InvalidArgument(
-                          "The input(Weight) must be a 3D tensor."));
+    PADDLE_ENFORCE_EQ(
+        weight_dims.size(), 3UL,
+        platform::errors::InvalidArgument("Expected the input(Weight) is a 3D "
+                                          "tensor. But received %dD tensor.",
+                                          weight_dims.size()));
     if (ctx->IsRuntime() || (x_dims[0] > 0 && y_dims[0] > 0)) {
       PADDLE_ENFORCE_EQ(
           x_dims[0], y_dims[0],
...
@@ -43,7 +43,9 @@ class TargetAssignOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         in_dims.size(), 3,
-        platform::errors::InvalidArgument("The rank of Input(X) must be 3."));
+        platform::errors::InvalidArgument(
+            "Expected the rank of Input(X) is 3. But received %d.",
+            in_dims.size()));
     PADDLE_ENFORCE_EQ(mi_dims.size(), 2,
                       platform::errors::InvalidArgument(
                           "The rank of Input(MatchIndices) must be 2."));
...
@@ -54,9 +54,14 @@ class BatchNormMKLDNNHandler
     const float epsilon = ctx.Attr<float>("epsilon");
     const bool fuse_with_relu = ctx.Attr<bool>("fuse_with_relu");
+    std::vector<std::string> DataLayout_error_msg = {"kNHWC", "kNCHW",
+                                                     "kAnyLayout", "kMKLDNN"};
     PADDLE_ENFORCE_EQ(
         x->layout(), DataLayout::kMKLDNN,
-        platform::errors::InvalidArgument("Wrong layout set for X tensor"));
+        platform::errors::InvalidArgument(
+            "Wrong layout set for X tensor. Expected layout is `kMKLDNN`, "
+            "But received %s.",
+            DataLayout_error_msg[static_cast<int>(DataLayout::kMKLDNN)]));
     PADDLE_ENFORCE_NE(
         x->format(), MKLDNNMemoryFormat::undef,
         platform::errors::InvalidArgument("Wrong format set for X tensor"));
...
@@ -374,9 +374,12 @@ class DNNLMatMulKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     if (ctx.HasAttr("head_number")) {
-      PADDLE_ENFORCE_EQ(ctx.Attr<int>("head_number"), 1,
-                        platform::errors::Unimplemented(
-                            "DNNL matmul doesn't support multiple heads."));
+      PADDLE_ENFORCE_EQ(
+          ctx.Attr<int>("head_number"), 1,
+          platform::errors::Unimplemented(
+              "DNNL matmul doesn't support multiple heads. Expected "
+              "head_number=1. But received `head_number` is %d",
+              ctx.Attr<int>("head_number")));
     }
     platform::MKLDNNDeviceContext::tls().log_lib_version();
     ExecuteMatMul<T, T>(ctx);
...
@@ -54,7 +54,9 @@ class BlockingQueue {
     PADDLE_ENFORCE_LT(
         queue_.size(), capacity_,
         platform::errors::PermissionDenied(
-            "The queue size cannot exceed the set queue capacity."));
+            "The queue size cannot exceed the set queue capacity. Expected "
+            "queue size is less than %d. But received %d",
+            capacity_, queue_.size()));
     queue_.push_back(elem);
     receive_cv_.notify_one();
     return true;
@@ -73,7 +75,9 @@ class BlockingQueue {
     PADDLE_ENFORCE_LT(
         queue_.size(), capacity_,
         platform::errors::PermissionDenied(
-            "The queue size cannot exceed the set queue capacity."));
+            "The queue size cannot exceed the set queue capacity. Expected "
+            "queue size is less than %d. But received %d",
+            capacity_, queue_.size()));
     queue_.emplace_back(std::move(elem));
     receive_cv_.notify_one();
     return true;
...
@@ -122,10 +122,13 @@ class ReadOp : public framework::OperatorBase {
     const std::vector<framework::proto::VarType::Type>& var_types =
         reader->VarTypes();
     const std::vector<bool>& need_check_feed = reader->NeedCheckFeed();
-    PADDLE_ENFORCE_EQ(out_arg_names.size(), need_check_feed.size(),
-                      platform::errors::InvalidArgument(
-                          "output size of read_op and the number of fed "
-                          "variables of reader do not match"));
+    PADDLE_ENFORCE_EQ(
+        out_arg_names.size(), need_check_feed.size(),
+        platform::errors::InvalidArgument(
+            "Output size of read_op and the number of fed "
+            "variables of reader do not match. Received size of output is %d, "
+            "number of fed variables of reader is %d",
+            out_arg_names.size(), need_check_feed.size()));
     for (size_t i = 0; i < out_arg_names.size(); ++i) {
       auto* out =
...