Unverified commit c9a88013 authored by yaoxuefeng, committed by GitHub

enhance error messages of lookup_table, merge_ids, data_norm (#27619)

* enhance error messages of lookup_table, merge_ids, data_norm

* fix

* fix error msg in .cu
Parent 9cc5603d
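The change applies one pattern throughout the touched operators: bare message strings passed to PADDLE_THROW and PADDLE_ENFORCE_* are wrapped in a typed error object such as platform::errors::InvalidArgument, so the error class travels with the formatted text. Below is a minimal sketch of the before/after shape, assuming PaddlePaddle's enforce macros from paddle/fluid/platform/enforce.h; the CheckLayout helper is hypothetical and only illustrates the pattern, it is not part of this commit.

#include <string>

#include "paddle/fluid/platform/enforce.h"

namespace paddle {

// Hypothetical helper showing the error-reporting style used in this commit.
void CheckLayout(const std::string &data_layout_str) {
  if (data_layout_str != "NCHW" && data_layout_str != "NHWC") {
    // Before: PADDLE_THROW("Unknown storage order: %s", data_layout_str);
    // After: the formatted message is wrapped in a typed error, so callers
    // can tell an invalid-argument failure from other error kinds.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Unknown storage order: %s", data_layout_str));
  }
}

}  // namespace paddle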
@@ -388,7 +388,8 @@ class DataNormKernel<platform::CPUDeviceContext, T>
break;
}
default:
PADDLE_THROW("Unknown storage order: %d", data_layout);
PADDLE_THROW(platform::errors::InvalidArgument(
"Unknown storage order: %d", data_layout));
}
}
};
@@ -464,7 +465,8 @@ class DataNormGradOp : public framework::OperatorWithKernel {
const framework::ExecutionContext &ctx) const override {
const auto *var = ctx.InputVar(framework::GradVarName("Y"));
if (var == nullptr) {
PADDLE_THROW("can't find Y@GRAD");
PADDLE_THROW(platform::errors::InvalidArgument(
"Y@GRAD can not be found for computation"));
}
const Tensor *t = nullptr;
if (var->IsType<Tensor>()) {
@@ -473,7 +475,8 @@ class DataNormGradOp : public framework::OperatorWithKernel {
t = &var->Get<LoDTensor>();
}
if (t == nullptr) {
PADDLE_THROW("can't find Y@GRAD");
PADDLE_THROW(platform::errors::InvalidArgument(
"Y@GRAD can not be found for computation"));
}
// TODO(pzelazko-intel): enable MKLDNN layout when it's ready
@@ -696,7 +699,8 @@ class DataNormGradKernel<platform::CPUDeviceContext, T>
break;
}
default:
PADDLE_THROW("Unknown storage order: %s", data_layout_str);
PADDLE_THROW(platform::errors::InvalidArgument(
"Unknown storage order: %s", data_layout_str));
}
}
};
......
@@ -16,6 +16,7 @@ limitations under the License. */
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor_util.h"
@@ -30,7 +31,8 @@ class MergeIdsOpKernel : public framework::OpKernel<T> {
void Compute(const framework::ExecutionContext &ctx) const override {
auto place = ctx.GetPlace();
if (!platform::is_cpu_place(place)) {
PADDLE_THROW("MergeIds do not support GPU kernel");
PADDLE_THROW(platform::errors::InvalidArgument(
"MergeIds do not support GPU kernel"));
}
const auto ids = ctx.MultiInput<framework::LoDTensor>("Ids");
......
@@ -161,11 +161,12 @@ class LookupTableGradCUDAKernel : public framework::OpKernel<T> {
auto d_output_dims_2d =
framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d,
platform::errors::InvalidArgument(
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(), d_output_dims_2d);
d_table_value->dims(), d_output_dims_2d));
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
......
@@ -159,9 +159,9 @@ class LookupTableGradKernel : public framework::OpKernel<T> {
auto *table_t = context.Input<SelectedRows>("W");
table_dim = table_t->value().dims();
} else {
PADDLE_THROW(
PADDLE_THROW(platform::errors::InvalidArgument(
"The parameter W of a LookupTable "
"must be either LoDTensor or SelectedRows");
"must be either LoDTensor or SelectedRows"));
}
int64_t padding_idx = context.Attr<int64_t>("padding_idx");
......
@@ -28,11 +28,15 @@ class LookupTableV2Op : public framework::OperatorWithKernel {
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("W"), true,
"Input(W) of LookupTableV2Op should not be null.");
platform::errors::InvalidArgument(
"Input(W) of LookupTableV2Op should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("Ids"), true,
"Input(Ids) of LookupTableV2Op should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
"Output(Out) of LookupTableV2Op should not be null.");
platform::errors::InvalidArgument(
"Input(Ids) of LookupTableV2Op should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("Out"), true,
platform::errors::InvalidArgument(
"Output(Out) of LookupTableV2Op should not be null."));
auto table_dims = ctx->GetInputDim("W");
auto ids_dims = ctx->GetInputDim("Ids");
@@ -40,10 +44,11 @@ class LookupTableV2Op : public framework::OperatorWithKernel {
VLOG(5) << "ids rank is " << ids_rank << std::endl;
PADDLE_ENFORCE_EQ(
table_dims.size(), 2,
platform::errors::InvalidArgument(
"ShapeError: The dimensions of the 'lookup table' must be 2. "
"But received lookup table's dimensions = %d, "
"lookup table's shape = [%s].",
table_dims.size(), table_dims);
table_dims.size(), table_dims));
auto output_dims = framework::vectorize(ids_dims);
output_dims.push_back(table_dims[1]);
......
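For reference, the shape check introduced above reads as follows when pulled out into a standalone snippet; a minimal sketch assuming the same enforce macros and paddle::framework::DDim, with a hypothetical CheckTableRank helper that is not part of this commit.

#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {

// Hypothetical helper mirroring the InferShape check on the lookup table rank.
void CheckTableRank(const framework::DDim &table_dims) {
  PADDLE_ENFORCE_EQ(
      table_dims.size(), 2,
      platform::errors::InvalidArgument(
          "ShapeError: The dimensions of the 'lookup table' must be 2. "
          "But received lookup table's dimensions = %d, "
          "lookup table's shape = [%s].",
          table_dims.size(), table_dims));
}

}  // namespace paddle

On failure, the macro raises an exception tagged as InvalidArgument and includes the formatted dimensions, which is what replaces the plain message strings removed in this commit.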
@@ -191,11 +191,12 @@ class LookupTableV2GradCUDAKernel : public framework::OpKernel<T> {
auto d_output_dims_2d =
framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d,
platform::errors::InvalidArgument(
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(), d_output_dims_2d);
d_table_value->dims(), d_output_dims_2d));
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
......
@@ -74,16 +74,18 @@ class LookupTableV2Kernel : public framework::OpKernel<T> {
} else {
PADDLE_ENFORCE_LT(
ids[i], row_number,
platform::errors::InvalidArgument(
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input "
"value.",
row_number, ids[i]);
row_number, ids[i]));
PADDLE_ENFORCE_GE(
ids[i], 0,
platform::errors::InvalidArgument(
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input "
"value.",
row_number, ids[i]);
row_number, ids[i]));
memcpy(output + i * row_width, table + ids[i] * row_width,
row_width * sizeof(T));
}
@@ -101,13 +103,16 @@ class LookupTableV2Kernel : public framework::OpKernel<T> {
} else {
PADDLE_ENFORCE_GE(
ids[i], 0,
platform::errors::InvalidArgument(
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0. But received %ld",
ids[i]);
ids[i]));
auto id_index = table_t.Index(ids[i]);
PADDLE_ENFORCE_GE(id_index, 0,
PADDLE_ENFORCE_GE(
id_index, 0,
platform::errors::InvalidArgument(
"the input key should be exists. But received %d.",
id_index);
id_index));
blas.VCOPY(row_width, table + id_index * row_width,
output + i * row_width);
}
@@ -128,9 +133,9 @@ class LookupTableV2GradKernel : public framework::OpKernel<T> {
auto *table_t = context.Input<SelectedRows>("W");
table_dim = table_t->value().dims();
} else {
PADDLE_THROW(
PADDLE_THROW(platform::errors::InvalidArgument(
"The parameter W of a LookupTableV2 "
"must be either LoDTensor or SelectedRows");
"must be either LoDTensor or SelectedRows"));
}
int64_t padding_idx = context.Attr<int64_t>("padding_idx");
@@ -170,11 +175,12 @@ class LookupTableV2GradKernel : public framework::OpKernel<T> {
auto d_output_dims_2d =
framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d,
platform::errors::InvalidArgument(
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(), d_output_dims_2d);
d_table_value->dims(), d_output_dims_2d));
memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel());
} else {
@@ -211,14 +217,18 @@ class LookupTableV2GradKernel : public framework::OpKernel<T> {
} else {
PADDLE_ENFORCE_LT(
ids_data[i], N,
platform::errors::InvalidArgument(
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, ids_data[i]);
"expected >= 0 and < %ld, but got %ld. Please check input "
"value.",
N, ids_data[i]));
PADDLE_ENFORCE_GE(
ids_data[i], 0,
platform::errors::InvalidArgument(
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, ids_data[i]);
"expected >= 0 and < %ld, but got %ld. Please check input "
"value.",
N, ids_data[i]));
for (int j = 0; j < D; ++j) {
d_table_data[ids_data[i] * D + j] += d_output_data[i * D + j];
}
......