Unverified commit 289edf39, authored by LutaoChu, committed by GitHub

elementwise ops error message enhancement; the corresponding Python-side error messages were added in an earlier change.

The following ops receive the kernel error message enhancement:
paddle.fluid.layers.elementwise_add	
paddle.fluid.layers.elementwise_div
paddle.fluid.layers.elementwise_floordiv
paddle.fluid.layers.elementwise_max	
paddle.fluid.layers.elementwise_min	
paddle.fluid.layers.elementwise_mod	
paddle.fluid.layers.elementwise_mul	
paddle.fluid.layers.elementwise_pow	
paddle.fluid.layers.elementwise_sub
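
The change follows one pattern throughout. A minimal sketch of it (illustrative only; "SomeOp" and the checked values are hypothetical, not lines from this commit):

// Sketch of the error-message pattern this commit applies, not part of the
// diff itself: bare PADDLE_ENFORCE / PADDLE_THROW calls are replaced by
// comparison macros or OP_INOUT_CHECK plus a typed error from
// platform::errors, so every failure reports an error class and context.

// Before: condition plus a raw printf-style message, no error class.
//   PADDLE_ENFORCE(axis >= 0, "Axis should be in range [0, %d)", max_dim);

// After: an explicit comparison macro plus a typed, self-describing message.
PADDLE_ENFORCE_GE(
    axis, 0,
    platform::errors::InvalidArgument(
        "Axis should be greater than or equal to 0, but received axis is %d.",
        axis));

// Input/output presence checks collapse into one macro that names the op
// ("SomeOp" is a placeholder here).
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SomeOp");

// Unconditional failures carry a typed error as well.
PADDLE_THROW(platform::errors::Unimplemented("Not implemented when post is 1."));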
Parent e7e7cb5f
@@ -94,15 +94,19 @@ class ElementwiseMulKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto x_var = ctx.InputVar("X");
-    PADDLE_ENFORCE(x_var != nullptr,
-                   "Cannot get input Variable X, variable name = %s",
-                   ctx.InputName("X"));
+    PADDLE_ENFORCE_EQ(x_var != nullptr, true,
+                      platform::errors::InvalidArgument(
+                          "Cannot get input Variable X, Variable name = %s.",
+                          ctx.InputName("X")));
     auto* y = ctx.Input<framework::LoDTensor>("Y");
     framework::Tensor x, *z;
     if (x_var->IsType<framework::SelectedRows>()) {
-      PADDLE_ENFORCE(y->dims().size() == 1 && y->dims()[0] == 1,
-                     "For elementwise_op, if X is Sparse, Y must be scalar.");
+      PADDLE_ENFORCE_EQ(y->dims().size() == 1 && y->dims()[0] == 1, true,
+                        platform::errors::InvalidArgument(
+                            "For elementwise_op, if X is Sparse, Y must be "
+                            "scalar. But received the size of Y = %s.",
+                            y->dims().size()));
       auto& x_sele = x_var->Get<framework::SelectedRows>();
       auto out_sele = ctx.Output<framework::SelectedRows>("Out");
       x = x_sele.value();
@@ -115,8 +119,10 @@ class ElementwiseMulKernel : public framework::OpKernel<T> {
       x = x_var->Get<framework::LoDTensor>();
       z = ctx.Output<framework::LoDTensor>("Out");
     } else {
-      PADDLE_THROW("X's type[%s] is not supported by elementwise_op.",
-                   framework::ToTypeName(x_var->Type()));
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "X's type[%s] is not supported by elementwise_op. X's type should "
+          "be LoDTensor or SelectedRows.",
+          framework::ToTypeName(x_var->Type())));
     }
     z->mutable_data<T>(ctx.GetPlace());
......
@@ -38,35 +38,40 @@ class ElementwiseOp : public framework::OperatorWithKernel {
   using Tensor = framework::Tensor;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of elementwise op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Y"), true,
-                      "Input(Y) of elementwise op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of elementwise op should not be null.");
-    PADDLE_ENFORCE(
-        ctx->GetInputsVarType("Y").front() ==
-            framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s [%s]",
-        ctx->GetInputsVarType("Y").front(), ctx->Inputs("Y").front());
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ElementwiseOp");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOp");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ElementwiseOp");
+    PADDLE_ENFORCE_EQ(
+        ctx->GetInputsVarType("Y").front(),
+        framework::proto::VarType::LOD_TENSOR,
+        platform::errors::InvalidArgument(
+            "The input var's type should be LoDTensor, but the "
+            "received is %s [%s].",
+            ctx->GetInputsVarType("Y").front(), ctx->Inputs("Y").front()));
     if (ctx->GetInputsVarType("X").front() ==
         framework::proto::VarType::SELECTED_ROWS) {
       PADDLE_ENFORCE_EQ(
           ctx->GetInputDim("Y").size(), 1u,
-          "ShapeError: For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
-          "), Y must be scalar. But reveived the dimension of Y = %s",
-          ctx->GetInputDim("Y").size());
+          platform::errors::InvalidArgument(
+              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
+              "), Y must be scalar, the size of Y should be 1. "
+              "But received the size of Y = %s.",
+              ctx->GetInputDim("Y").size()));
       PADDLE_ENFORCE_EQ(
           ctx->GetInputDim("Y")[0], 1,
-          "ShapeError: For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
-          "), Y must be scalar. But reveived the first dimension of Y = %s",
-          ctx->GetInputDim("Y")[0]);
+          platform::errors::InvalidArgument(
+              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
+              "), Y must be scalar, the first dimension of Y should be 1. "
+              "But received the first dimension of Y = %s.",
+              ctx->GetInputDim("Y")[0]));
     } else if (ctx->GetInputsVarType("X").front() !=
                framework::proto::VarType::LOD_TENSOR) {
-      PADDLE_THROW("X's type[%s] is not supported by elementwise_op.",
-                   ctx->GetInputsVarType("X").front());
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Input X's type[%s] is not supported by elementwise_op. Please set "
+          "its type to LOD_TENSOR.",
+          ctx->GetInputsVarType("X").front()));
     }
     if (ctx->GetInputDim("X") == ctx->GetInputDim("Y")) {
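
OP_INOUT_CHECK above bundles the presence test and the op name into a single macro. Conceptually it behaves like the sketch below (illustrative only; MY_OP_INOUT_CHECK and its message are made up here, not Paddle's actual macro definition):

// Conceptual stand-in for OP_INOUT_CHECK (a sketch, not the real macro):
// assert that an input/output was provided and report which op the check
// belongs to via a typed NotFound error.
#define MY_OP_INOUT_CHECK(has, kind, name, op)              \
  PADDLE_ENFORCE_EQ(has, true,                              \
                    platform::errors::NotFound(             \
                        "%s(%s) of %s should not be null.", \
                        kind, name, op))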
@@ -212,9 +217,9 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext *ctx) const override {
     auto out_grad_name = framework::GradVarName("Out");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Y"), true, "Input(Y) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput(out_grad_name), true,
-                      "Input(Out@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(out_grad_name), "Input", out_grad_name,
+                   "ElementwiseOpGrad");
     auto x_grad_name = framework::GradVarName("X");
     auto y_grad_name = framework::GradVarName("Y");
     if (ctx->HasOutput(x_grad_name)) {
@@ -306,12 +311,12 @@ class ElementwiseOpDoubleGradWithoutDXDY
       const framework::ExecutionContext &ctx) const override {
     framework::proto::VarType::Type input_data_type;
     if (ctx.HasInput("DDX") == false) {
-      PADDLE_ENFORCE_EQ(ctx.HasInput("DDY"), true,
-                        "Input(DDY) should not be null");
+      OP_INOUT_CHECK(ctx.HasInput("DDY"), "Input", "DDY",
+                     "ElementwiseOpDoubleGradWithoutDXDY");
       input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDY");
     } else if (ctx.HasInput("DDY") == false) {
-      PADDLE_ENFORCE_EQ(ctx.HasInput("DDX"), true,
-                        "Input(DDX) should not be null");
+      OP_INOUT_CHECK(ctx.HasInput("DDX"), "Input", "DDX",
+                     "ElementwiseOpDoubleGradWithoutDXDY");
       input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
     } else {
       input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
......
@@ -32,9 +32,9 @@ limitations under the License. */
 #define __h2div h2div
 #endif
-#define DIV_ERROR_INFO                                              \
-  "InvalidArgumentError: Integer division by zero encountered in " \
-  "divide.Please check.\n"
+#define DIV_ERROR_INFO                                                      \
+  "InvalidArgumentError: Integer division by zero encountered in divide. " \
+  "Please check.\n"
 namespace paddle {
 namespace operators {
......
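
DIV_ERROR_INFO is the message raised when an integer elementwise divide hits a zero divisor. A hypothetical call site might look like the following (the functor name and its shape are illustrative, not taken from this file):

// Hypothetical call site for DIV_ERROR_INFO (illustrative only): guard an
// integer elementwise divide and fail with the message defined above.
template <typename T>
struct SafeDivFunctor {
  inline T operator()(const T& a, const T& b) const {
    PADDLE_ENFORCE(b != static_cast<T>(0), DIV_ERROR_INFO);  // zero-divisor guard
    return a / b;
  }
};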
@@ -76,12 +76,13 @@ inline void get_mid_dims(const framework::DDim &x_dims,
   }
   for (int i = 0; i < y_dims.size(); ++i) {
     if (x_dims[i + axis] != y_dims[i]) {
-      PADDLE_ENFORCE(y_dims[i] == 1 || x_dims[i + axis] == 1,
-                     "ShapeError: broadcast dimension mismatch. Operands "
-                     "could not be broadcast together with the shape of "
-                     "X = [%s] and the shape of Y = [%s]. Received [%d] "
-                     "in X is not equal to [%d] in Y",
-                     x_dims, y_dims, x_dims[i + axis], y_dims[i]);
+      PADDLE_ENFORCE_EQ(y_dims[i] == 1 || x_dims[i + axis] == 1, true,
+                        platform::errors::InvalidArgument(
+                            "Broadcast dimension mismatch. Operands "
+                            "could not be broadcast together with the shape of "
+                            "X = [%s] and the shape of Y = [%s]. Received [%d] "
+                            "in X is not equal to [%d] in Y.",
+                            x_dims, y_dims, x_dims[i + axis], y_dims[i]));
       *is_run_common_broadcast = 1;
       return;
     }
@@ -119,8 +120,15 @@ inline void GetBroadcastDimsArrays(const framework::DDim &x_dims,
                                    int *x_dims_array, int *y_dims_array,
                                    int *out_dims_array, const int max_dim,
                                    const int axis) {
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is %d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
   if (x_dims.size() > y_dims.size()) {
     std::fill(y_dims_array, y_dims_array + axis, 1);
     if (axis + y_dims.size() < max_dim) {
@@ -138,13 +146,15 @@ inline void GetBroadcastDimsArrays(const framework::DDim &x_dims,
   }
   for (int i = 0; i < max_dim; i++) {
-    PADDLE_ENFORCE(x_dims_array[i] == y_dims_array[i] || x_dims_array[i] <= 1 ||
-                       y_dims_array[i] <= 1,
-                   "ShapeError: broadcast dimension mismatch. Operands could "
-                   "not be broadcast together with the shape of X = [%s] and "
-                   "the shape of Y = [%s]. Received [%d] in X is not equal to "
-                   "[%d] in Y at i:%d",
-                   x_dims, y_dims, x_dims_array[i], y_dims_array[i], i);
+    PADDLE_ENFORCE_EQ(
+        x_dims_array[i] == y_dims_array[i] || x_dims_array[i] <= 1 ||
+            y_dims_array[i] <= 1,
+        true, platform::errors::InvalidArgument(
+                  "Broadcast dimension mismatch. Operands could "
+                  "not be broadcast together with the shape of X = [%s] and "
+                  "the shape of Y = [%s]. Received [%d] in X is not equal to "
+                  "[%d] in Y at i:%d.",
+                  x_dims, y_dims, x_dims_array[i], y_dims_array[i], i));
     if ((x_dims_array[i] > 1 || y_dims_array[i] > 1) ||
         (x_dims_array[i] == 1 && y_dims_array[i] == 1)) {
       out_dims_array[i] = std::max(x_dims_array[i], y_dims_array[i]);
@@ -1690,8 +1700,15 @@ void ElemwiseGradComputeWithBroadcast(
   }
   axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is %d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
   int pre, n, post, is_run_common_broadcast, axis_trim = 0;
   if (is_xsize_larger) {
@@ -1758,8 +1775,15 @@ void CommonElementwiseBroadcastForward(
     int axis, const bool is_xsize_larger = true) {
   int max_dim = std::max(x_dims.size(), y_dims.size());
   axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is %d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
   std::vector<int> x_dims_array(max_dim);
   std::vector<int> y_dims_array(max_dim);
   std::vector<int> out_dims_array(max_dim);
@@ -1848,8 +1872,15 @@ void ElementwiseComputeEx(const framework::ExecutionContext &ctx,
   }
   axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is %d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
   int pre, n, post, is_run_common_broadcast, axis_trim = 0;
   if (is_xsize_larger) {
@@ -2723,7 +2754,9 @@ void FusedElemwiseAndActGradComputeEx(
   const framework::DDim &x_dim = x->dims();
   const framework::DDim &y_dim = y->dims();
   if (UseIntermediateOut) {
-    PADDLE_ENFORCE(intermediate_out, "intermediate_out should not be nullptr");
+    PADDLE_ENFORCE_NOT_NULL(
+        intermediate_out,
+        platform::errors::InvalidArgument("Intermediate out is null pointer."));
   }
   if (x_dim == y_dim) {
     FusedElemwiseAndActGradComputeNoBroadcast<
@@ -2768,9 +2801,11 @@ void FusedElemwiseAndActComputeEx(const framework::ExecutionContext &ctx,
                                   framework::Tensor *out,
                                   framework::Tensor *intermediate_out) {
   if (KeepIntermediateOut) {
-    PADDLE_ENFORCE(intermediate_out,
-                   "The save_intermediate_out is opened, "
-                   "intermediate_out should not be nullptr.");
+    PADDLE_ENFORCE_NOT_NULL(
+        intermediate_out,
+        platform::errors::InvalidArgument(
+            "The save_intermediate_out is opened, intermediate "
+            "out is null pointer."));
   }
   const framework::DDim &x_dim = x.dims();
......
@@ -41,9 +41,10 @@ class ElementwisePowKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     using Tensor = framework::LoDTensor;
     auto* x = ctx.Input<Tensor>("X");
-    PADDLE_ENFORCE(x != nullptr,
-                   "Cannot get input Variable X, variable name = %s",
-                   ctx.InputName("X"));
+    PADDLE_ENFORCE_EQ(x != nullptr, true,
+                      platform::errors::NotFound(
+                          "Cannot get input Variable X, Variable name = %s.",
+                          ctx.InputName("X")));
     auto* y = ctx.Input<Tensor>("Y");
     auto* z = ctx.Output<Tensor>("Out");
     z->mutable_data<T>(ctx.GetPlace());
......
@@ -76,7 +76,9 @@ class ElementwiseMulMKLDNNKernel : public framework::OpKernel<T> {
     get_mid_dims(x_dims, y_dims_untrimmed, axis, &pre, &num, &post,
                  &is_run_common_broadcast);
-    if (post == 1) PADDLE_THROW("Not implemented when post is 1");
+    if (post == 1)
+      PADDLE_THROW(
+          platform::errors::Unimplemented("Not implemented when post is 1."));
     const int64_t n = x_dims[0];
     const int64_t c = x_dims[1];
......
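
Across the files above, three error classes from platform::errors are used: InvalidArgument for bad values or shapes, NotFound for missing variables, and Unimplemented for unsupported code paths. A hedged sketch of choosing between them (none of these lines are from the commit; "scale" is a hypothetical value):

// Hypothetical checks showing the three error classes this commit uses;
// illustrative only, not taken from the diff.
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
                  platform::errors::NotFound("Cannot find input Variable X."));
PADDLE_ENFORCE_GT(scale, 0.0f,
                  platform::errors::InvalidArgument(
                      "scale should be positive, but received %f.", scale));
PADDLE_THROW(platform::errors::Unimplemented("This branch is not implemented."));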