Unverified · Commit 289edf39 · Authored by: LutaoChu · Committed by: GitHub

Elementwise ops error message enhancement; the Python-side error messages were added in an earlier change.

The following ops receive the kernel-level error message enhancement:
paddle.fluid.layers.elementwise_add	
paddle.fluid.layers.elementwise_div
paddle.fluid.layers.elementwise_floordiv
paddle.fluid.layers.elementwise_max	
paddle.fluid.layers.elementwise_min	
paddle.fluid.layers.elementwise_mod	
paddle.fluid.layers.elementwise_mul	
paddle.fluid.layers.elementwise_pow	
paddle.fluid.layers.elementwise_sub
Parent e7e7cb5f
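Throughout the diff below, bare PADDLE_ENFORCE(cond, msg, ...) calls become PADDLE_ENFORCE_EQ(cond, true, platform::errors::<Type>(msg, ...)), so each failure carries a typed error object and states both the expectation and the value actually received. A minimal, self-contained C++ sketch of that pattern (FormatError and ENFORCE_EQ are toy stand-ins, not Paddle's real enforce.h machinery):

#include <cstdio>
#include <stdexcept>
#include <string>

// Build a message the way the enhanced checks do: state the expectation,
// then report the value actually received.
template <typename... Args>
std::string FormatError(const char* fmt, Args... args) {
  char buf[512];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return std::string(buf);
}

// Toy stand-in for PADDLE_ENFORCE_EQ: compare, and on failure throw a typed
// exception carrying the formatted message.
#define ENFORCE_EQ(a, b, msg)                         \
  do {                                                \
    if ((a) != (b)) throw std::invalid_argument(msg); \
  } while (0)

int main() {
  int axis = 5, max_dim = 3;
  try {
    // The old style reported only "Axis should be in range [0, %d)"; the
    // new style names the bound and the received value explicitly.
    ENFORCE_EQ(axis < max_dim, true,
               FormatError("Axis should be less than %d, but received axis is %d.",
                           max_dim, axis));
  } catch (const std::invalid_argument& e) {
    std::printf("%s\n", e.what());  // Axis should be less than 3, but received axis is 5.
  }
  return 0;
}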
@@ -94,15 +94,19 @@ class ElementwiseMulKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto x_var = ctx.InputVar("X");
-    PADDLE_ENFORCE(x_var != nullptr,
-                   "Cannot get input Variable X, variable name = %s",
-                   ctx.InputName("X"));
+    PADDLE_ENFORCE_EQ(x_var != nullptr, true,
+                      platform::errors::InvalidArgument(
+                          "Cannot get input Variable X, Variable name = %s.",
+                          ctx.InputName("X")));
     auto* y = ctx.Input<framework::LoDTensor>("Y");
     framework::Tensor x, *z;
     if (x_var->IsType<framework::SelectedRows>()) {
-      PADDLE_ENFORCE(y->dims().size() == 1 && y->dims()[0] == 1,
-                     "For elementwise_op, if X is Sparse, Y must be scalar.");
+      PADDLE_ENFORCE_EQ(y->dims().size() == 1 && y->dims()[0] == 1, true,
+                        platform::errors::InvalidArgument(
+                            "For elementwise_op, if X is Sparse, Y must be "
+                            "scalar. But received the size of Y = %s.",
+                            y->dims().size()));
       auto& x_sele = x_var->Get<framework::SelectedRows>();
       auto out_sele = ctx.Output<framework::SelectedRows>("Out");
       x = x_sele.value();
@@ -115,8 +119,10 @@ class ElementwiseMulKernel : public framework::OpKernel<T> {
     x = x_var->Get<framework::LoDTensor>();
     z = ctx.Output<framework::LoDTensor>("Out");
   } else {
-    PADDLE_THROW("X's type[%s] is not supported by elementwise_op.",
-                 framework::ToTypeName(x_var->Type()));
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "X's type[%s] is not supported by elementwise_op. X's type should be "
+        "LoDTensor or SelectedRows.",
+        framework::ToTypeName(x_var->Type())));
   }
   z->mutable_data<T>(ctx.GetPlace());
...
@@ -38,35 +38,40 @@ class ElementwiseOp : public framework::OperatorWithKernel {
   using Tensor = framework::Tensor;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of elementwise op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Y"), true,
-                      "Input(Y) of elementwise op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of elementwise op should not be null.");
-
-    PADDLE_ENFORCE(
-        ctx->GetInputsVarType("Y").front() ==
-            framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s [%s]",
-        ctx->GetInputsVarType("Y").front(), ctx->Inputs("Y").front());
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ElementwiseOp");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOp");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ElementwiseOp");
+
+    PADDLE_ENFORCE_EQ(
+        ctx->GetInputsVarType("Y").front(),
+        framework::proto::VarType::LOD_TENSOR,
+        platform::errors::InvalidArgument(
+            "The input var's type should be LoDTensor, but the "
+            "received is %s [%s].",
+            ctx->GetInputsVarType("Y").front(), ctx->Inputs("Y").front()));

     if (ctx->GetInputsVarType("X").front() ==
         framework::proto::VarType::SELECTED_ROWS) {
       PADDLE_ENFORCE_EQ(
           ctx->GetInputDim("Y").size(), 1u,
-          "ShapeError: For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
-          "), Y must be scalar. But reveived the dimension of Y = %s",
-          ctx->GetInputDim("Y").size());
+          platform::errors::InvalidArgument(
+              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
+              "), Y must be scalar, the size of Y should be 1. "
+              "But received the size of Y = %s.",
+              ctx->GetInputDim("Y").size()));
       PADDLE_ENFORCE_EQ(
           ctx->GetInputDim("Y")[0], 1,
-          "ShapeError: For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
-          "), Y must be scalar. But reveived the first dimension of Y = %s",
-          ctx->GetInputDim("Y")[0]);
+          platform::errors::InvalidArgument(
+              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
+              "), Y must be scalar, the first dimension of Y should be 1. "
+              "But received the first dimension of Y = %s.",
+              ctx->GetInputDim("Y")[0]));
     } else if (ctx->GetInputsVarType("X").front() !=
                framework::proto::VarType::LOD_TENSOR) {
-      PADDLE_THROW("X's type[%s] is not supported by elementwise_op.",
-                   ctx->GetInputsVarType("X").front());
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Input X's type[%s] is not supported by elementwise_op. Please set "
+          "its type to LOD_TENSOR.",
+          ctx->GetInputsVarType("X").front()));
     }

     if (ctx->GetInputDim("X") == ctx->GetInputDim("Y")) {
@@ -212,9 +217,9 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext *ctx) const override {
     auto out_grad_name = framework::GradVarName("Out");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Y"), true, "Input(Y) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput(out_grad_name), true,
-                      "Input(Out@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(out_grad_name), "Input", out_grad_name,
+                   "ElementwiseOpGrad");
     auto x_grad_name = framework::GradVarName("X");
     auto y_grad_name = framework::GradVarName("Y");
     if (ctx->HasOutput(x_grad_name)) {
@@ -306,12 +311,12 @@ class ElementwiseOpDoubleGradWithoutDXDY
       const framework::ExecutionContext &ctx) const override {
     framework::proto::VarType::Type input_data_type;
     if (ctx.HasInput("DDX") == false) {
-      PADDLE_ENFORCE_EQ(ctx.HasInput("DDY"), true,
-                        "Input(DDY) should not be null");
+      OP_INOUT_CHECK(ctx.HasInput("DDY"), "Input", "DDY",
+                     "ElementwiseOpDoubleGradWithoutDXDY");
       input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDY");
     } else if (ctx.HasInput("DDY") == false) {
-      PADDLE_ENFORCE_EQ(ctx.HasInput("DDX"), true,
-                        "Input(DDX) should not be null");
+      OP_INOUT_CHECK(ctx.HasInput("DDX"), "Input", "DDX",
+                     "ElementwiseOpDoubleGradWithoutDXDY");
       input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
     } else {
       input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
...
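OP_INOUT_CHECK collapses the hand-written HasInput/HasOutput enforcements into a single macro that names the role, the variable, and the operator. A self-contained sketch assuming a definition along these lines (OP_INOUT_CHECK_SKETCH is hypothetical; the real macro lives in Paddle's enforce header and its message may differ):

#include <cstdio>
#include <stdexcept>
#include <string>

// Hypothetical expansion: on a missing input/output, throw with a message
// that names the role, the variable, and the operator.
#define OP_INOUT_CHECK_SKETCH(cond, role, name, op)                \
  do {                                                             \
    if (!(cond))                                                   \
      throw std::runtime_error(std::string("No ") + (role) + "(" + \
                               (name) + ") found for " + (op) +    \
                               " operator.");                      \
  } while (0)

int main() {
  bool has_x = false;  // stands in for ctx->HasInput("X") returning false
  try {
    OP_INOUT_CHECK_SKETCH(has_x, "Input", "X", "ElementwiseOp");
  } catch (const std::runtime_error& e) {
    std::printf("%s\n", e.what());  // No Input(X) found for ElementwiseOp operator.
  }
  return 0;
}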
@@ -32,9 +32,9 @@ limitations under the License. */
 #define __h2div h2div
 #endif
-#define DIV_ERROR_INFO                                              \
-  "InvalidArgumentError: Integer division by zero encountered in " \
-  "divide.Please check.\n"
+#define DIV_ERROR_INFO                                                      \
+  "InvalidArgumentError: Integer division by zero encountered in divide. " \
+  "Please check.\n"
 namespace paddle {
 namespace operators {
...
@@ -76,12 +76,13 @@ inline void get_mid_dims(const framework::DDim &x_dims,
   }
   for (int i = 0; i < y_dims.size(); ++i) {
     if (x_dims[i + axis] != y_dims[i]) {
-      PADDLE_ENFORCE(y_dims[i] == 1 || x_dims[i + axis] == 1,
-                     "ShapeError: broadcast dimension mismatch. Operands "
-                     "could not be broadcast together with the shape of "
-                     "X = [%s] and the shape of Y = [%s]. Received [%d] "
-                     "in X is not equal to [%d] in Y",
-                     x_dims, y_dims, x_dims[i + axis], y_dims[i]);
+      PADDLE_ENFORCE_EQ(y_dims[i] == 1 || x_dims[i + axis] == 1, true,
+                        platform::errors::InvalidArgument(
+                            "Broadcast dimension mismatch. Operands "
+                            "could not be broadcast together with the shape of "
+                            "X = [%s] and the shape of Y = [%s]. Received [%d] "
+                            "in X is not equal to [%d] in Y.",
+                            x_dims, y_dims, x_dims[i + axis], y_dims[i]));
       *is_run_common_broadcast = 1;
       return;
     }
@@ -119,8 +120,15 @@ inline void GetBroadcastDimsArrays(const framework::DDim &x_dims,
                                    int *x_dims_array, int *y_dims_array,
                                    int *out_dims_array, const int max_dim,
                                    const int axis) {
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is %d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
   if (x_dims.size() > y_dims.size()) {
     std::fill(y_dims_array, y_dims_array + axis, 1);
     if (axis + y_dims.size() < max_dim) {
@@ -138,13 +146,15 @@ inline void GetBroadcastDimsArrays(const framework::DDim &x_dims,
   }
   for (int i = 0; i < max_dim; i++) {
-    PADDLE_ENFORCE(x_dims_array[i] == y_dims_array[i] || x_dims_array[i] <= 1 ||
-                       y_dims_array[i] <= 1,
-                   "ShapeError: broadcast dimension mismatch. Operands could "
-                   "not be broadcast together with the shape of X = [%s] and "
-                   "the shape of Y = [%s]. Received [%d] in X is not equal to "
-                   "[%d] in Y at i:%d",
-                   x_dims, y_dims, x_dims_array[i], y_dims_array[i], i);
+    PADDLE_ENFORCE_EQ(
+        x_dims_array[i] == y_dims_array[i] || x_dims_array[i] <= 1 ||
+            y_dims_array[i] <= 1,
+        true, platform::errors::InvalidArgument(
+                  "Broadcast dimension mismatch. Operands could "
+                  "not be broadcast together with the shape of X = [%s] and "
+                  "the shape of Y = [%s]. Received [%d] in X is not equal to "
+                  "[%d] in Y at i:%d.",
+                  x_dims, y_dims, x_dims_array[i], y_dims_array[i], i));
     if ((x_dims_array[i] > 1 || y_dims_array[i] > 1) ||
         (x_dims_array[i] == 1 && y_dims_array[i] == 1)) {
       out_dims_array[i] = std::max(x_dims_array[i], y_dims_array[i]);
@@ -1690,8 +1700,15 @@ void ElemwiseGradComputeWithBroadcast(
   }
   axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is %d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
   int pre, n, post, is_run_common_broadcast, axis_trim = 0;
   if (is_xsize_larger) {
@@ -1758,8 +1775,15 @@ void CommonElementwiseBroadcastForward(
     int axis, const bool is_xsize_larger = true) {
   int max_dim = std::max(x_dims.size(), y_dims.size());
   axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is %d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
   std::vector<int> x_dims_array(max_dim);
   std::vector<int> y_dims_array(max_dim);
   std::vector<int> out_dims_array(max_dim);
@@ -1848,8 +1872,15 @@ void ElementwiseComputeEx(const framework::ExecutionContext &ctx,
   }
   axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
-  PADDLE_ENFORCE_GE(axis, 0, "Axis should be in range [0, %d)", axis);
-  PADDLE_ENFORCE_LT(axis, max_dim, "Axis should be in range [0, %d)", axis);
+  PADDLE_ENFORCE_GE(
+      axis, 0,
+      platform::errors::InvalidArgument(
+          "Axis should be greater than or equal to 0, but received axis is %d.",
+          axis));
+  PADDLE_ENFORCE_LT(axis, max_dim,
+                    platform::errors::InvalidArgument(
+                        "Axis should be less than %d, but received axis is %d.",
+                        max_dim, axis));
   int pre, n, post, is_run_common_broadcast, axis_trim = 0;
   if (is_xsize_larger) {
@@ -2723,7 +2754,9 @@ void FusedElemwiseAndActGradComputeEx(
   const framework::DDim &x_dim = x->dims();
   const framework::DDim &y_dim = y->dims();
   if (UseIntermediateOut) {
-    PADDLE_ENFORCE(intermediate_out, "intermediate_out should not be nullptr");
+    PADDLE_ENFORCE_NOT_NULL(
+        intermediate_out,
+        platform::errors::InvalidArgument("Intermediate out is null pointer."));
   }
   if (x_dim == y_dim) {
     FusedElemwiseAndActGradComputeNoBroadcast<
@@ -2768,9 +2801,11 @@ void FusedElemwiseAndActComputeEx(const framework::ExecutionContext &ctx,
                                   framework::Tensor *out,
                                   framework::Tensor *intermediate_out) {
   if (KeepIntermediateOut) {
-    PADDLE_ENFORCE(intermediate_out,
-                   "The save_intermediate_out is opened, "
-                   "intermediate_out should not be nullptr.");
+    PADDLE_ENFORCE_NOT_NULL(
+        intermediate_out,
+        platform::errors::InvalidArgument(
+            "The save_intermediate_out is opened, intermediate "
+            "out is null pointer."));
   }
   const framework::DDim &x_dim = x.dims();
...
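The same greater-equal/less-than pair on axis is repeated at four call sites in this file. A hypothetical helper, not part of this patch, shows how the check could be factored once:

#include <cstdio>
#include <stdexcept>
#include <string>

// Hypothetical helper factoring the repeated axis range checks.
inline void CheckAxis(int axis, int max_dim) {
  if (axis < 0)
    throw std::invalid_argument(
        "Axis should be greater than or equal to 0, but received axis is " +
        std::to_string(axis) + ".");
  if (axis >= max_dim)
    throw std::invalid_argument(
        "Axis should be less than " + std::to_string(max_dim) +
        ", but received axis is " + std::to_string(axis) + ".");
}

int main() {
  try {
    CheckAxis(-1, 3);  // out of range: triggers the first check
  } catch (const std::invalid_argument& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}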
@@ -41,9 +41,10 @@ class ElementwisePowKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     using Tensor = framework::LoDTensor;
     auto* x = ctx.Input<Tensor>("X");
-    PADDLE_ENFORCE(x != nullptr,
-                   "Cannot get input Variable X, variable name = %s",
-                   ctx.InputName("X"));
+    PADDLE_ENFORCE_EQ(x != nullptr, true,
+                      platform::errors::NotFound(
+                          "Cannot get input Variable X, Variable name = %s",
+                          ctx.InputName("X")));
     auto* y = ctx.Input<Tensor>("Y");
     auto* z = ctx.Output<Tensor>("Out");
     z->mutable_data<T>(ctx.GetPlace());
...
@@ -76,7 +76,9 @@ class ElementwiseMulMKLDNNKernel : public framework::OpKernel<T> {
       get_mid_dims(x_dims, y_dims_untrimmed, axis, &pre, &num, &post,
                    &is_run_common_broadcast);
-      if (post == 1) PADDLE_THROW("Not implemented when post is 1");
+      if (post == 1)
+        PADDLE_THROW(
+            platform::errors::Unimplemented("Not implemented when post is 1."));
       const int64_t n = x_dims[0];
       const int64_t c = x_dims[1];
...
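Taken together, the commit maps each failure to a typed error: platform::errors::InvalidArgument for bad shapes and var types, NotFound for a missing input variable, and Unimplemented for unsupported code paths. A toy sketch of such a taxonomy built on std::logic_error (Paddle's real types live in platform::errors and differ in detail):

#include <cstdio>
#include <stdexcept>

// Toy error taxonomy mirroring the categories used in this commit.
struct InvalidArgument : std::logic_error { using std::logic_error::logic_error; };
struct NotFound : std::logic_error { using std::logic_error::logic_error; };
struct Unimplemented : std::logic_error { using std::logic_error::logic_error; };

int main() {
  try {
    // Mirrors the MKLDNN kernel above: an unsupported path raises Unimplemented.
    throw Unimplemented("Not implemented when post is 1.");
  } catch (const std::logic_error& e) {
    std::printf("%s\n", e.what());  // typed, descriptive message
  }
  return 0;
}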