Unverified Commit 41b59555 authored by Chen Weihang, committed by GitHub

Polish no owner ops error message (#27448)

* polish no owner op error messages

* fix unittest failures

* polish details based on reviewer comments
Parent 43240a1b
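Every hunk in this commit applies the same pattern: a bare `PADDLE_ENFORCE`/`PADDLE_THROW` with a terse message is replaced by a typed check (`PADDLE_ENFORCE_EQ`, `PADDLE_ENFORCE_GE`, `PADDLE_ENFORCE_NOT_NULL`, ...) that carries an explicit `platform::errors` category and a descriptive, formatted message. The sketch below is a minimal, self-contained illustration of that idea only; it is not Paddle's actual macro implementation, and `errors::InvalidArgument` / `ENFORCE_GE` here are simplified stand-ins.

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

// Simplified stand-in for platform::errors::InvalidArgument: it merely tags
// the formatted message with an error category.
namespace errors {
inline std::string InvalidArgument(const char* fmt, double value) {
  char buf[256];
  std::snprintf(buf, sizeof(buf), fmt, value);
  return std::string("InvalidArgument: ") + buf;
}
}  // namespace errors

// Simplified stand-in for PADDLE_ENFORCE_GE: the comparison is named by the
// macro itself, so the check and the message can no longer drift apart.
#define ENFORCE_GE(a, b, msg)                         \
  do {                                                \
    if (!((a) >= (b))) throw std::runtime_error(msg); \
  } while (0)

int main() {
  float alpha = -1.0f;
  try {
    // New style: typed check + error category + formatted, actionable text.
    ENFORCE_GE(alpha, 0.0f,
               errors::InvalidArgument(
                   "Attribute 'alpha' must be greater than or equal to 0.0, "
                   "but received %f.",
                   alpha));
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());  // prints the categorized message
  }
  return 0;
}
```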
@@ -69,12 +69,18 @@ class AddPositionEncodingOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<float>("alpha", "The scale of Original Embedding.")
         .SetDefault(1.0f)
         .AddCustomChecker([](const float& alpha) {
-          PADDLE_ENFORCE(alpha >= 0.0f, "'alpha' must be above 0.0.");
+          PADDLE_ENFORCE_GE(
+              alpha, 0.0f,
+              platform::errors::InvalidArgument(
+                  "Attribute 'alpha' must be greater than or equal to 0.0."));
         });
     AddAttr<float>("beta", "The scale of Position Embedding.")
         .SetDefault(1.0f)
         .AddCustomChecker([](const float& beta) {
-          PADDLE_ENFORCE(beta >= 0.0f, "'beta' must be between 0.0.");
+          PADDLE_ENFORCE_GE(
+              beta, 0.0f,
+              platform::errors::InvalidArgument(
+                  "Attribute 'beta' must be greater than or equal to 0.0."));
         });
     AddComment(R"DOC(
     Add Position Encoding Operator.
......
@@ -76,7 +76,10 @@ class AssignValueKernel : public framework::OpKernel<T> {
         value_name = "int64_values";
         break;
       default:
-        PADDLE_THROW("Unsupported dtype for assign_value_op: %d", dtype);
+        PADDLE_THROW(platform::errors::Unimplemented(
+            "Unsupported data type(code %d) for AssignValue operator, only "
+            "supports bool, int32, float32 and int64.",
+            dtype));
         break;
     }
     CopyVecotorToTensor<T>(value_name, out, ctx);
......
@@ -33,29 +33,37 @@ class CoalesceTensorOpKernel : public framework::OpKernel<T> {
     auto out_vars = context.MultiOutputVar("Output");
     PADDLE_ENFORCE_GT(in_var_names.size(), static_cast<size_t>(0),
-                      "The CoalesceTensorOp has no input.");
-    PADDLE_ENFORCE_EQ(
-        in_var_names.size(), out_var_names.size(),
-        "The number of CoalesceTensorOp's input and output is not match.");
+                      platform::errors::InvalidArgument(
+                          "The CoalesceTensor operator has no input."));
+    PADDLE_ENFORCE_EQ(in_var_names.size(), out_var_names.size(),
+                      platform::errors::InvalidArgument(
+                          "The number of CoalesceTensor operator's input and "
+                          "output is not match, "
+                          "input number is %u, output number is %u.",
+                          in_var_names.size(), out_var_names.size()));
     // Input & Output check: only support LoDTensor
     for (size_t i = 0; i < in_var_names.size(); ++i) {
       PADDLE_ENFORCE_NOT_NULL(
           in_vars[i],
-          "The input variable %s of CoalesceTensorOp does not exist.",
-          in_var_names[i]);
+          platform::errors::NotFound("The input variable %s of CoalesceTensor "
+                                     "operator does not exist.",
+                                     in_var_names[i]));
       PADDLE_ENFORCE_NOT_NULL(
           out_vars[i],
-          "The output variable %s of CoalesceTensorOp does not exist.",
-          out_var_names[i]);
-      PADDLE_ENFORCE_EQ(
-          in_vars[i]->IsType<framework::LoDTensor>(), true,
-          "The input variable %s of CoalesceTensorOp is not LoDTensor.",
-          in_var_names[i]);
-      PADDLE_ENFORCE_EQ(
-          out_vars[i]->IsType<framework::LoDTensor>(), true,
-          "The output variable %s of CoalesceTensorOp is not LoDTensor.",
-          in_var_names[i]);
+          platform::errors::NotFound("The output variable %s of CoalesceTensor "
+                                     "operator does not exist.",
+                                     out_var_names[i]));
+      PADDLE_ENFORCE_EQ(in_vars[i]->IsType<framework::LoDTensor>(), true,
+                        platform::errors::InvalidArgument(
+                            "The input variable %s of CoalesceTensor operator "
+                            "is not LoDTensor.",
+                            in_var_names[i]));
+      PADDLE_ENFORCE_EQ(out_vars[i]->IsType<framework::LoDTensor>(), true,
+                        platform::errors::InvalidArgument(
+                            "The output variable %s of CoalesceTensor operator "
+                            "is not LoDTensor.",
+                            in_var_names[i]));
     }
     auto in_tensors = context.MultiInput<framework::LoDTensor>("Input");
@@ -64,7 +72,10 @@ class CoalesceTensorOpKernel : public framework::OpKernel<T> {
       for (size_t i = 0; i < in_var_names.size(); ++i) {
         PADDLE_ENFORCE_EQ(
             in_var_names[i], out_var_names[i],
-            "The input and output variable of CoalesceTensorOp is different.");
+            platform::errors::InvalidArgument(
+                "The input and output variable of CoalesceTensor operator is "
+                "different, %dth input is %s, %dth output is %s.",
+                i, in_var_names[i], i, out_var_names[i]));
       }
     } else {
       // Init the output as input
@@ -134,16 +145,25 @@ class CoalesceTensorOpKernel : public framework::OpKernel<T> {
       const std::vector<const framework::LoDTensor *> &lod_tensors,
       const std::vector<std::string> var_names, size_t *numel,
       const size_t &size_of_dtype, const platform::Place &place) const {
-    PADDLE_ENFORCE_EQ(lod_tensors.size(), var_names.size());
+    PADDLE_ENFORCE_EQ(
+        lod_tensors.size(), var_names.size(),
+        platform::errors::InvalidArgument(
+            "The number of input tensor and variable does not match, the "
+            "number of input tensor is %u, the number of input variable is %u.",
+            lod_tensors.size(), var_names.size()));
     *numel = 0;
     std::stringstream ss;
     ss << "alloc_space_for_vars: ";
     for (size_t i = 0; i < var_names.size(); ++i) {
       PADDLE_ENFORCE_EQ(lod_tensors[i]->IsInitialized(), true,
-                        "%s is not initialized.", var_names[i]);
+                        platform::errors::InvalidArgument(
+                            "Tensor `%s` is not initialized.", var_names[i]));
       auto size = lod_tensors[i]->numel();
-      PADDLE_ENFORCE_GT(size, 0);
+      PADDLE_ENFORCE_GT(
+          size, 0,
+          platform::errors::InvalidArgument(
+              "The number of tensor `%s`'s elements is 0.", var_names[i]));
       ss << "input(" << var_names[i] << ") dim:(" << lod_tensors[i]->dims()
          << ") "
         << " addres:" << lod_tensors[i]->data<void>() << ", ";
......
@@ -45,10 +45,8 @@ class DequantizeMaxAbsOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of DequantizeMaxAbsOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of DequantizeMaxAbsOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "DequantizeMaxAbs");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "DequantizeMaxAbs");
     ctx->ShareDim("X", /*->*/ "Out");
     ctx->ShareLoD("X", /*->*/ "Out");
......
@@ -532,7 +532,8 @@ static int count_contours(polygon_node *polygon) {
 }
 static void add_left(polygon_node *p, double x, double y) {
-  PADDLE_ENFORCE_NOT_NULL(p);
+  PADDLE_ENFORCE_NOT_NULL(p, paddle::platform::errors::InvalidArgument(
+                                 "Input polygon node is nullptr."));
   vertex_node *nv = NULL;
   /* Create a new vertex node and set its fields */
@@ -588,7 +589,8 @@ static void add_right(polygon_node *p, double x, double y) {
 }
 static void merge_right(polygon_node *p, polygon_node *q, polygon_node *list) {
-  PADDLE_ENFORCE_NOT_NULL(p);
+  PADDLE_ENFORCE_NOT_NULL(p, paddle::platform::errors::InvalidArgument(
+                                 "Input polygon node is nullptr."));
   polygon_node *target = NULL;
   /* Label contour as external */
@@ -664,7 +666,8 @@ void add_vertex(vertex_node **t, double x, double y) {
 }
 void gpc_vertex_create(edge_node *e, int p, int s, double x, double y) {
-  PADDLE_ENFORCE_NOT_NULL(e);
+  PADDLE_ENFORCE_NOT_NULL(e, paddle::platform::errors::InvalidArgument(
+                                 "Input edge node is nullptr."));
   add_vertex(&(e->outp[p]->v[s]), x, y);
   e->outp[p]->active++;
 }
@@ -693,7 +696,8 @@ static bbox *create_contour_bboxes(gpc_polygon *p) {
   gpc_malloc<bbox>(box, p->num_contours * sizeof(bbox),
                    const_cast<char *>("Bounding box creation"));
-  PADDLE_ENFORCE_NOT_NULL(box);
+  PADDLE_ENFORCE_NOT_NULL(box, paddle::platform::errors::ResourceExhausted(
+                                   "Failed to malloc box memory."));
   /* Construct contour bounding boxes */
   for (c = 0; c < p->num_contours; c++) {
@@ -857,7 +861,9 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
   /* Create an extended hole array */
   gpc_malloc<int>(extended_hole, (p->num_contours + 1) * sizeof(int),
                   const_cast<char *>("contour hole addition"));
-  PADDLE_ENFORCE_NOT_NULL(extended_hole);
+  PADDLE_ENFORCE_NOT_NULL(extended_hole,
+                          paddle::platform::errors::ResourceExhausted(
+                              "Failed to malloc extended hole memory."));
   /* Create an extended contour array */
   gpc_malloc<gpc_vertex_list>(extended_contour,
@@ -975,7 +981,9 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
-  PADDLE_ENFORCE_NOT_NULL(sbt);
+  PADDLE_ENFORCE_NOT_NULL(sbt, paddle::platform::errors::ResourceExhausted(
+                                   "Failed to malloc scanbeam table memory."));
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
@@ -1017,7 +1025,9 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   e0 = aet;
   e1 = aet;
   /* Set up bundle fields of first edge */
-  PADDLE_ENFORCE_NOT_NULL(aet);
+  PADDLE_ENFORCE_NOT_NULL(aet, paddle::platform::errors::InvalidArgument(
+                                   "Edge node AET is nullptr."));
   aet->bundle[ABOVE][aet->type] = (aet->top.y != yb);
   aet->bundle[ABOVE][!aet->type] = 0;
   aet->bstate[ABOVE] = UNBUNDLED;
@@ -1612,7 +1622,8 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
-  PADDLE_ENFORCE_NOT_NULL(sbt);
+  PADDLE_ENFORCE_NOT_NULL(sbt, paddle::platform::errors::ResourceExhausted(
+                                   "Failed to malloc scanbeam table memory."));
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
@@ -1650,7 +1661,8 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   e1 = aet;
   /* Set up bundle fields of first edge */
-  PADDLE_ENFORCE_NOT_NULL(aet);
+  PADDLE_ENFORCE_NOT_NULL(aet, paddle::platform::errors::InvalidArgument(
+                                   "Edge node AET is nullptr."));
   aet->bundle[ABOVE][aet->type] = (aet->top.y != yb);
   aet->bundle[ABOVE][!aet->type] = 0;
   aet->bstate[ABOVE] = UNBUNDLED;
......
@@ -48,7 +48,9 @@ class FetchBarrierOp : public framework::OperatorBase {
     }
     for (size_t i = 0; i < rets.size(); i++) {
-      PADDLE_ENFORCE_NE(rets[i]->Wait(), 0U, "internal error in RPCClient");
+      PADDLE_ENFORCE_NE(rets[i]->Wait(), 0U,
+                        platform::errors::Unavailable(
+                            "Internal error occurred in RPCClient."));
     }
   }
 };
......
@@ -34,16 +34,16 @@ inline bool NeedSend(const framework::Scope& scope,
       std::string::npos)
     return false;
   auto* var = scope.FindVar(varname);
-  PADDLE_ENFORCE_NOT_NULL(var, "Can not find variable '%s' in the send side.",
-                          varname);
+  PADDLE_ENFORCE_NOT_NULL(
+      var, platform::errors::NotFound(
+               "Can not find variable '%s' in the send side.", varname));
   if (var->IsType<framework::LoDTensor>()) {
     return var->Get<framework::LoDTensor>().IsInitialized();
   } else if (var->IsType<framework::SelectedRows>()) {
     return var->Get<framework::SelectedRows>().rows().size() > 0UL;
   } else {
-    PADDLE_THROW(
-        "Variable type in send side should be in "
-        "[LodTensor, SelectedRows]");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Variable type in send side should be LodTensor or SelectedRows."));
   }
   return false;
 }
......
@@ -47,7 +47,9 @@ class GRUUnitKernel : public framework::OpKernel<T> {
     else if (act_type == relu)
       ReluFunctor<T>()(d, x, y);
     else
-      PADDLE_THROW("unsupported activation type");
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Unsupported activation type, only supports identity, sigmoid, tanh "
+          "and relu."));
   }
   void Compute(const framework::ExecutionContext& context) const override {
@@ -137,7 +139,9 @@ class GRUUnitGradKernel : public framework::OpKernel<T> {
     else if (act_type == relu)
      ReluGradFunctor<T>()(d, x, y, dy, dx);
     else
-      PADDLE_THROW("unsupported activation type");
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Unsupported activation type, only supports identity, sigmoid, tanh "
+          "and relu."));
   }
   void Compute(const framework::ExecutionContext& context) const override {
......
@@ -104,12 +104,13 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) {
   auto dim_x = ctx->GetInputDim("X");
   auto interp_method = ctx->Attrs().Get<std::string>("interp_method");
-  PADDLE_ENFORCE(
-      "bilinear" == interp_method || "nearest" == interp_method ||
-          "bicubic" == interp_method,
-      "Interpolation method can only be \"bilinear\" or \"nearest\" when "
-      "Input(X) dimension is 4, but got method = %s .",
-      interp_method);
+  PADDLE_ENFORCE_EQ("bilinear" == interp_method || "nearest" == interp_method ||
+                        "bicubic" == interp_method,
+                    true, platform::errors::InvalidArgument(
+                              "Interpolation method can only be \"bilinear\" "
+                              "or \"nearest\" or \"bicubic\" when "
+                              "Input(X) dimension is 4, but got method is %s.",
+                              interp_method));
   const DataLayout data_layout = framework::StringToDataLayout(
       ctx->Attrs().Get<std::string>("data_layout"));
@@ -169,13 +170,13 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) {
     auto out_size_dim = ctx->GetInputDim("OutSize");
     PADDLE_ENFORCE_EQ(
         out_size_dim.size(), 1,
-        platform::errors::InvalidArgument(
-            "OutSize's dimension size must be 1, but got dimension = %d .",
-            out_size_dim.size()));
+        platform::errors::InvalidArgument("OutSize's dimension size must be 1, "
+                                          "but got dimension size is %d .",
+                                          out_size_dim.size()));
     PADDLE_ENFORCE_EQ(
         out_size_dim[0], 2,
         platform::errors::InvalidArgument(
-            "OutSize's dim[0] must be 2, but got dimention = %d .",
+            "OutSize's dimension[0] must be 2, but got dimension[0] is %d .",
            out_size_dim[0]));
     ctx->ShareLoD("X", "Out");
     return;
@@ -264,12 +265,15 @@ static void Interpolate3DInferShapeCheck(framework::InferShapeContext* ctx) {
   if (ctx->HasInput("OutSize") && ctx->IsRuntime()) {
     auto out_size_dim = ctx->GetInputDim("OutSize");
-    PADDLE_ENFORCE_EQ(out_size_dim.size(), 1,
-                      "OutSize's dimension size must be 1, but got size =%d .",
-                      out_size_dim.size());
+    PADDLE_ENFORCE_EQ(
+        out_size_dim.size(), 1,
+        platform::errors::InvalidArgument(
+            "OutSize's dimension size must be 1, but got size is %d.",
+            out_size_dim.size()));
     PADDLE_ENFORCE_EQ(out_size_dim[0], 3,
-                      "OutSize's dim[0] must be 3, but got size = %d .",
-                      out_size_dim[0]);
+                      platform::errors::InvalidArgument(
+                          "OutSize's dim[0] must be 3, but got size is %d.",
+                          out_size_dim[0]));
     ctx->ShareLoD("X", "Out");
     return;
   }
@@ -289,10 +293,8 @@ class InterpolateOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of InterpolateOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of InterpolationOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Interpolate");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Interpolate");
     auto dim_x = ctx->GetInputDim("X");  // NCHW format
     PADDLE_ENFORCE(
@@ -534,9 +536,10 @@ class InterpolateOpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InterpolateGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "InterpolateGrad");
     auto dim_x = ctx->GetInputDim("X");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), dim_x);
......
@@ -44,8 +44,10 @@ class MergeLoDTensorOp : public framework::OperatorBase {
         scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
     auto level = static_cast<size_t>(Attr<int>("level"));
-    PADDLE_ENFORCE(in_true.numel() || in_false.numel(),
-                   "Input(InTrue) or Input(InFalse) should be initialized.");
+    PADDLE_ENFORCE_EQ(
+        in_true.numel() || in_false.numel(), true,
+        platform::errors::InvalidArgument(
+            "Input(InTrue) or Input(InFalse) should be initialized."));
     auto &mask_dim = mask.dims();
     std::unique_ptr<framework::LoDTensor> cpu_mask{new framework::LoDTensor()};
@@ -56,7 +58,9 @@ class MergeLoDTensorOp : public framework::OperatorBase {
       framework::TensorCopy(mask, platform::CPUPlace(), dev_ctx,
                             cpu_mask.get());
 #else
-      PADDLE_THROW("Not supported GPU, Please compile WITH_GPU option");
+      PADDLE_THROW(platform::errors::PreconditionNotMet(
+          "Not supported GPU, Please recompile or reinstall paddle with CUDA "
+          "support."));
 #endif
     }
     auto *mask_data = cpu_mask->data<bool>();
@@ -109,7 +113,11 @@ class MergeLoDTensorOp : public framework::OperatorBase {
       size_t start_offset = lod_and_offset.second.first;
       size_t end_offset = lod_and_offset.second.second;
-      PADDLE_ENFORCE_GE(end_offset, start_offset);
+      PADDLE_ENFORCE_GE(end_offset, start_offset,
+                        platform::errors::InvalidArgument(
+                            "The end offset less than start offset, end offset "
+                            "is %d, start offset is %d.",
+                            end_offset, start_offset));
       size_t len = end_offset - start_offset;
       if (len == 0) {
         continue;
@@ -189,22 +197,24 @@ class MergeLoDTensorInferShape : public framework::InferShapeBase {
                    "merge_lod_tensor");
     auto mask_dim = context->GetInputDim("Mask");
-    PADDLE_ENFORCE_EQ(mask_dim.size(), 2,
-                      "If you are using IfElse OP:"
-                      "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
-                      "ie.true_block():\n out_1 = ie.input(x)\n\n"
-                      "Please ensure that the cond should be a 2-D tensor and "
-                      "the second dim size of cond should be 1. "
-                      "But now the cond's shape is [",
-                      *mask_dim.Get(), "].\n");
+    PADDLE_ENFORCE_EQ(mask_dim.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "If you are using IfElse OP:"
+                          "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
+                          "ie.true_block():\n out_1 = ie.input(x)\n\n"
+                          "Please ensure that the cond is a 2-D tensor and "
+                          "the second dim size of cond is 1. "
+                          "But now the cond's shape is [%s].\n",
+                          mask_dim));
     if (context->IsRuntime() || mask_dim[1] > 0) {
       PADDLE_ENFORCE_EQ(mask_dim[1], 1,
-                        "If you are using IfElse OP:"
-                        "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
-                        "ie.true_block():\n out_1 = ie.input(x)\n\n"
-                        "Please ensure that the cond should be a 2-D tensor "
-                        "and the second dim size of cond should be 1. "
-                        "But now the cond's shape is [",
-                        *mask_dim.Get(), "].\n");
+                        platform::errors::InvalidArgument(
+                            "If you are using IfElse OP:"
+                            "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
+                            "ie.true_block():\n out_1 = ie.input(x)\n\n"
+                            "Please ensure that the cond is a 2-D tensor "
+                            "and the second dim size of cond is 1. "
+                            "But now the cond's shape is [%s].\n",
+                            mask_dim));
     }
     context->SetOutputDim("Out", context->GetInputDim("InTrue"));
......
@@ -60,20 +60,33 @@ inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx,
   auto place = ctx.GetPlace();
   PADDLE_ENFORCE_EQ(src_stride_numel.size(), dst_stride_numel.size(),
-                    "src and dst tensor should have the same dims size.");
+                    platform::errors::InvalidArgument(
+                        "Source and destination tensor should have the same "
+                        "dimension size, but source tensor dimension size is "
+                        "%u, destination tensor size is %u.",
+                        src_stride_numel.size(), dst_stride_numel.size()));
   for (int64_t i = 0; i < axis; ++i) {
     if (i < axis) {
-      PADDLE_ENFORCE_EQ(src_stride_numel[i] / src_stride_numel[axis],
-                        dst_stride_numel[i] / dst_stride_numel[axis],
-                        "src and dst should have the same elements "
-                        "except the specified axis.");
+      PADDLE_ENFORCE_EQ(
+          src_stride_numel[i] / src_stride_numel[axis],
+          dst_stride_numel[i] / dst_stride_numel[axis],
+          platform::errors::InvalidArgument(
+              "Source and destination tensor should have the same number of "
+              "elements except the specified axis, but the source elements "
+              "number is %d, destination elements number is %d.",
+              src_stride_numel[i] / src_stride_numel[axis],
+              dst_stride_numel[i] / dst_stride_numel[axis]));
     } else if (i == axis) {
       continue;
     } else {
-      PADDLE_ENFORCE_EQ(src_stride_numel[i], dst_stride_numel[i],
-                        "src and dst should have the same elements "
-                        "except the specified axis.");
+      PADDLE_ENFORCE_EQ(
+          src_stride_numel[i], dst_stride_numel[i],
+          platform::errors::InvalidArgument(
+              "Source and destination tensor should have the same number of "
+              "elements except the specified axis, but the source elements "
+              "number is %d, destination elements number is %d.",
+              src_stride_numel[i], dst_stride_numel[i]));
     }
   }
@@ -90,7 +103,8 @@ inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx,
       memory::Copy(gpu_place, dst + i * dst_after, gpu_place,
                    src + i * src_after, sizeof(T) * size, cuda_ctx.stream());
 #else
-      PADDLE_THROW("Paddle is not compiled with GPU");
+      PADDLE_THROW(platform::errors::PreconditionNotMet(
+          "Paddle is not compiled with GPU."));
 #endif
     }
   }
......
@@ -78,21 +78,35 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const {
       platform::errors::NotFound("Col(Output) of VarConv2dOP is not found."));
   auto x_dims = ctx->GetInputDim("X");
-  PADDLE_ENFORCE_EQ(x_dims.size(), 2,
-                    "The rank of X(Input) can't be less than 2.");
+  PADDLE_ENFORCE_EQ(
+      x_dims.size(), 2,
+      platform::errors::InvalidArgument(
+          "The rank of X(Input) can't be less than 2, but received rank is %u.",
+          x_dims.size()));
   auto w_dims = ctx->GetInputDim("W");
-  PADDLE_ENFORCE_EQ(w_dims.size(), 2, "W should be 2-D tensor");
+  PADDLE_ENFORCE_EQ(
+      w_dims.size(), 2,
+      platform::errors::InvalidArgument(
+          "Input W should be a 2-D tensor, but its actual dimension is %u.",
+          w_dims.size()));
   int output_channel = ctx->Attrs().Get<int>("OutputChannel");
   int input_channel = ctx->Attrs().Get<int>("InputChannel");
   int kernel_h = ctx->Attrs().Get<int>("KernelH");
   int kernel_w = ctx->Attrs().Get<int>("KernelW");
-  PADDLE_ENFORCE_EQ(w_dims[0], output_channel,
-                    "W dim[0] should be equal to OutputChannel");
+  PADDLE_ENFORCE_EQ(
+      w_dims[0], output_channel,
+      platform::errors::InvalidArgument(
+          "Input W's dimension[0] should be equal to OutputChannel, the "
+          "dimension[0] is %d, OutputChannel is %d.",
+          w_dims[0], output_channel));
   PADDLE_ENFORCE_EQ(
       w_dims[1], input_channel * kernel_h * kernel_w,
-      "W dim[1] should be equal to InputChannel * StrideH * StrideW");
+      platform::errors::InvalidArgument(
+          "Input W's dimension[1] should be equal to InputChannel * StrideH * "
+          "StrideW, the dimension[1] is %d, expected value is %d.",
+          w_dims[1], input_channel * kernel_h * kernel_w));
   if (ctx->IsRuntime()) {
     framework::Variable* x_var =
@@ -103,10 +117,14 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const {
         platform::errors::InvalidArgument("The Input(X) Tensor of VarConv2dOP "
                                           "does not contain LoD information."));
-    PADDLE_ENFORCE_GE(x_lod.size(), 1, "The Input(X)'s lod info is corrupted.");
-    PADDLE_ENFORCE_EQ(
-        x_dims[0], static_cast<int64_t>(x_lod[0].back()),
-        "The Input(X)'s lod info mismatches the actual tensor shape.");
+    PADDLE_ENFORCE_GE(x_lod.size(), 1,
+                      platform::errors::InvalidArgument(
+                          "The Input(X)'s lod info is corrupted."));
+    PADDLE_ENFORCE_EQ(x_dims[0], static_cast<int64_t>(x_lod[0].back()),
+                      platform::errors::InvalidArgument(
+                          "The Input(X)'s lod info mismatches the actual "
+                          "tensor shape, input lod is %s, tensor shape is %s.",
+                          x_lod, x_dims));
     framework::Variable* row_var =
         BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("ROW")[0]);
......
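Several hunks above also collapse hand-written input/output null checks into the `OP_INOUT_CHECK` helper. The snippet below is a hypothetical, simplified rendering of what such a helper condenses; it is not Paddle's actual definition, and the message wording and `has_input_x` variable are illustrative only.

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for OP_INOUT_CHECK: one line replaces a
// PADDLE_ENFORCE(ctx->HasInput(...), "...should not be null.") pair by
// bundling the role ("Input"/"Output"), the variable name, and the operator
// name into one uniformly worded not-found message.
#define OP_INOUT_CHECK(has, io_role, var_name, op_name)                  \
  do {                                                                   \
    if (!(has))                                                          \
      throw std::runtime_error(std::string(io_role) + "(" + (var_name) + \
                               ") of operator " + (op_name) +            \
                               " is not found.");                        \
  } while (0)

int main() {
  bool has_input_x = false;  // pretend ctx->HasInput("X") returned false
  try {
    // Usage mirroring the diff above.
    OP_INOUT_CHECK(has_input_x, "Input", "X", "Interpolate");
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}
```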