Unverified commit 41b59555, authored by Chen Weihang, committed by GitHub

Polish no owner ops error message (#27448)

* polish no owner op error message

* fix unittest failures

* polish details based on reviewer comments
Parent 43240a1b
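Every change in this diff applies the same recipe: an untyped PADDLE_ENFORCE or PADDLE_THROW becomes a comparison-specific macro (PADDLE_ENFORCE_GE, PADDLE_ENFORCE_EQ, PADDLE_ENFORCE_NOT_NULL, ...) paired with a typed error from platform::errors, and the message is rewritten to name the operator, the offending attribute or variable, and where possible the actual values. The snippet below is a minimal standalone sketch of that pattern; ENFORCE_GE, InvalidArgument and Format here are hypothetical simplified stand-ins for illustration, not Paddle's real implementation.

// Standalone analogue of the PADDLE_ENFORCE_GE + platform::errors pattern.
// ENFORCE_GE, InvalidArgument and Format are hypothetical stand-ins,
// not Paddle's real implementation.
#include <cstdio>
#include <stdexcept>
#include <string>

// Typed error, analogous to platform::errors::InvalidArgument.
struct InvalidArgument : std::runtime_error {
  explicit InvalidArgument(const std::string& msg)
      : std::runtime_error("InvalidArgumentError: " + msg) {}
};

// printf-style message builder, as the Paddle macros accept.
template <typename... Args>
std::string Format(const char* fmt, Args... args) {
  char buf[512];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return std::string(buf);
}

// Analogue of PADDLE_ENFORCE_GE(a, b, error): check a >= b, otherwise
// throw the typed error instead of an unclassified one.
#define ENFORCE_GE(a, b, error)      \
  do {                               \
    if (!((a) >= (b))) throw(error); \
  } while (0)

int main() {
  float alpha = -1.0f;
  try {
    // Mirrors the new check in add_position_encoding_op.cc below.
    ENFORCE_GE(alpha, 0.0f,
               InvalidArgument(Format(
                   "Attribute 'alpha' must be greater than or equal to 0.0, "
                   "but received %f.",
                   alpha)));
  } catch (const InvalidArgument& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}

Compiled and run, this prints: InvalidArgumentError: Attribute 'alpha' must be greater than or equal to 0.0, but received -1.000000.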
@@ -69,12 +69,18 @@ class AddPositionEncodingOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<float>("alpha", "The scale of Original Embedding.")
         .SetDefault(1.0f)
         .AddCustomChecker([](const float& alpha) {
-          PADDLE_ENFORCE(alpha >= 0.0f, "'alpha' must be above 0.0.");
+          PADDLE_ENFORCE_GE(
+              alpha, 0.0f,
+              platform::errors::InvalidArgument(
+                  "Attribute 'alpha' must be greater than or equal to 0.0."));
         });
     AddAttr<float>("beta", "The scale of Position Embedding.")
         .SetDefault(1.0f)
         .AddCustomChecker([](const float& beta) {
-          PADDLE_ENFORCE(beta >= 0.0f, "'beta' must be between 0.0.");
+          PADDLE_ENFORCE_GE(
+              beta, 0.0f,
+              platform::errors::InvalidArgument(
+                  "Attribute 'beta' must be greater than or equal to 0.0."));
         });
     AddComment(R"DOC(
     Add Position Encoding Operator.

@@ -76,7 +76,10 @@ class AssignValueKernel : public framework::OpKernel<T> {
         value_name = "int64_values";
         break;
       default:
-        PADDLE_THROW("Unsupported dtype for assign_value_op: %d", dtype);
+        PADDLE_THROW(platform::errors::Unimplemented(
+            "Unsupported data type(code %d) for AssignValue operator, only "
+            "supports bool, int32, float32 and int64.",
+            dtype));
         break;
     }
     CopyVecotorToTensor<T>(value_name, out, ctx);

@@ -33,29 +33,37 @@ class CoalesceTensorOpKernel : public framework::OpKernel<T> {
     auto out_vars = context.MultiOutputVar("Output");
     PADDLE_ENFORCE_GT(in_var_names.size(), static_cast<size_t>(0),
-                      "The CoalesceTensorOp has no input.");
-    PADDLE_ENFORCE_EQ(
-        in_var_names.size(), out_var_names.size(),
-        "The number of CoalesceTensorOp's input and output is not match.");
+                      platform::errors::InvalidArgument(
+                          "The CoalesceTensor operator has no input."));
+    PADDLE_ENFORCE_EQ(in_var_names.size(), out_var_names.size(),
+                      platform::errors::InvalidArgument(
+                          "The number of CoalesceTensor operator's input and "
+                          "output is not match, "
+                          "input number is %u, output number is %u.",
+                          in_var_names.size(), out_var_names.size()));
     // Input & Output check: only support LoDTensor
     for (size_t i = 0; i < in_var_names.size(); ++i) {
       PADDLE_ENFORCE_NOT_NULL(
           in_vars[i],
-          "The input variable %s of CoalesceTensorOp does not exist.",
-          in_var_names[i]);
+          platform::errors::NotFound("The input variable %s of CoalesceTensor "
+                                     "operator does not exist.",
+                                     in_var_names[i]));
       PADDLE_ENFORCE_NOT_NULL(
           out_vars[i],
-          "The output variable %s of CoalesceTensorOp does not exist.",
-          out_var_names[i]);
-      PADDLE_ENFORCE_EQ(
-          in_vars[i]->IsType<framework::LoDTensor>(), true,
-          "The input variable %s of CoalesceTensorOp is not LoDTensor.",
-          in_var_names[i]);
-      PADDLE_ENFORCE_EQ(
-          out_vars[i]->IsType<framework::LoDTensor>(), true,
-          "The output variable %s of CoalesceTensorOp is not LoDTensor.",
-          in_var_names[i]);
+          platform::errors::NotFound("The output variable %s of CoalesceTensor "
+                                     "operator does not exist.",
+                                     out_var_names[i]));
+      PADDLE_ENFORCE_EQ(in_vars[i]->IsType<framework::LoDTensor>(), true,
+                        platform::errors::InvalidArgument(
+                            "The input variable %s of CoalesceTensor operator "
+                            "is not LoDTensor.",
+                            in_var_names[i]));
+      PADDLE_ENFORCE_EQ(out_vars[i]->IsType<framework::LoDTensor>(), true,
+                        platform::errors::InvalidArgument(
+                            "The output variable %s of CoalesceTensor operator "
+                            "is not LoDTensor.",
+                            in_var_names[i]));
     }
     auto in_tensors = context.MultiInput<framework::LoDTensor>("Input");
@@ -64,7 +72,10 @@ class CoalesceTensorOpKernel : public framework::OpKernel<T> {
       for (size_t i = 0; i < in_var_names.size(); ++i) {
         PADDLE_ENFORCE_EQ(
             in_var_names[i], out_var_names[i],
-            "The input and output variable of CoalesceTensorOp is different.");
+            platform::errors::InvalidArgument(
+                "The input and output variable of CoalesceTensor operator is "
+                "different, %dth input is %s, %dth output is %s.",
+                i, in_var_names[i], i, out_var_names[i]));
       }
     } else {
       // Init the output as input
@@ -134,16 +145,25 @@ class CoalesceTensorOpKernel : public framework::OpKernel<T> {
       const std::vector<const framework::LoDTensor *> &lod_tensors,
       const std::vector<std::string> var_names, size_t *numel,
       const size_t &size_of_dtype, const platform::Place &place) const {
-    PADDLE_ENFORCE_EQ(lod_tensors.size(), var_names.size());
+    PADDLE_ENFORCE_EQ(
+        lod_tensors.size(), var_names.size(),
+        platform::errors::InvalidArgument(
+            "The number of input tensor and variable does not match, the "
+            "number of input tensor is %u, the number of input variable is %u.",
+            lod_tensors.size(), var_names.size()));
     *numel = 0;
     std::stringstream ss;
     ss << "alloc_space_for_vars: ";
     for (size_t i = 0; i < var_names.size(); ++i) {
       PADDLE_ENFORCE_EQ(lod_tensors[i]->IsInitialized(), true,
-                        "%s is not initialized.", var_names[i]);
+                        platform::errors::InvalidArgument(
+                            "Tensor `%s` is not initialized.", var_names[i]));
       auto size = lod_tensors[i]->numel();
-      PADDLE_ENFORCE_GT(size, 0);
+      PADDLE_ENFORCE_GT(
+          size, 0,
+          platform::errors::InvalidArgument(
+              "The number of tensor `%s`'s elements is 0.", var_names[i]));
       ss << "input(" << var_names[i] << ") dim:(" << lod_tensors[i]->dims()
          << ") "
          << " addres:" << lod_tensors[i]->data<void>() << ", ";

@@ -45,10 +45,8 @@ class DequantizeMaxAbsOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of DequantizeMaxAbsOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of DequantizeMaxAbsOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "DequantizeMaxAbs");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "DequantizeMaxAbs");
     ctx->ShareDim("X", /*->*/ "Out");
     ctx->ShareLoD("X", /*->*/ "Out");

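The DequantizeMaxAbs hunk above also shows the second recipe used in this commit: two hand-written input/output null checks collapse into OP_INOUT_CHECK. As a rough sketch of what such a macro might expand to, assuming Paddle-style enforce macros (this expansion is illustrative only; the real definition lives in Paddle's enforce headers and its exact message may differ):

// Hypothetical expansion of OP_INOUT_CHECK; illustrative only.
// OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "DequantizeMaxAbs")
// would then raise a typed NotFound error naming the missing slot.
#define OP_INOUT_CHECK(CONDITION, KIND, NAME, OP_NAME)          \
  PADDLE_ENFORCE_EQ(CONDITION, true,                            \
                    platform::errors::NotFound(                 \
                        "%s(%s) of %s operator is not found.",  \
                        KIND, NAME, OP_NAME))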
@@ -532,7 +532,8 @@ static int count_contours(polygon_node *polygon) {
 }
 static void add_left(polygon_node *p, double x, double y) {
-  PADDLE_ENFORCE_NOT_NULL(p);
+  PADDLE_ENFORCE_NOT_NULL(p, paddle::platform::errors::InvalidArgument(
+                                 "Input polygon node is nullptr."));
   vertex_node *nv = NULL;
   /* Create a new vertex node and set its fields */
@@ -588,7 +589,8 @@ static void add_right(polygon_node *p, double x, double y) {
 }
 static void merge_right(polygon_node *p, polygon_node *q, polygon_node *list) {
-  PADDLE_ENFORCE_NOT_NULL(p);
+  PADDLE_ENFORCE_NOT_NULL(p, paddle::platform::errors::InvalidArgument(
+                                 "Input polygon node is nullptr."));
   polygon_node *target = NULL;
   /* Label contour as external */
@@ -664,7 +666,8 @@ void add_vertex(vertex_node **t, double x, double y) {
 }
 void gpc_vertex_create(edge_node *e, int p, int s, double x, double y) {
-  PADDLE_ENFORCE_NOT_NULL(e);
+  PADDLE_ENFORCE_NOT_NULL(e, paddle::platform::errors::InvalidArgument(
+                                 "Input edge node is nullptr."));
   add_vertex(&(e->outp[p]->v[s]), x, y);
   e->outp[p]->active++;
 }
@@ -693,7 +696,8 @@ static bbox *create_contour_bboxes(gpc_polygon *p) {
   gpc_malloc<bbox>(box, p->num_contours * sizeof(bbox),
                    const_cast<char *>("Bounding box creation"));
-  PADDLE_ENFORCE_NOT_NULL(box);
+  PADDLE_ENFORCE_NOT_NULL(box, paddle::platform::errors::ResourceExhausted(
+                                   "Failed to malloc box memory."));
   /* Construct contour bounding boxes */
   for (c = 0; c < p->num_contours; c++) {
@@ -857,7 +861,9 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
   /* Create an extended hole array */
   gpc_malloc<int>(extended_hole, (p->num_contours + 1) * sizeof(int),
                   const_cast<char *>("contour hole addition"));
-  PADDLE_ENFORCE_NOT_NULL(extended_hole);
+  PADDLE_ENFORCE_NOT_NULL(extended_hole,
+                          paddle::platform::errors::ResourceExhausted(
+                              "Failed to malloc extended hole memory."));
   /* Create an extended contour array */
   gpc_malloc<gpc_vertex_list>(extended_contour,
@@ -975,7 +981,9 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
-  PADDLE_ENFORCE_NOT_NULL(sbt);
+  PADDLE_ENFORCE_NOT_NULL(sbt, paddle::platform::errors::ResourceExhausted(
+                                   "Failed to malloc scanbeam table memory."));
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
@@ -1017,7 +1025,9 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
     e0 = aet;
     e1 = aet;
     /* Set up bundle fields of first edge */
-    PADDLE_ENFORCE_NOT_NULL(aet);
+    PADDLE_ENFORCE_NOT_NULL(aet, paddle::platform::errors::InvalidArgument(
+                                     "Edge node AET is nullptr."));
     aet->bundle[ABOVE][aet->type] = (aet->top.y != yb);
     aet->bundle[ABOVE][!aet->type] = 0;
     aet->bstate[ABOVE] = UNBUNDLED;
@@ -1612,7 +1622,8 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
-  PADDLE_ENFORCE_NOT_NULL(sbt);
+  PADDLE_ENFORCE_NOT_NULL(sbt, paddle::platform::errors::ResourceExhausted(
+                                   "Failed to malloc scanbeam table memory."));
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
@@ -1650,7 +1661,8 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
     e1 = aet;
     /* Set up bundle fields of first edge */
-    PADDLE_ENFORCE_NOT_NULL(aet);
+    PADDLE_ENFORCE_NOT_NULL(aet, paddle::platform::errors::InvalidArgument(
+                                     "Edge node AET is nullptr."));
    aet->bundle[ABOVE][aet->type] = (aet->top.y != yb);
    aet->bundle[ABOVE][!aet->type] = 0;
    aet->bstate[ABOVE] = UNBUNDLED;

@@ -48,7 +48,9 @@ class FetchBarrierOp : public framework::OperatorBase {
     }
     for (size_t i = 0; i < rets.size(); i++) {
-      PADDLE_ENFORCE_NE(rets[i]->Wait(), 0U, "internal error in RPCClient");
+      PADDLE_ENFORCE_NE(rets[i]->Wait(), 0U,
+                        platform::errors::Unavailable(
+                            "Internal error occurred in RPCClient."));
     }
   }
 };

@@ -34,16 +34,16 @@ inline bool NeedSend(const framework::Scope& scope,
       std::string::npos)
     return false;
   auto* var = scope.FindVar(varname);
-  PADDLE_ENFORCE_NOT_NULL(var, "Can not find variable '%s' in the send side.",
-                          varname);
+  PADDLE_ENFORCE_NOT_NULL(
+      var, platform::errors::NotFound(
+               "Can not find variable '%s' in the send side.", varname));
   if (var->IsType<framework::LoDTensor>()) {
     return var->Get<framework::LoDTensor>().IsInitialized();
   } else if (var->IsType<framework::SelectedRows>()) {
     return var->Get<framework::SelectedRows>().rows().size() > 0UL;
   } else {
-    PADDLE_THROW(
-        "Variable type in send side should be in "
-        "[LodTensor, SelectedRows]");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Variable type in send side should be LodTensor or SelectedRows."));
   }
   return false;
 }

@@ -47,7 +47,9 @@ class GRUUnitKernel : public framework::OpKernel<T> {
     else if (act_type == relu)
       ReluFunctor<T>()(d, x, y);
     else
-      PADDLE_THROW("unsupported activation type");
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Unsupported activation type, only supports identity, sigmoid, tanh "
+          "and relu."));
   }
   void Compute(const framework::ExecutionContext& context) const override {
@@ -137,7 +139,9 @@ class GRUUnitGradKernel : public framework::OpKernel<T> {
     else if (act_type == relu)
       ReluGradFunctor<T>()(d, x, y, dy, dx);
     else
-      PADDLE_THROW("unsupported activation type");
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Unsupported activation type, only supports identity, sigmoid, tanh "
+          "and relu."));
   }
   void Compute(const framework::ExecutionContext& context) const override {

@@ -104,12 +104,13 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) {
   auto dim_x = ctx->GetInputDim("X");
   auto interp_method = ctx->Attrs().Get<std::string>("interp_method");
-  PADDLE_ENFORCE(
-      "bilinear" == interp_method || "nearest" == interp_method ||
-          "bicubic" == interp_method,
-      "Interpolation method can only be \"bilinear\" or \"nearest\" when "
-      "Input(X) dimension is 4, but got method = %s .",
-      interp_method);
+  PADDLE_ENFORCE_EQ("bilinear" == interp_method || "nearest" == interp_method ||
+                        "bicubic" == interp_method,
+                    true, platform::errors::InvalidArgument(
+                              "Interpolation method can only be \"bilinear\" "
+                              "or \"nearest\" or \"bicubic\" when "
+                              "Input(X) dimension is 4, but got method is %s.",
+                              interp_method));
   const DataLayout data_layout = framework::StringToDataLayout(
       ctx->Attrs().Get<std::string>("data_layout"));
@@ -169,13 +170,13 @@ static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) {
     auto out_size_dim = ctx->GetInputDim("OutSize");
     PADDLE_ENFORCE_EQ(
         out_size_dim.size(), 1,
-        platform::errors::InvalidArgument(
-            "OutSize's dimension size must be 1, but got dimension = %d .",
-            out_size_dim.size()));
+        platform::errors::InvalidArgument("OutSize's dimension size must be 1, "
+                                          "but got dimension size is %d .",
+                                          out_size_dim.size()));
     PADDLE_ENFORCE_EQ(
         out_size_dim[0], 2,
         platform::errors::InvalidArgument(
-            "OutSize's dim[0] must be 2, but got dimention = %d .",
+            "OutSize's dimension[0] must be 2, but got dimension[0] is %d .",
             out_size_dim[0]));
     ctx->ShareLoD("X", "Out");
     return;
@@ -264,12 +265,15 @@ static void Interpolate3DInferShapeCheck(framework::InferShapeContext* ctx) {
   if (ctx->HasInput("OutSize") && ctx->IsRuntime()) {
     auto out_size_dim = ctx->GetInputDim("OutSize");
-    PADDLE_ENFORCE_EQ(out_size_dim.size(), 1,
-                      "OutSize's dimension size must be 1, but got size =%d .",
-                      out_size_dim.size());
+    PADDLE_ENFORCE_EQ(
+        out_size_dim.size(), 1,
+        platform::errors::InvalidArgument(
+            "OutSize's dimension size must be 1, but got size is %d.",
+            out_size_dim.size()));
     PADDLE_ENFORCE_EQ(out_size_dim[0], 3,
-                      "OutSize's dim[0] must be 3, but got size = %d .",
-                      out_size_dim[0]);
+                      platform::errors::InvalidArgument(
+                          "OutSize's dim[0] must be 3, but got size is %d.",
+                          out_size_dim[0]));
     ctx->ShareLoD("X", "Out");
     return;
   }
@@ -289,10 +293,8 @@ class InterpolateOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of InterpolateOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of InterpolationOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Interpolate");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Interpolate");
     auto dim_x = ctx->GetInputDim("X");  // NCHW format
     PADDLE_ENFORCE(
@@ -534,9 +536,10 @@ class InterpolateOpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InterpolateGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "InterpolateGrad");
     auto dim_x = ctx->GetInputDim("X");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), dim_x);

@@ -44,8 +44,10 @@ class MergeLoDTensorOp : public framework::OperatorBase {
         scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
     auto level = static_cast<size_t>(Attr<int>("level"));
-    PADDLE_ENFORCE(in_true.numel() || in_false.numel(),
-                   "Input(InTrue) or Input(InFalse) should be initialized.");
+    PADDLE_ENFORCE_EQ(
+        in_true.numel() || in_false.numel(), true,
+        platform::errors::InvalidArgument(
+            "Input(InTrue) or Input(InFalse) should be initialized."));
     auto &mask_dim = mask.dims();
     std::unique_ptr<framework::LoDTensor> cpu_mask{new framework::LoDTensor()};
@@ -56,7 +58,9 @@ class MergeLoDTensorOp : public framework::OperatorBase {
       framework::TensorCopy(mask, platform::CPUPlace(), dev_ctx,
                             cpu_mask.get());
 #else
-      PADDLE_THROW("Not supported GPU, Please compile WITH_GPU option");
+      PADDLE_THROW(platform::errors::PreconditionNotMet(
+          "Not supported GPU, Please recompile or reinstall paddle with CUDA "
+          "support."));
 #endif
     }
     auto *mask_data = cpu_mask->data<bool>();
@@ -109,7 +113,11 @@ class MergeLoDTensorOp : public framework::OperatorBase {
       size_t start_offset = lod_and_offset.second.first;
       size_t end_offset = lod_and_offset.second.second;
-      PADDLE_ENFORCE_GE(end_offset, start_offset);
+      PADDLE_ENFORCE_GE(end_offset, start_offset,
+                        platform::errors::InvalidArgument(
+                            "The end offset less than start offset, end offset "
+                            "is %d, start offset is %d.",
+                            end_offset, start_offset));
       size_t len = end_offset - start_offset;
       if (len == 0) {
         continue;
@@ -189,22 +197,24 @@ class MergeLoDTensorInferShape : public framework::InferShapeBase {
                    "merge_lod_tensor");
     auto mask_dim = context->GetInputDim("Mask");
     PADDLE_ENFORCE_EQ(mask_dim.size(), 2,
-                      "If you are using IfElse OP:"
-                      "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
-                      "ie.true_block():\n out_1 = ie.input(x)\n\n"
-                      "Please ensure that the cond should be a 2-D tensor and "
-                      "the second dim size of cond should be 1. "
-                      "But now the cond's shape is [",
-                      *mask_dim.Get(), "].\n");
+                      platform::errors::InvalidArgument(
+                          "If you are using IfElse OP:"
+                          "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
+                          "ie.true_block():\n out_1 = ie.input(x)\n\n"
+                          "Please ensure that the cond is a 2-D tensor and "
+                          "the second dim size of cond is 1. "
+                          "But now the cond's shape is [%s].\n",
+                          mask_dim));
     if (context->IsRuntime() || mask_dim[1] > 0) {
       PADDLE_ENFORCE_EQ(mask_dim[1], 1,
-                        "If you are using IfElse OP:"
-                        "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
-                        "ie.true_block():\n out_1 = ie.input(x)\n\n"
-                        "Please ensure that the cond should be a 2-D tensor "
-                        "and the second dim size of cond should be 1. "
-                        "But now the cond's shape is [",
-                        *mask_dim.Get(), "].\n");
+                        platform::errors::InvalidArgument(
+                            "If you are using IfElse OP:"
+                            "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
+                            "ie.true_block():\n out_1 = ie.input(x)\n\n"
+                            "Please ensure that the cond is a 2-D tensor "
+                            "and the second dim size of cond is 1. "
+                            "But now the cond's shape is [%s].\n",
+                            mask_dim));
     }
     context->SetOutputDim("Out", context->GetInputDim("InTrue"));

@@ -60,20 +60,33 @@ inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx,
   auto place = ctx.GetPlace();
   PADDLE_ENFORCE_EQ(src_stride_numel.size(), dst_stride_numel.size(),
-                    "src and dst tensor should have the same dims size.");
+                    platform::errors::InvalidArgument(
+                        "Source and destination tensor should have the same "
+                        "dimension size, but source tensor dimension size is "
+                        "%u, destination tensor size is %u.",
+                        src_stride_numel.size(), dst_stride_numel.size()));
   for (int64_t i = 0; i < axis; ++i) {
     if (i < axis) {
-      PADDLE_ENFORCE_EQ(src_stride_numel[i] / src_stride_numel[axis],
-                        dst_stride_numel[i] / dst_stride_numel[axis],
-                        "src and dst should have the same elements "
-                        "except the specified axis.");
+      PADDLE_ENFORCE_EQ(
+          src_stride_numel[i] / src_stride_numel[axis],
+          dst_stride_numel[i] / dst_stride_numel[axis],
+          platform::errors::InvalidArgument(
+              "Source and destination tensor should have the same number of "
+              "elements except the specified axis, but the source elements "
+              "number is %d, destination elements number is %d.",
+              src_stride_numel[i] / src_stride_numel[axis],
+              dst_stride_numel[i] / dst_stride_numel[axis]));
     } else if (i == axis) {
       continue;
     } else {
-      PADDLE_ENFORCE_EQ(src_stride_numel[i], dst_stride_numel[i],
-                        "src and dst should have the same elements "
-                        "except the specified axis.");
+      PADDLE_ENFORCE_EQ(
+          src_stride_numel[i], dst_stride_numel[i],
+          platform::errors::InvalidArgument(
+              "Source and destination tensor should have the same number of "
+              "elements except the specified axis, but the source elements "
+              "number is %d, destination elements number is %d.",
+              src_stride_numel[i], dst_stride_numel[i]));
     }
   }
@@ -90,7 +103,8 @@ inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx,
       memory::Copy(gpu_place, dst + i * dst_after, gpu_place,
                    src + i * src_after, sizeof(T) * size, cuda_ctx.stream());
 #else
-      PADDLE_THROW("Paddle is not compiled with GPU");
+      PADDLE_THROW(platform::errors::PreconditionNotMet(
+          "Paddle is not compiled with GPU."));
 #endif
     }
   }

@@ -78,21 +78,35 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const {
       platform::errors::NotFound("Col(Output) of VarConv2dOP is not found."));
   auto x_dims = ctx->GetInputDim("X");
-  PADDLE_ENFORCE_EQ(x_dims.size(), 2,
-                    "The rank of X(Input) can't be less than 2.");
+  PADDLE_ENFORCE_EQ(
+      x_dims.size(), 2,
+      platform::errors::InvalidArgument(
+          "The rank of X(Input) can't be less than 2, but received rank is %u.",
+          x_dims.size()));
   auto w_dims = ctx->GetInputDim("W");
-  PADDLE_ENFORCE_EQ(w_dims.size(), 2, "W should be 2-D tensor");
+  PADDLE_ENFORCE_EQ(
+      w_dims.size(), 2,
+      platform::errors::InvalidArgument(
+          "Input W should be a 2-D tensor, but its actual dimension is %u.",
+          w_dims.size()));
   int output_channel = ctx->Attrs().Get<int>("OutputChannel");
   int input_channel = ctx->Attrs().Get<int>("InputChannel");
   int kernel_h = ctx->Attrs().Get<int>("KernelH");
   int kernel_w = ctx->Attrs().Get<int>("KernelW");
-  PADDLE_ENFORCE_EQ(w_dims[0], output_channel,
-                    "W dim[0] should be equal to OutputChannel");
+  PADDLE_ENFORCE_EQ(
+      w_dims[0], output_channel,
+      platform::errors::InvalidArgument(
+          "Input W's dimension[0] should be equal to OutputChannel, the "
+          "dimension[0] is %d, OutputChannel is %d.",
+          w_dims[0], output_channel));
   PADDLE_ENFORCE_EQ(
       w_dims[1], input_channel * kernel_h * kernel_w,
-      "W dim[1] should be equal to InputChannel * StrideH * StrideW");
+      platform::errors::InvalidArgument(
+          "Input W's dimension[1] should be equal to InputChannel * StrideH * "
+          "StrideW, the dimension[1] is %d, expected value is %d.",
+          w_dims[1], input_channel * kernel_h * kernel_w));
   if (ctx->IsRuntime()) {
     framework::Variable* x_var =
@@ -103,10 +117,14 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const {
         platform::errors::InvalidArgument("The Input(X) Tensor of VarConv2dOP "
                                           "does not contain LoD information."));
-    PADDLE_ENFORCE_GE(x_lod.size(), 1, "The Input(X)'s lod info is corrupted.");
-    PADDLE_ENFORCE_EQ(
-        x_dims[0], static_cast<int64_t>(x_lod[0].back()),
-        "The Input(X)'s lod info mismatches the actual tensor shape.");
+    PADDLE_ENFORCE_GE(x_lod.size(), 1,
+                      platform::errors::InvalidArgument(
+                          "The Input(X)'s lod info is corrupted."));
+    PADDLE_ENFORCE_EQ(x_dims[0], static_cast<int64_t>(x_lod[0].back()),
+                      platform::errors::InvalidArgument(
+                          "The Input(X)'s lod info mismatches the actual "
+                          "tensor shape, input lod is %s, tensor shape is %s.",
+                          x_lod, x_dims));
     framework::Variable* row_var =
         BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("ROW")[0]);