Commit 165a7bd5 authored by phlrain

fix many shape checks; test=develop

Parent 4267a81a
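
This commit wraps exact shape checks in the operators' InferShape methods in ctx->IsRuntime() guards. At graph-construction (compile) time a dimension such as the batch size may still be unknown and is recorded as -1, so a strict equality or divisibility check could fail on a perfectly valid graph; at runtime every dimension is concrete and the full check is safe. A minimal sketch of the pattern with a hypothetical ExampleOp (the operator, its inputs, and the messages are illustrative, not part of this commit):

#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

class ExampleOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext *ctx) const override {
    auto x_dims = ctx->GetInputDim("X");
    auto y_dims = ctx->GetInputDim("Y");
    if (ctx->IsRuntime()) {
      // At runtime all dimensions are concrete, so the exact check is safe.
      PADDLE_ENFORCE_EQ(x_dims, y_dims,
                        "The shapes of X and Y must be the same.");
    }
    // At compile time unknown dimensions are -1; the check is either skipped
    // (as here) or, as in several operators below, applied only to the
    // dimensions that are already known on both sides.
    ctx->SetOutputDim("Out", x_dims);
    ctx->ShareLoD("X", /*->*/ "Out");
  }
};

}  // namespace operators
}  // namespace paddle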
......@@ -81,8 +81,10 @@ class WriteToArrayInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
PADDLE_ENFORCE(context->HasInput("I"), "Must set the subscript index");
PADDLE_ENFORCE_EQ(framework::product(context->GetInputDim("I")), 1,
"The number of element of subscript index must be 1");
if (context->IsRuntime()) {
PADDLE_ENFORCE_EQ(framework::product(context->GetInputDim("I")), 1,
"The number of element of subscript index must be 1");
}
if (!context->HasInput("X")) {
return;
}
......
......@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/data_norm_op.h"
#include <memory>
#include <string>
#include "paddle/fluid/framework/data_layout.h"
#ifdef PADDLE_WITH_MKLDNN
......@@ -65,9 +66,11 @@ class DataNormOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize").size(), 1UL);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum").size(), 1UL);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum").size(), 1UL);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize")[0], C);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum")[0], C);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum")[0], C);
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize")[0], C);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum")[0], C);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum")[0], C);
}
ctx->SetOutputDim("Y", x_dims);
ctx->SetOutputDim("Means", {C});
......
......@@ -31,13 +31,27 @@ class HuberLossOp : public framework::OperatorWithKernel {
auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y");
PADDLE_ENFORCE_EQ(x_dims, y_dims);
PADDLE_ENFORCE_EQ(x_dims.size(), 2,
"The rank of Input(X) must be 2 and the shape is "
"[batch_size, 1].");
PADDLE_ENFORCE_EQ(x_dims[1], 1,
"Each row of Input(X) contains a real value, "
"so the 2nd dimension of Input(X) must be 1.");
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(x_dims, y_dims, "Shape of X and Y should be same");
} else {
if (x_dims[0] != -1 && y_dims[0] != -1) {
PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0],
"The dim 0 of X and Y must be the same.");
}
if (x_dims[1] != -1 && y_dims[1] != -1) {
PADDLE_ENFORCE_EQ(x_dims[1], y_dims[1],
"The dim 1 of X and Y must be the same.");
}
}
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(x_dims[1], 1,
"Each row of Input(X) contains a real value, "
"so the 2nd dimension of Input(X) must be 1.");
}
ctx->SetOutputDim("Residual", x_dims);
ctx->SetOutputDim("Out", {x_dims[0], 1});
......
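
The compile-time branch above compares a dimension only when it is known on both sides; -1 marks a value (typically the batch size) that is resolved at runtime. When an operator needs this on several axes, the guard can be factored into a small helper; a hedged sketch (the helper is hypothetical, not part of this commit):

#include <cstdint>
#include "paddle/fluid/platform/enforce.h"

// Hypothetical helper: enforce equality of two dims only when both are
// statically known, i.e. neither is the -1 placeholder used at compile time.
static void EnforceDimEqualIfKnown(int64_t x_dim, int64_t y_dim,
                                   const char *msg) {
  if (x_dim != -1 && y_dim != -1) {
    PADDLE_ENFORCE_EQ(x_dim, y_dim, msg);
  }
}

// Equivalent to the compile-time branch of HuberLossOp above:
//   EnforceDimEqualIfKnown(x_dims[0], y_dims[0],
//                          "The dim 0 of X and Y must be the same.");
//   EnforceDimEqualIfKnown(x_dims[1], y_dims[1],
//                          "The dim 1 of X and Y must be the same.");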
......@@ -46,11 +46,18 @@ class LayerNormOp : public framework::OperatorWithKernel {
int right = static_cast<int>(matrix_dim[1]);
if (ctx->HasInput("Scale")) {
PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right);
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right,
"scale should with right");
}
}
if (ctx->HasInput("Bias")) {
PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1);
PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right);
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right,
"bias should with right");
}
}
ctx->SetOutputDim("Y", ctx->GetInputDim("X"));
......
......@@ -40,30 +40,40 @@ class PrecisionRecallOp : public framework::OperatorWithKernel {
auto max_probs_dims = ctx->GetInputDim("MaxProbs");
auto labels_dims = ctx->GetInputDim("Labels");
PADDLE_ENFORCE_EQ(max_probs_dims[1], 1,
"Each instance contains one max probability, so the "
"shape of Input(MaxProbs) should be [batch_size, 1].");
PADDLE_ENFORCE_EQ(ctx->GetInputDim("Indices"), max_probs_dims,
"The shape of Input(Indices) should be [batch_size, 1].");
PADDLE_ENFORCE_EQ(max_probs_dims[0], labels_dims[0],
"The 1st dimension of Input(MaxProbs) and "
"Input(Labels) both are batch_size and the shape should "
"be the same.");
PADDLE_ENFORCE_EQ(labels_dims[1], 1,
"The 2nd dimension of Input(Labels) contains instance "
"label and the shape should be equal to 1.");
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(max_probs_dims[1], 1,
"Each instance contains one max probability, so the "
"shape of Input(MaxProbs) should be [batch_size, 1].");
      PADDLE_ENFORCE_EQ(
          ctx->GetInputDim("Indices"), max_probs_dims,
          "The shape of Input(Indices) should be the same as that of Input(MaxProbs)");
PADDLE_ENFORCE_EQ(
max_probs_dims[0], labels_dims[0],
"The 1st dimension of Input(MaxProbs) and "
"Input(Labels) both are batch_size and the shape should "
"be the same.");
PADDLE_ENFORCE_EQ(labels_dims[1], 1,
"The 2nd dimension of Input(Labels) contains instance "
"label and the shape should be equal to 1.");
}
if (ctx->HasInput("Weights")) {
auto weights_dims = ctx->GetInputDim("Weights");
PADDLE_ENFORCE_EQ(weights_dims,
framework::make_ddim({max_probs_dims[0], 1}),
"The shape of Input(Weights) should be "
"[batch_size, 1].");
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(weights_dims,
framework::make_ddim({max_probs_dims[0], 1}),
"The shape of Input(Weights) should be "
"[batch_size, 1].");
}
}
if (ctx->HasInput("StatesInfo")) {
auto states_dims = ctx->GetInputDim("StatesInfo");
PADDLE_ENFORCE_EQ(states_dims, framework::make_ddim({cls_num, 4}),
"The shape of Input(StatesInfo) should be "
"[class_number, 4].");
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(states_dims, framework::make_ddim({cls_num, 4}),
"The shape of Input(StatesInfo) should be "
"[class_number, 4].");
}
}
// Layouts of BatchMetrics and AccumMetrics both are:
......
......@@ -14,6 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/minus_op.h"
#include <memory>
#include <string>
#include <vector>
......@@ -38,9 +39,17 @@ class MinusOp : public framework::OperatorWithKernel {
auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y");
PADDLE_ENFORCE_EQ(
x_dims, y_dims,
"Minus operator must take two tensor with same num of elements");
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(
x_dims, y_dims,
"Minus operator must take two tensor with same num of elements");
} else {
if (framework::product(x_dims) > 0 && framework::product(y_dims) > 0) {
PADDLE_ENFORCE_EQ(
x_dims, y_dims,
"Minus operator must take two tensor with same num of elements");
}
}
ctx->SetOutputDim("Out", x_dims);
ctx->ShareLoD("X", /*->*/ "Out");
}
......
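
For the minus operator the compile-time branch keys off framework::product rather than per-dimension -1 checks: the equality check only runs at graph-construction time when both shapes multiply out to a positive element count. A small illustration (the values are hypothetical):

// framework::product multiplies all dimension values, so a shape that still
// carries the -1 placeholder for its batch dimension yields a negative
// product here and the equality check above is deferred to runtime.
auto known_dims   = framework::make_ddim({8, 16});   // product = 128 -> checked
auto unknown_dims = framework::make_ddim({-1, 16});  // product = -16 -> deferred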
......@@ -28,9 +28,25 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel {
auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y");
PADDLE_ENFORCE_EQ(x_dims, y_dims, "The shape of X and Y must be the same.");
PADDLE_ENFORCE_EQ(x_dims.size(), 2, "The tensor rank of X must be 2.");
PADDLE_ENFORCE_EQ(x_dims[1], 1, "The 2nd dimension of X must be 1.");
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(x_dims, y_dims,
"The shape of X and Y must be the same.");
} else {
if (x_dims[0] != -1 && y_dims[0] != -1) {
PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0],
"The dim 0 of X and Y must be the same.");
}
if (x_dims[1] != -1 && y_dims[1] != -1) {
PADDLE_ENFORCE_EQ(x_dims[1], y_dims[1],
"The dim 1 of X and Y must be the same.");
}
}
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(x_dims[1], 1, "The 2nd dimension of X must be 1.");
}
ctx->SetOutputDim("IntermediateVal", x_dims);
ctx->SetOutputDim("Out", {x_dims[0], 1});
......@@ -90,11 +106,13 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel {
auto intermediate_dims = ctx->GetInputDim("IntermediateVal");
auto out_grad_dims = ctx->GetInputDim(framework::GradVarName("Out"));
PADDLE_ENFORCE_EQ(
intermediate_dims, x_dims,
"The shape of X and intermediate value must be the same.");
PADDLE_ENFORCE_EQ(out_grad_dims, x_dims,
"The shape of Input(Out@Grad) and X must be the same.");
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(
intermediate_dims, x_dims,
"The shape of X and intermediate value must be the same.");
PADDLE_ENFORCE_EQ(out_grad_dims, x_dims,
"The shape of Input(Out@Grad) and X must be the same.");
}
if (ctx->HasOutput(framework::GradVarName("X"))) {
ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
......
......@@ -40,19 +40,44 @@ class SpaceToDepthOp : public framework::OperatorWithKernel {
auto blocksize = ctx->Attrs().Get<int64_t>("blocksize");
PADDLE_ENFORCE_GT(blocksize, 1, "The blocksize should be Greater than 1");
PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be Greater than 0");
PADDLE_ENFORCE_GT(x_dims[2], 0, "input Height should be Greater than 0");
PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
"input channel should be divisible of the square of "
"SpaceToDepthOp blocksize");
PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
"input Height should be divisible of the square of "
"SpaceToDepthOp blocksize");
PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
"input Width should be divisible of the square of "
"SpaceToDepthOp blocksize");
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be Greater than 0");
PADDLE_ENFORCE_GT(x_dims[2], 0, "input Height should be Greater than 0");
PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
"input channel should be divisible of the square of "
"SpaceToDepthOp blocksize");
PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
"input Height should be divisible of the square of "
"SpaceToDepthOp blocksize");
PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
"input Width should be divisible of the square of "
"SpaceToDepthOp blocksize");
} else {
      if (x_dims[1] != -1) {
        PADDLE_ENFORCE_GT(x_dims[1], 0,
                          "input channel should be greater than 0");
        PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
                          "input channel should be divisible by the square of "
                          "SpaceToDepthOp blocksize");
      }
      if (x_dims[2] != -1) {
        PADDLE_ENFORCE_GT(x_dims[2], 0,
                          "input height should be greater than 0");
        PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
                          "input height should be divisible by the "
                          "SpaceToDepthOp blocksize");
      }
      if (x_dims[3] != -1) {
        PADDLE_ENFORCE_GT(x_dims[3], 0, "input width should be greater than 0");
        PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
                          "input width should be divisible by the "
                          "SpaceToDepthOp blocksize");
      }
}
VLOG(3) << "SpaceToDepthOp operator x.shape=" << x_dims
<< "Attribute blocksize" << blocksize << std::endl;
......
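
As a quick sanity check of the guards above (shapes are illustrative): with blocksize = 2, a compile-time input shape of [-1, 12, 6, 6] passes, since 12 % (2 * 2) == 0 and 6 % 2 == 0, while the unknown batch dimension is simply not inspected; a shape of [-1, 10, 6, 6] would be rejected because 10 is not divisible by 4. SpaceToDepth rearranges each blocksize x blocksize spatial block into the channel dimension, which is why the height and width must be divisible by blocksize.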
......@@ -64,17 +64,38 @@ class TreeConvOp : public framework::OperatorWithKernel {
auto edge_dims = ctx->GetInputDim("EdgeSet");
auto vector_dims = ctx->GetInputDim("NodesVector");
auto filter_dims = ctx->GetInputDim("Filter");
PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
} else {
if (edge_dims[2] != -1) {
PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
}
}
PADDLE_ENFORCE_EQ(edge_dims.size(), 3,
"The dimension of EdgeSet Tensor should be 3");
PADDLE_ENFORCE_EQ(vector_dims.size(), 3,
"The dimension of NodesVector Tensor should be 3");
PADDLE_ENFORCE_EQ(filter_dims.size(), 4,
"The dimension of Filter Tensor should be 4");
PADDLE_ENFORCE_EQ(filter_dims[1], 3, "Input(Filter) dim[1] should be 3");
PADDLE_ENFORCE_EQ(
filter_dims[0], vector_dims[2],
"Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(filter_dims[1], 3, "Input(Filter) dim[1] should be 3");
PADDLE_ENFORCE_EQ(
filter_dims[0], vector_dims[2],
"Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
} else {
if (filter_dims[1] != -1) {
PADDLE_ENFORCE_EQ(filter_dims[1], 3,
"Input(Filter) dim[1] should be 3");
}
if (filter_dims[0] != -1 && vector_dims[2] != -1) {
PADDLE_ENFORCE_EQ(
filter_dims[0], vector_dims[2],
"Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
}
}
auto output_dims = framework::make_ddim(
{vector_dims[0], vector_dims[1], filter_dims[2], filter_dims[3]});
ctx->SetOutputDim("Out", output_dims);
......