From 8454038498b5d93593de4fdf9509e8eab41443db Mon Sep 17 00:00:00 2001
From: Hongyu Liu <43953930+phlrain@users.noreply.github.com>
Date: Wed, 17 Apr 2019 09:43:38 +0800
Subject: [PATCH] Merge pull request #16840 from phlrain/fix_shape_check_many

fix shape check many by hongyu
---
 .../controlflow/tensor_array_read_write_op.cc |  6 ++-
 paddle/fluid/operators/data_norm_op.cc        |  9 ++--
 paddle/fluid/operators/huber_loss_op.cc       | 13 +++--
 paddle/fluid/operators/layer_norm_op.cc       | 11 +++-
 .../operators/metrics/precision_recall_op.cc  | 48 ++++++++++-------
 paddle/fluid/operators/minus_op.cc            | 10 ++--
 .../fluid/operators/modified_huber_loss_op.cc | 23 ++++++---
 paddle/fluid/operators/space_to_depth_op.cc   | 51 ++++++++++++++-----
 paddle/fluid/operators/tree_conv_op.cc        | 31 +++++++++--
 9 files changed, 144 insertions(+), 58 deletions(-)

diff --git a/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc b/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
index 45f18ac9255..2ca5242c5c9 100644
--- a/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
+++ b/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
@@ -81,8 +81,10 @@ class WriteToArrayInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
     PADDLE_ENFORCE(context->HasInput("I"), "Must set the subscript index");
-    PADDLE_ENFORCE_EQ(framework::product(context->GetInputDim("I")), 1,
-                      "The number of element of subscript index must be 1");
+    if (context->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(framework::product(context->GetInputDim("I")), 1,
+                        "The number of elements of subscript index must be 1");
+    }
     if (!context->HasInput("X")) {
       return;
     }
diff --git a/paddle/fluid/operators/data_norm_op.cc b/paddle/fluid/operators/data_norm_op.cc
index 45bce6e5203..a5c76db6fa4 100644
--- a/paddle/fluid/operators/data_norm_op.cc
+++ b/paddle/fluid/operators/data_norm_op.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
*/ #include "paddle/fluid/operators/data_norm_op.h" +#include #include #include "paddle/fluid/framework/data_layout.h" #ifdef PADDLE_WITH_MKLDNN @@ -65,9 +66,11 @@ class DataNormOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize").size(), 1UL); PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum").size(), 1UL); PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum").size(), 1UL); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize")[0], C); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum")[0], C); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum")[0], C); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize")[0], C); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum")[0], C); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum")[0], C); + } ctx->SetOutputDim("Y", x_dims); ctx->SetOutputDim("Means", {C}); diff --git a/paddle/fluid/operators/huber_loss_op.cc b/paddle/fluid/operators/huber_loss_op.cc index 253b65a5f33..11bd5906727 100644 --- a/paddle/fluid/operators/huber_loss_op.cc +++ b/paddle/fluid/operators/huber_loss_op.cc @@ -28,13 +28,18 @@ class HuberLossOp : public framework::OperatorWithKernel { auto x_dims = ctx->GetInputDim("X"); auto y_dims = ctx->GetInputDim("Y"); - PADDLE_ENFORCE_EQ(x_dims, y_dims); PADDLE_ENFORCE_EQ(x_dims.size(), 2, "The rank of Input(X) must be 2 and the shape is " "[batch_size, 1]."); - PADDLE_ENFORCE_EQ(x_dims[1], 1, - "Each row of Input(X) contains a real value, " - "so the 2nd dimension of Input(X) must be 1."); + if (ctx->IsRuntime() || + (framework::product(x_dims) > 0 && framework::product(y_dims) > 0)) { + PADDLE_ENFORCE_EQ(x_dims, y_dims, "Shape of X and Y should be same"); + } + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(x_dims[1], 1, + "Each row of Input(X) contains a real value, " + "so the 2nd dimension of Input(X) must be 1."); + } ctx->SetOutputDim("Residual", x_dims); ctx->SetOutputDim("Out", {x_dims[0], 1}); diff --git a/paddle/fluid/operators/layer_norm_op.cc b/paddle/fluid/operators/layer_norm_op.cc index 9b1a854a312..1aac60ef36c 100644 --- a/paddle/fluid/operators/layer_norm_op.cc +++ b/paddle/fluid/operators/layer_norm_op.cc @@ -46,11 +46,18 @@ class LayerNormOp : public framework::OperatorWithKernel { int right = static_cast(matrix_dim[1]); if (ctx->HasInput("Scale")) { PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right); + + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right, + "scale should with right"); + } } if (ctx->HasInput("Bias")) { PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right, + "bias should with right"); + } } ctx->SetOutputDim("Y", ctx->GetInputDim("X")); diff --git a/paddle/fluid/operators/metrics/precision_recall_op.cc b/paddle/fluid/operators/metrics/precision_recall_op.cc index 1a67b134914..f6d6ffc668c 100644 --- a/paddle/fluid/operators/metrics/precision_recall_op.cc +++ b/paddle/fluid/operators/metrics/precision_recall_op.cc @@ -40,30 +40,40 @@ class PrecisionRecallOp : public framework::OperatorWithKernel { auto max_probs_dims = ctx->GetInputDim("MaxProbs"); auto labels_dims = ctx->GetInputDim("Labels"); - PADDLE_ENFORCE_EQ(max_probs_dims[1], 1, - "Each instance contains one max probability, so the " - "shape of Input(MaxProbs) should be [batch_size, 1]."); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Indices"), max_probs_dims, - "The shape of 
-    PADDLE_ENFORCE_EQ(max_probs_dims[0], labels_dims[0],
-                      "The 1st dimension of Input(MaxProbs) and "
-                      "Input(Labels) both are batch_size and the shape should "
-                      "be the same.");
-    PADDLE_ENFORCE_EQ(labels_dims[1], 1,
-                      "The 2nd dimension of Input(Labels) contains instance "
-                      "label and the shape should be equal to 1.");
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(max_probs_dims[1], 1,
+                        "Each instance contains one max probability, so the "
+                        "shape of Input(MaxProbs) should be [batch_size, 1].");
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("Indices"), max_probs_dims,
+          "The shape of Input(Indices) should be the same as Input(MaxProbs)");
+      PADDLE_ENFORCE_EQ(
+          max_probs_dims[0], labels_dims[0],
+          "The 1st dimension of Input(MaxProbs) and "
+          "Input(Labels) both are batch_size and the shape should "
+          "be the same.");
+      PADDLE_ENFORCE_EQ(labels_dims[1], 1,
+                        "The 2nd dimension of Input(Labels) contains instance "
+                        "label and the shape should be equal to 1.");
+    }
     if (ctx->HasInput("Weights")) {
       auto weights_dims = ctx->GetInputDim("Weights");
-      PADDLE_ENFORCE_EQ(weights_dims,
-                        framework::make_ddim({max_probs_dims[0], 1}),
-                        "The shape of Input(Weights) should be "
-                        "[batch_size, 1].");
+
+      if (ctx->IsRuntime()) {
+        PADDLE_ENFORCE_EQ(weights_dims,
+                          framework::make_ddim({max_probs_dims[0], 1}),
+                          "The shape of Input(Weights) should be "
+                          "[batch_size, 1].");
+      }
     }
     if (ctx->HasInput("StatesInfo")) {
       auto states_dims = ctx->GetInputDim("StatesInfo");
-      PADDLE_ENFORCE_EQ(states_dims, framework::make_ddim({cls_num, 4}),
-                        "The shape of Input(StatesInfo) should be "
-                        "[class_number, 4].");
+
+      if (ctx->IsRuntime()) {
+        PADDLE_ENFORCE_EQ(states_dims, framework::make_ddim({cls_num, 4}),
+                          "The shape of Input(StatesInfo) should be "
+                          "[class_number, 4].");
+      }
     }
 
     // Layouts of BatchMetrics and AccumMetrics both are:
diff --git a/paddle/fluid/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc
index 34571a38a14..02a90d77b6e 100644
--- a/paddle/fluid/operators/minus_op.cc
+++ b/paddle/fluid/operators/minus_op.cc
@@ -14,6 +14,7 @@ limitations under the License. */
*/ #include "paddle/fluid/operators/minus_op.h" +#include #include #include @@ -38,9 +39,12 @@ class MinusOp : public framework::OperatorWithKernel { auto x_dims = ctx->GetInputDim("X"); auto y_dims = ctx->GetInputDim("Y"); - PADDLE_ENFORCE_EQ( - x_dims, y_dims, - "Minus operator must take two tensor with same num of elements"); + if (ctx->IsRuntime() || + (framework::product(x_dims) > 0 && framework::product(y_dims) > 0)) { + PADDLE_ENFORCE_EQ( + x_dims, y_dims, + "Minus operator must take two tensor with same num of elements"); + } ctx->SetOutputDim("Out", x_dims); ctx->ShareLoD("X", /*->*/ "Out"); } diff --git a/paddle/fluid/operators/modified_huber_loss_op.cc b/paddle/fluid/operators/modified_huber_loss_op.cc index 9954e51083b..14d75aee754 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.cc +++ b/paddle/fluid/operators/modified_huber_loss_op.cc @@ -28,9 +28,16 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel { auto x_dims = ctx->GetInputDim("X"); auto y_dims = ctx->GetInputDim("Y"); - PADDLE_ENFORCE_EQ(x_dims, y_dims, "The shape of X and Y must be the same."); PADDLE_ENFORCE_EQ(x_dims.size(), 2, "The tensor rank of X must be 2."); - PADDLE_ENFORCE_EQ(x_dims[1], 1, "The 2nd dimension of X must be 1."); + if (ctx->IsRuntime() || + (framework::product(x_dims) > 0 && framework::product(y_dims) > 0)) { + PADDLE_ENFORCE_EQ(x_dims, y_dims, + "The shape of X and Y must be the same."); + } + + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(x_dims[1], 1, "The 2nd dimension of X must be 1."); + } ctx->SetOutputDim("IntermediateVal", x_dims); ctx->SetOutputDim("Out", {x_dims[0], 1}); @@ -90,11 +97,13 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel { auto intermediate_dims = ctx->GetInputDim("IntermediateVal"); auto out_grad_dims = ctx->GetInputDim(framework::GradVarName("Out")); - PADDLE_ENFORCE_EQ( - intermediate_dims, x_dims, - "The shape of X and intermediate value must be the same."); - PADDLE_ENFORCE_EQ(out_grad_dims, x_dims, - "The shape of Input(Out@Grad) and X must be the same."); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ( + intermediate_dims, x_dims, + "The shape of X and intermediate value must be the same."); + PADDLE_ENFORCE_EQ(out_grad_dims, x_dims, + "The shape of Input(Out@Grad) and X must be the same."); + } if (ctx->HasOutput(framework::GradVarName("X"))) { ctx->SetOutputDim(framework::GradVarName("X"), x_dims); diff --git a/paddle/fluid/operators/space_to_depth_op.cc b/paddle/fluid/operators/space_to_depth_op.cc index b579244673f..bf94fa82465 100644 --- a/paddle/fluid/operators/space_to_depth_op.cc +++ b/paddle/fluid/operators/space_to_depth_op.cc @@ -34,19 +34,44 @@ class SpaceToDepthOp : public framework::OperatorWithKernel { auto blocksize = ctx->Attrs().Get("blocksize"); PADDLE_ENFORCE_GT(blocksize, 1, "The blocksize should be Greater than 1"); - PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be Greater than 0"); - PADDLE_ENFORCE_GT(x_dims[2], 0, "input Height should be Greater than 0"); - PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0"); - - PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0, - "input channel should be divisible of the square of " - "SpaceToDepthOp blocksize"); - PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0, - "input Height should be divisible of the square of " - "SpaceToDepthOp blocksize"); - PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0, - "input Width should be divisible of the square of " - "SpaceToDepthOp blocksize"); + if (ctx->IsRuntime()) { + 
+      PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be Greater than 0");
+      PADDLE_ENFORCE_GT(x_dims[2], 0, "input Height should be Greater than 0");
+      PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
+
+      PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
+                        "input channel should be divisible by the square of "
+                        "SpaceToDepthOp blocksize");
+      PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
+                        "input Height should be divisible by "
+                        "SpaceToDepthOp blocksize");
+      PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
+                        "input Width should be divisible by "
+                        "SpaceToDepthOp blocksize");
+    } else {
+      if (x_dims[1] != -1) {
+        PADDLE_ENFORCE_GT(x_dims[1], 0,
+                          "input channel should be Greater than 0");
+        PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
+                          "input channel should be divisible by the square of "
+                          "SpaceToDepthOp blocksize");
+      }
+      if (x_dims[2] != -1) {
+        PADDLE_ENFORCE_GT(x_dims[2], 0,
+                          "input Height should be Greater than 0");
+        PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
+                          "input Height should be divisible by "
+                          "SpaceToDepthOp blocksize");
+      }
+
+      if (x_dims[3] != -1) {
+        PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
+
+        PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
+                          "input Width should be divisible by "
+                          "SpaceToDepthOp blocksize");
+      }
+    }
 
     VLOG(3) << "SpaceToDepthOp operator x.shape=" << x_dims
             << "Attribute blocksize" << blocksize << std::endl;
diff --git a/paddle/fluid/operators/tree_conv_op.cc b/paddle/fluid/operators/tree_conv_op.cc
index 615ea285e54..82dfbc205bc 100644
--- a/paddle/fluid/operators/tree_conv_op.cc
+++ b/paddle/fluid/operators/tree_conv_op.cc
@@ -62,17 +62,38 @@ class TreeConvOp : public framework::OperatorWithKernel {
     auto edge_dims = ctx->GetInputDim("EdgeSet");
     auto vector_dims = ctx->GetInputDim("NodesVector");
     auto filter_dims = ctx->GetInputDim("Filter");
-    PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
+
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
+    } else {
+      if (edge_dims[2] != -1) {
+        PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
+      }
+    }
     PADDLE_ENFORCE_EQ(edge_dims.size(), 3,
                       "The dimension of EdgeSet Tensor should be 3");
     PADDLE_ENFORCE_EQ(vector_dims.size(), 3,
                       "The dimension of NodesVector Tensor should be 3");
     PADDLE_ENFORCE_EQ(filter_dims.size(), 4,
                       "The dimension of Filter Tensor should be 4");
-    PADDLE_ENFORCE_EQ(filter_dims[1], 3, "Input(Filter) dim[1] should be 3");
-    PADDLE_ENFORCE_EQ(
-        filter_dims[0], vector_dims[2],
-        "Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
+
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(filter_dims[1], 3, "Input(Filter) dim[1] should be 3");
+      PADDLE_ENFORCE_EQ(
+          filter_dims[0], vector_dims[2],
+          "Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
+    } else {
+      if (filter_dims[1] != -1) {
+        PADDLE_ENFORCE_EQ(filter_dims[1], 3,
+                          "Input(Filter) dim[1] should be 3");
+      }
+
+      if (filter_dims[0] != -1 && vector_dims[2] != -1) {
+        PADDLE_ENFORCE_EQ(
+            filter_dims[0], vector_dims[2],
+            "Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
+      }
+    }
     auto output_dims = framework::make_ddim(
         {vector_dims[0], vector_dims[1], filter_dims[2], filter_dims[3]});
     ctx->SetOutputDim("Out", output_dims);
-- 
GitLab
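
Every file in this patch applies the same guard: an exact shape value is only asserted when it is actually known, either because InferShape runs at execution time (ctx->IsRuntime()) or because the dimension is not the -1 placeholder used while the graph is still being built. The stand-alone sketch below, which is not part of the patch, illustrates that pattern for the space_to_depth checks; SpaceToDepthShapeOk, the plain std::vector<int64_t> dims, and the is_runtime flag are simplified stand-ins for the real framework::DDim and InferShapeContext::IsRuntime().

#include <cassert>
#include <cstdint>
#include <vector>

// A dimension value of -1 stands for "unknown until runtime", mirroring how
// dynamic dims appear during graph construction. is_runtime stands in for
// InferShapeContext::IsRuntime(); the real operator works on framework::DDim.
bool SpaceToDepthShapeOk(const std::vector<int64_t>& x_dims, int64_t blocksize,
                         bool is_runtime) {
  if (x_dims.size() != 4 || blocksize <= 1) return false;
  // Only enforce a check when the dimension's value is actually known.
  auto known = [&](size_t i) { return is_runtime || x_dims[i] != -1; };
  // Channel must be divisible by blocksize^2, Height and Width by blocksize.
  if (known(1) && (x_dims[1] <= 0 || x_dims[1] % (blocksize * blocksize) != 0))
    return false;
  if (known(2) && (x_dims[2] <= 0 || x_dims[2] % blocksize != 0)) return false;
  if (known(3) && (x_dims[3] <= 0 || x_dims[3] % blocksize != 0)) return false;
  return true;
}

int main() {
  // Graph-building time: batch, height and width may still be unknown (-1);
  // the check must not fail just because -1 % blocksize != 0.
  assert(SpaceToDepthShapeOk({-1, 4, -1, -1}, 2, /*is_runtime=*/false));
  // Runtime: every dimension is concrete, so the full check applies.
  assert(SpaceToDepthShapeOk({8, 4, 6, 6}, 2, /*is_runtime=*/true));
  assert(!SpaceToDepthShapeOk({8, 4, 5, 6}, 2, /*is_runtime=*/true));
  return 0;
}

For whole-shape comparisons (huber_loss, minus, modified_huber_loss) the patch uses framework::product(dims) > 0 as the compile-time guard instead: the product of the dimensions is non-positive whenever a single dimension is still -1 or zero, so a positive product serves as a cheap "shape fully known" proxy before comparing the two shapes element-wise.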