diff --git a/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc b/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
index 45f18ac9255bdd75d8cbb5e1dd30ebba52260850..2ca5242c5c935e2156bf95689c53b0c29809c235 100644
--- a/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
+++ b/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
@@ -81,8 +81,10 @@ class WriteToArrayInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
     PADDLE_ENFORCE(context->HasInput("I"), "Must set the subscript index");
-    PADDLE_ENFORCE_EQ(framework::product(context->GetInputDim("I")), 1,
-                      "The number of element of subscript index must be 1");
+    if (context->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(framework::product(context->GetInputDim("I")), 1,
+                        "The number of element of subscript index must be 1");
+    }
     if (!context->HasInput("X")) {
       return;
     }
diff --git a/paddle/fluid/operators/data_norm_op.cc b/paddle/fluid/operators/data_norm_op.cc
index 45bce6e5203f8c1dbb744e0f954f7f0a71c53372..a5c76db6fa44217a558cfaecd2d7628168c11d78 100644
--- a/paddle/fluid/operators/data_norm_op.cc
+++ b/paddle/fluid/operators/data_norm_op.cc
@@ -13,6 +13,7 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/data_norm_op.h"
+#include
 #include "paddle/fluid/framework/data_layout.h"
 #ifdef PADDLE_WITH_MKLDNN
@@ -65,9 +66,11 @@ class DataNormOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize").size(), 1UL);
     PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum").size(), 1UL);
     PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum").size(), 1UL);
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize")[0], C);
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum")[0], C);
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum")[0], C);
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize")[0], C);
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum")[0], C);
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum")[0], C);
+    }
 
     ctx->SetOutputDim("Y", x_dims);
     ctx->SetOutputDim("Means", {C});
diff --git a/paddle/fluid/operators/huber_loss_op.cc b/paddle/fluid/operators/huber_loss_op.cc
index 253b65a5f33308fc2c94537641b0fa19378b0cc9..11bd5906727d4b5516c6d3b3580d3998b36f9f27 100644
--- a/paddle/fluid/operators/huber_loss_op.cc
+++ b/paddle/fluid/operators/huber_loss_op.cc
@@ -28,13 +28,18 @@ class HuberLossOp : public framework::OperatorWithKernel {
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
 
-    PADDLE_ENFORCE_EQ(x_dims, y_dims);
     PADDLE_ENFORCE_EQ(x_dims.size(), 2,
                       "The rank of Input(X) must be 2 and the shape is "
                       "[batch_size, 1].");
-    PADDLE_ENFORCE_EQ(x_dims[1], 1,
-                      "Each row of Input(X) contains a real value, "
-                      "so the 2nd dimension of Input(X) must be 1.");
+    if (ctx->IsRuntime() ||
+        (framework::product(x_dims) > 0 && framework::product(y_dims) > 0)) {
+      PADDLE_ENFORCE_EQ(x_dims, y_dims, "Shape of X and Y must be the same");
+    }
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(x_dims[1], 1,
+                        "Each row of Input(X) contains a real value, "
+                        "so the 2nd dimension of Input(X) must be 1.");
+    }
 
     ctx->SetOutputDim("Residual", x_dims);
     ctx->SetOutputDim("Out", {x_dims[0], 1});
diff --git a/paddle/fluid/operators/layer_norm_op.cc b/paddle/fluid/operators/layer_norm_op.cc
index 9b1a854a312551732424e0d127a43328b8db6085..1aac60ef36c62703f8f9a3b896c17a1483642f53 100644
--- a/paddle/fluid/operators/layer_norm_op.cc
+++ b/paddle/fluid/operators/layer_norm_op.cc
@@ -46,11 +46,18 @@ class LayerNormOp : public framework::OperatorWithKernel {
     int right = static_cast<int>(matrix_dim[1]);
     if (ctx->HasInput("Scale")) {
       PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1);
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right);
+
+      if (ctx->IsRuntime()) {
+        PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right,
+                          "The first dim of Scale should be equal to right");
+      }
     }
     if (ctx->HasInput("Bias")) {
       PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1);
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right);
+      if (ctx->IsRuntime()) {
+        PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right,
+                          "The first dim of Bias should be equal to right");
+      }
     }
 
     ctx->SetOutputDim("Y", ctx->GetInputDim("X"));
diff --git a/paddle/fluid/operators/metrics/precision_recall_op.cc b/paddle/fluid/operators/metrics/precision_recall_op.cc
index 1a67b134914053642377ec2623e68ab5a3e9ba50..f6d6ffc668c9aaa40e12e7289d4f97fc656e2c70 100644
--- a/paddle/fluid/operators/metrics/precision_recall_op.cc
+++ b/paddle/fluid/operators/metrics/precision_recall_op.cc
@@ -40,30 +40,40 @@ class PrecisionRecallOp : public framework::OperatorWithKernel {
     auto max_probs_dims = ctx->GetInputDim("MaxProbs");
     auto labels_dims = ctx->GetInputDim("Labels");
 
-    PADDLE_ENFORCE_EQ(max_probs_dims[1], 1,
-                      "Each instance contains one max probability, so the "
-                      "shape of Input(MaxProbs) should be [batch_size, 1].");
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Indices"), max_probs_dims,
-                      "The shape of Input(Indices) should be [batch_size, 1].");
-    PADDLE_ENFORCE_EQ(max_probs_dims[0], labels_dims[0],
-                      "The 1st dimension of Input(MaxProbs) and "
-                      "Input(Labels) both are batch_size and the shape should "
-                      "be the same.");
-    PADDLE_ENFORCE_EQ(labels_dims[1], 1,
-                      "The 2nd dimension of Input(Labels) contains instance "
-                      "label and the shape should be equal to 1.");
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(max_probs_dims[1], 1,
+                        "Each instance contains one max probability, so the "
+                        "shape of Input(MaxProbs) should be [batch_size, 1].");
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("Indices"), max_probs_dims,
+          "The shape of Input(Indices) must match that of Input(MaxProbs)");
+      PADDLE_ENFORCE_EQ(
+          max_probs_dims[0], labels_dims[0],
+          "The 1st dimension of Input(MaxProbs) and "
+          "Input(Labels) both are batch_size and the shape should "
+          "be the same.");
+      PADDLE_ENFORCE_EQ(labels_dims[1], 1,
+                        "The 2nd dimension of Input(Labels) contains instance "
+                        "label and the shape should be equal to 1.");
+    }
     if (ctx->HasInput("Weights")) {
       auto weights_dims = ctx->GetInputDim("Weights");
-      PADDLE_ENFORCE_EQ(weights_dims,
-                        framework::make_ddim({max_probs_dims[0], 1}),
-                        "The shape of Input(Weights) should be "
-                        "[batch_size, 1].");
+
+      if (ctx->IsRuntime()) {
+        PADDLE_ENFORCE_EQ(weights_dims,
+                          framework::make_ddim({max_probs_dims[0], 1}),
+                          "The shape of Input(Weights) should be "
+                          "[batch_size, 1].");
+      }
     }
     if (ctx->HasInput("StatesInfo")) {
       auto states_dims = ctx->GetInputDim("StatesInfo");
-      PADDLE_ENFORCE_EQ(states_dims, framework::make_ddim({cls_num, 4}),
-                        "The shape of Input(StatesInfo) should be "
-                        "[class_number, 4].");
+
+      if (ctx->IsRuntime()) {
+        PADDLE_ENFORCE_EQ(states_dims, framework::make_ddim({cls_num, 4}),
+                          "The shape of Input(StatesInfo) should be "
+                          "[class_number, 4].");
+      }
     }
 
     // Layouts of BatchMetrics and AccumMetrics both are:
diff --git a/paddle/fluid/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc
index 34571a38a14795a98ac8454cec606077727b5ffa..02a90d77b6e54475f4e722266d0a3b2046ea33ed 100644
--- a/paddle/fluid/operators/minus_op.cc
+++ b/paddle/fluid/operators/minus_op.cc
@@ -14,6 +14,7 @@
 limitations under the License. */
 
 #include "paddle/fluid/operators/minus_op.h"
+#include
 #include
 #include
 
@@ -38,9 +39,12 @@ class MinusOp : public framework::OperatorWithKernel {
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
 
-    PADDLE_ENFORCE_EQ(
-        x_dims, y_dims,
-        "Minus operator must take two tensor with same num of elements");
+    if (ctx->IsRuntime() ||
+        (framework::product(x_dims) > 0 && framework::product(y_dims) > 0)) {
+      PADDLE_ENFORCE_EQ(
+          x_dims, y_dims,
+          "Minus operator must take two tensor with same num of elements");
+    }
     ctx->SetOutputDim("Out", x_dims);
     ctx->ShareLoD("X", /*->*/ "Out");
   }
diff --git a/paddle/fluid/operators/modified_huber_loss_op.cc b/paddle/fluid/operators/modified_huber_loss_op.cc
index 9954e51083b2c4dbc043fe82ee75be91c6d60128..14d75aee754bc3d5b951a4f53a34ea8661c08cca 100644
--- a/paddle/fluid/operators/modified_huber_loss_op.cc
+++ b/paddle/fluid/operators/modified_huber_loss_op.cc
@@ -28,9 +28,16 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel {
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
 
-    PADDLE_ENFORCE_EQ(x_dims, y_dims, "The shape of X and Y must be the same.");
     PADDLE_ENFORCE_EQ(x_dims.size(), 2, "The tensor rank of X must be 2.");
-    PADDLE_ENFORCE_EQ(x_dims[1], 1, "The 2nd dimension of X must be 1.");
+    if (ctx->IsRuntime() ||
+        (framework::product(x_dims) > 0 && framework::product(y_dims) > 0)) {
+      PADDLE_ENFORCE_EQ(x_dims, y_dims,
+                        "The shape of X and Y must be the same.");
+    }
+
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(x_dims[1], 1, "The 2nd dimension of X must be 1.");
+    }
 
     ctx->SetOutputDim("IntermediateVal", x_dims);
     ctx->SetOutputDim("Out", {x_dims[0], 1});
@@ -90,11 +97,13 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel {
     auto intermediate_dims = ctx->GetInputDim("IntermediateVal");
     auto out_grad_dims = ctx->GetInputDim(framework::GradVarName("Out"));
 
-    PADDLE_ENFORCE_EQ(
-        intermediate_dims, x_dims,
-        "The shape of X and intermediate value must be the same.");
-    PADDLE_ENFORCE_EQ(out_grad_dims, x_dims,
-                      "The shape of Input(Out@Grad) and X must be the same.");
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(
+          intermediate_dims, x_dims,
+          "The shape of X and intermediate value must be the same.");
+      PADDLE_ENFORCE_EQ(out_grad_dims, x_dims,
+                        "The shape of Input(Out@Grad) and X must be the same.");
+    }
 
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
diff --git a/paddle/fluid/operators/space_to_depth_op.cc b/paddle/fluid/operators/space_to_depth_op.cc
index b579244673fa1618c282c4d4fedf2ba6d1726a82..bf94fa824651353633ddb0be20a69cd47eb0a20f 100644
--- a/paddle/fluid/operators/space_to_depth_op.cc
+++ b/paddle/fluid/operators/space_to_depth_op.cc
@@ -34,19 +34,44 @@ class SpaceToDepthOp : public framework::OperatorWithKernel {
 
     auto blocksize = ctx->Attrs().Get("blocksize");
     PADDLE_ENFORCE_GT(blocksize, 1, "The blocksize should be Greater than 1");
-    PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be Greater than 0");
-    PADDLE_ENFORCE_GT(x_dims[2], 0, "input Height should be Greater than 0");
-    PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
-
-    PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
-                      "input channel should be divisible of the square of "
-                      "SpaceToDepthOp blocksize");
-    PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
-                      "input Height should be divisible of the square of "
-                      "SpaceToDepthOp blocksize");
-    PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
-                      "input Width should be divisible of the square of "
-                      "SpaceToDepthOp blocksize");
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be Greater than 0");
+      PADDLE_ENFORCE_GT(x_dims[2], 0, "input Height should be Greater than 0");
+      PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
+
+      PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
+                        "input channel should be divisible of the square of "
+                        "SpaceToDepthOp blocksize");
+      PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
+                        "input Height should be divisible of the square of "
+                        "SpaceToDepthOp blocksize");
+      PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
+                        "input Width should be divisible of the square of "
+                        "SpaceToDepthOp blocksize");
+    } else {
+      if (x_dims[1] != -1) {
+        PADDLE_ENFORCE_GT(x_dims[1], 0,
+                          "input channel should be Greater than 0");
+        PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
+                          "input channel should be divisible of the square of "
+                          "SpaceToDepthOp blocksize");
+      }
+      if (x_dims[2] != -1) {
+        PADDLE_ENFORCE_GT(x_dims[2], 0,
+                          "input Height should be Greater than 0");
+        PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
+                          "input Height should be divisible of the square of "
+                          "SpaceToDepthOp blocksize");
+      }
+
+      if (x_dims[3] != -1) {
+        PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
+
+        PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
+                          "input Width should be divisible of the square of "
+                          "SpaceToDepthOp blocksize");
+      }
+    }
 
     VLOG(3) << "SpaceToDepthOp operator x.shape=" << x_dims
             << "Attribute blocksize" << blocksize << std::endl;
diff --git a/paddle/fluid/operators/tree_conv_op.cc b/paddle/fluid/operators/tree_conv_op.cc
index 615ea285e54b97a8fb81acfef9bf0d18ac4e914d..82dfbc205bcbb230e236c9e995f7c41c08fc6941 100644
--- a/paddle/fluid/operators/tree_conv_op.cc
+++ b/paddle/fluid/operators/tree_conv_op.cc
@@ -62,17 +62,38 @@ class TreeConvOp : public framework::OperatorWithKernel {
     auto edge_dims = ctx->GetInputDim("EdgeSet");
     auto vector_dims = ctx->GetInputDim("NodesVector");
    auto filter_dims = ctx->GetInputDim("Filter");
-    PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
+
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
+    } else {
+      if (edge_dims[2] != -1) {
+        PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
+      }
+    }
     PADDLE_ENFORCE_EQ(edge_dims.size(), 3,
                       "The dimension of EdgeSet Tensor should be 3");
     PADDLE_ENFORCE_EQ(vector_dims.size(), 3,
                       "The dimension of NodesVector Tensor should be 3");
     PADDLE_ENFORCE_EQ(filter_dims.size(), 4,
                       "The dimension of Filter Tensor should be 4");
-    PADDLE_ENFORCE_EQ(filter_dims[1], 3, "Input(Filter) dim[1] should be 3");
-    PADDLE_ENFORCE_EQ(
-        filter_dims[0], vector_dims[2],
-        "Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
+
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(filter_dims[1], 3, "Input(Filter) dim[1] should be 3");
+      PADDLE_ENFORCE_EQ(
+          filter_dims[0], vector_dims[2],
+          "Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
+    } else {
+      if (filter_dims[1] != -1) {
+        PADDLE_ENFORCE_EQ(filter_dims[1], 3,
+                          "Input(Filter) dim[1] should be 3");
+      }
+
+      if (filter_dims[0] != -1 && vector_dims[2] != -1) {
+        PADDLE_ENFORCE_EQ(
+            filter_dims[0], vector_dims[2],
+            "Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
+      }
+    }
     auto output_dims = framework::make_ddim(
         {vector_dims[0], vector_dims[1], filter_dims[2], filter_dims[3]});
     ctx->SetOutputDim("Out", output_dims);