diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index be17bf9a03fc1908fccb2bb6a5f32ce49db3353d..4f22d28a450c1a4e72c9580f719442b1a8a0f81b 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -55,6 +55,16 @@ void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const { "Variance and VarianceOut should share the same memory")); const auto x_dims = ctx->GetInputDim("X"); + + for (int i = 0; i < x_dims.size(); i++) { + PADDLE_ENFORCE_EQ( + (x_dims[i] == -1) || (x_dims[i] > 0), true, + platform::errors::InvalidArgument( + "Each dimension of input tensor is expected to be -1 or a " + "positive number, but received %d. Input's shape is [%s].", + x_dims[i], x_dims)); + } + const DataLayout data_layout = framework::StringToDataLayout( ctx->Attrs().Get("data_layout")); diff --git a/paddle/fluid/operators/instance_norm_op.cc b/paddle/fluid/operators/instance_norm_op.cc index 28643ac1c0d832dd9550c036f0a08383b256b5f6..0a850400686c4949f6cda83f0e386d3c51d323f9 100644 --- a/paddle/fluid/operators/instance_norm_op.cc +++ b/paddle/fluid/operators/instance_norm_op.cc @@ -32,6 +32,13 @@ void InstanceNormOp::InferShape(framework::InferShapeContext *ctx) const { "InstanceNorm"); const auto x_dims = ctx->GetInputDim("X"); + PADDLE_ENFORCE_NE(framework::product(x_dims), 0, + platform::errors::PreconditionNotMet( + "The Input variable X(%s) has not " + "been initialized. You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.", + ctx->Inputs("X").front())); PADDLE_ENFORCE_GE( x_dims.size(), 2, platform::errors::InvalidArgument(