diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc index cbb2c79c5c47d82b576b18a1790fb8f68391acd6..ae33c993efe8fe29ba08d69d41e5826571c0fb85 100644 --- a/paddle/fluid/framework/op_desc.cc +++ b/paddle/fluid/framework/op_desc.cc @@ -721,6 +721,9 @@ CompileTimeInferShapeContext::CompileTimeInferShapeContext( : op_(op), block_(block) {} bool CompileTimeInferShapeContext::HasInput(const std::string &name) const { + if (op_.Inputs().find(name) == op_.Inputs().end()) { + return false; + } const std::vector &input_names = op_.Input(name); auto length = input_names.size(); if (length == 0) { @@ -734,6 +737,9 @@ bool CompileTimeInferShapeContext::HasInput(const std::string &name) const { } bool CompileTimeInferShapeContext::HasOutput(const std::string &name) const { + if (op_.Outputs().find(name) == op_.Outputs().end()) { + return false; + } const std::vector &output_names = op_.Output(name); auto length = output_names.size(); if (length == 0) { @@ -747,6 +753,9 @@ bool CompileTimeInferShapeContext::HasOutput(const std::string &name) const { } bool CompileTimeInferShapeContext::HasInputs(const std::string &name) const { + if (op_.Inputs().find(name) == op_.Inputs().end()) { + return false; + } const std::vector &input_names = op_.Input(name); if (input_names.empty()) { return false; @@ -758,6 +767,9 @@ bool CompileTimeInferShapeContext::HasInputs(const std::string &name) const { } bool CompileTimeInferShapeContext::HasOutputs(const std::string &name) const { + if (op_.Outputs().find(name) == op_.Outputs().end()) { + return false; + } const std::vector &output_names = op_.Output(name); if (output_names.empty()) { return false; diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc index 8fb12a952f6c6ecb05e0b6e586c1365773612bea..9794ddc672ab3cc1c99257e03870c56caab0582d 100644 --- a/paddle/fluid/operators/fill_constant_op.cc +++ b/paddle/fluid/operators/fill_constant_op.cc @@ -25,6 +25,16 @@ class 
FillConstantOp : public framework::OperatorWithKernel { OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "FillConstant"); auto& shape = ctx->Attrs().Get>("shape"); + if (!ctx->HasInput("ShapeTensor") && !ctx->HasInputs("ShapeTensorList")) { + for (size_t i = 0; i < shape.size(); ++i) { + PADDLE_ENFORCE_GE( + shape[i], 0, + platform::errors::InvalidArgument( + "Each value of attribute 'shape' is expected to be greater " + "than 0. But received: shape[%u] = %d; shape = [%s].", + i, shape[i], framework::make_ddim(shape))); + } + } if (shape.empty() && ctx->HasInput("ShapeTensor")) { auto shape_dims = ctx->GetInputDim("ShapeTensor"); diff --git a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py index 8b0b144c6d8815e9442e0981ef8f32c31d5758bd..6ca194b2694b6c7537ceb94e11eb1a1a0aeb8d8d 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py @@ -369,11 +369,11 @@ class SeqPGAgent(object): self.probs, self.samples, self.sample_length = self.model( source, source_length, target, target_length) self.samples.stop_gradient = True - self.reward = fluid.layers.create_global_var( + self.reward = fluid.data( name="reward", - shape=[-1, -1], # batch_size, seq_len - value="1", + shape=[None, None], # batch_size, seq_len dtype=self.probs.dtype) + self.samples.stop_gradient = False self.cost = self.alg.learn(self.probs, self.samples, self.reward, self.sample_length)