From 9e85d0237379218bd0991ef33ea00d7e0a50a703 Mon Sep 17 00:00:00 2001
From: Yiqun Liu
Date: Sun, 12 Apr 2020 19:39:12 +0800
Subject: [PATCH] Avoid crash when calling ctx->HasInputs and add the check of
 shape in fill_constant op. (#23698)

---
 paddle/fluid/framework/op_desc.cc                 | 12 ++++++++++++
 paddle/fluid/operators/fill_constant_op.cc        | 10 ++++++++++
 .../fluid/tests/unittests/test_rnn_decode_api.py  |  6 +++---
 3 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index cbb2c79c5c..ae33c993ef 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -721,6 +721,9 @@ CompileTimeInferShapeContext::CompileTimeInferShapeContext(
     : op_(op), block_(block) {}
 
 bool CompileTimeInferShapeContext::HasInput(const std::string &name) const {
+  if (op_.Inputs().find(name) == op_.Inputs().end()) {
+    return false;
+  }
   const std::vector<std::string> &input_names = op_.Input(name);
   auto length = input_names.size();
   if (length == 0) {
@@ -734,6 +737,9 @@ bool CompileTimeInferShapeContext::HasInput(const std::string &name) const {
 }
 
 bool CompileTimeInferShapeContext::HasOutput(const std::string &name) const {
+  if (op_.Outputs().find(name) == op_.Outputs().end()) {
+    return false;
+  }
   const std::vector<std::string> &output_names = op_.Output(name);
   auto length = output_names.size();
   if (length == 0) {
@@ -747,6 +753,9 @@ bool CompileTimeInferShapeContext::HasOutput(const std::string &name) const {
 }
 
 bool CompileTimeInferShapeContext::HasInputs(const std::string &name) const {
+  if (op_.Inputs().find(name) == op_.Inputs().end()) {
+    return false;
+  }
   const std::vector<std::string> &input_names = op_.Input(name);
   if (input_names.empty()) {
     return false;
@@ -758,6 +767,9 @@ bool CompileTimeInferShapeContext::HasInputs(const std::string &name) const {
 }
 
 bool CompileTimeInferShapeContext::HasOutputs(const std::string &name) const {
+  if (op_.Outputs().find(name) == op_.Outputs().end()) {
+    return false;
+  }
   const std::vector<std::string> &output_names = op_.Output(name);
   if (output_names.empty()) {
     return false;
diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc
index 8fb12a952f..9794ddc672 100644
--- a/paddle/fluid/operators/fill_constant_op.cc
+++ b/paddle/fluid/operators/fill_constant_op.cc
@@ -25,6 +25,16 @@ class FillConstantOp : public framework::OperatorWithKernel {
     OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "FillConstant");
 
     auto& shape = ctx->Attrs().Get<std::vector<int64_t>>("shape");
+    if (!ctx->HasInput("ShapeTensor") && !ctx->HasInputs("ShapeTensorList")) {
+      for (size_t i = 0; i < shape.size(); ++i) {
+        PADDLE_ENFORCE_GE(
+            shape[i], 0,
+            platform::errors::InvalidArgument(
+                "Each value of attribute 'shape' is expected to be no less "
+                "than 0. But received: shape[%u] = %d; shape = [%s].",
+                i, shape[i], framework::make_ddim(shape)));
+      }
+    }
     if (shape.empty() && ctx->HasInput("ShapeTensor")) {
       auto shape_dims = ctx->GetInputDim("ShapeTensor");
diff --git a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py
index 8b0b144c6d..6ca194b269 100644
--- a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py
+++ b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py
@@ -369,11 +369,11 @@ class SeqPGAgent(object):
             self.probs, self.samples, self.sample_length = self.model(
                 source, source_length, target, target_length)
             self.samples.stop_gradient = True
-            self.reward = fluid.layers.create_global_var(
+            self.reward = fluid.data(
                 name="reward",
-                shape=[-1, -1],  # batch_size, seq_len
-                value="1",
+                shape=[None, None],  # batch_size, seq_len
                 dtype=self.probs.dtype)
+            self.samples.stop_gradient = False
             self.cost = self.alg.learn(self.probs, self.samples, self.reward,
                                        self.sample_length)
-- 
GitLab
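
Note: a minimal sketch of how the new check surfaces to users, assuming the
paddle.fluid static-graph API of this release line (the snippet below is
illustrative and not part of the patch). The op_desc.cc guard makes
HasInput/HasInputs return false, instead of failing an enforce, when a slot
such as "ShapeTensorList" is absent from the op description, which is what
lets the new validation in FillConstantOp::InferShape query those slots safely.

    import paddle.fluid as fluid

    # A negative entry in the 'shape' attribute should now be rejected at
    # graph-construction time by FillConstantOp::InferShape, rather than
    # crashing or surfacing as a later runtime failure.
    try:
        x = fluid.layers.fill_constant(shape=[2, -3], dtype='float32', value=1.0)
    except Exception as err:
        print(err)  # expected: InvalidArgument reporting shape[1] = -3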