diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index 6059961ad8869fe14d9b600ab2469e385091bf3e..e8ecd90502933a049cc8f886212579fc061d44ff 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -81,13 +81,35 @@ class CompileTimeInferShapeContext : public InferShapeContext {
                    "The %s[%d] is @EMPTY@", out, j);
     auto *in_var = block_.FindVarRecursive(Inputs(in)[i]);
     auto *out_var = block_.FindVarRecursive(Outputs(out)[j]);
-    if (in_var->GetType() != proto::VarType::LOD_TENSOR) {
-      VLOG(3) << "input " << in << " is not LodTensor";
+    if (in_var->GetType() != proto::VarType::LOD_TENSOR &&
+        in_var->GetType() != proto::VarType::LOD_TENSOR_ARRAY) {
+      VLOG(3) << "input " << in << " is not LodTensor or LodTensorArray.";
       return;
     }
     out_var->SetLoDLevel(in_var->GetLoDLevel());
   }

+  void DecreaseLoDLevel(const std::string &in, const std::string &out,
+                        size_t i = 0, size_t j = 0) const override {
+    PADDLE_ENFORCE_LT(i, Inputs(in).size());
+    PADDLE_ENFORCE_LT(j, Outputs(out).size());
+    PADDLE_ENFORCE(Inputs(in)[i] != framework::kEmptyVarName,
+                   "The %s[%d] is @EMPTY@", in, i);
+    PADDLE_ENFORCE(Outputs(out)[j] != framework::kEmptyVarName,
+                   "The %s[%d] is @EMPTY@", out, j);
+    auto *in_var = block_.FindVarRecursive(Inputs(in)[i]);
+    auto *out_var = block_.FindVarRecursive(Outputs(out)[j]);
+    PADDLE_ENFORCE(out_var->GetType() == proto::VarType::LOD_TENSOR_ARRAY ||
+                       out_var->GetType() == proto::VarType::LOD_TENSOR,
+                   "The input %s should be LodTensorArray or LodTensor.",
+                   out_var->Name());
+    PADDLE_ENFORCE(in_var->GetType() == proto::VarType::LOD_TENSOR,
+                   "The input %s should be LodTensor.", in_var->Name());
+    if (in_var->GetLoDLevel() > 0) {
+      out_var->SetLoDLevel(in_var->GetLoDLevel() - 1);
+    }
+  }
+
   bool IsRuntime() const override;

  protected:
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 2260353af7bef11257e905d8ff2eae96268ffd01..8bfdf3891203823826fd5bf919c176011f22213c 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -623,6 +623,11 @@ class RuntimeInferShapeContext : public InferShapeContext {
     out_tensor->set_layout(in_tensor.layout());
   }

+  void DecreaseLoDLevel(const std::string& in, const std::string& out,
+                        size_t i = 0, size_t j = 0) const override {
+    PADDLE_THROW("DecreaseLoDLevel is only used in compile time.");
+  }
+
   bool IsRuntime() const override { return true; }

  protected:
diff --git a/paddle/fluid/framework/shape_inference.h b/paddle/fluid/framework/shape_inference.h
index 280bc19dce7b604d67aefdc572de96b479b8d2d7..d73cca121e41e68f9fb6548117ed91c5cc1415ca 100644
--- a/paddle/fluid/framework/shape_inference.h
+++ b/paddle/fluid/framework/shape_inference.h
@@ -62,6 +62,9 @@ class InferShapeContext {
   virtual void ShareLoD(const std::string &in, const std::string &out,
                         size_t i = 0, size_t j = 0) const = 0;

+  virtual void DecreaseLoDLevel(const std::string &in, const std::string &out,
+                                size_t i = 0, size_t j = 0) const = 0;
+
   virtual bool IsRuntime() const = 0;

   std::vector<InferShapeVarPtr> GetInputVarPtrs(const std::string &name);
diff --git a/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc b/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
index a2d44284e9de1ace42cabbce82e0b45929432d7b..fa18ade3234ed1802bb44ad622f9041dc73d84ee 100644
--- a/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
+++ b/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
@@ -167,6 +167,19 @@ $$T = A[i]$$
 };

 class ReadFromArrayInferShape : public WriteToArrayInferShape {
+ public:
+  void operator()(framework::InferShapeContext *context) const override {
+    WriteToArrayInferShape::operator()(context);
+    if (!context->HasInput("X")) {
+      return;
+    }
+
+    // FIXME: just for compile time.
+    if (!context->IsRuntime()) {
+      context->ShareLoD("X", /*->*/ "Out");
+    }
+  }
+
  protected:
   const char *NotHasXError() const override {
     return "The input array X must be set";
diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc
index e72337a3e6f7884c3a05372e8732647e5910f3e4..145d2db118fbe36f0d8f09fdbfa9ac30dea18f01 100644
--- a/paddle/fluid/operators/lod_tensor_to_array_op.cc
+++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc
@@ -192,6 +192,10 @@ class LoDTensorToArrayInferShape : public framework::InferShapeBase {
     // The first dim of each LoDTensor in Output can only be set at run-time.;
     // We still have to Resize each LoDTensor in Output.
     context->SetOutputDim("Out", x_dim);
+    // The lod level should be passed to out in compile time.
+    if (!context->IsRuntime()) {
+      context->DecreaseLoDLevel("X", /*->*/ "Out");
+    }
   }
 };

diff --git a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc
index e4f4fe358e0e8cd2080525227f14a3d40f3c1411..7ceb5b58465bcdfa22345944bf8140793f187498 100644
--- a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc
+++ b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc
@@ -201,6 +201,9 @@ class IdentityInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
     context->SetOutputDim("Out", context->GetInputDim("X"));
+    if (!context->IsRuntime()) {
+      context->ShareLoD("X", /*->*/ "Out");
+    }
   }
 };

diff --git a/paddle/fluid/operators/shrink_rnn_memory_op.cc b/paddle/fluid/operators/shrink_rnn_memory_op.cc
index e1c74c3a2f89235ba92c396d1a548271bb7d939d..2e2aea2c632d8e4e0abbcd2cac562e492e0f552f 100644
--- a/paddle/fluid/operators/shrink_rnn_memory_op.cc
+++ b/paddle/fluid/operators/shrink_rnn_memory_op.cc
@@ -100,6 +100,9 @@ class ShrinkRNNMemoryInferShape : public framework::InferShapeBase {
     PADDLE_ENFORCE(context->HasInput("I"));
     PADDLE_ENFORCE(context->HasInput("RankTable"));
     context->SetOutputDim("Out", context->GetInputDim("X"));
+    if (!context->IsRuntime()) {
+      context->DecreaseLoDLevel("X", /*->*/ "Out");
+    }
   }
 };

diff --git a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py
index 3191eb94d753435d31f1849be2d97b1cf89b220c..48fb93ec529bee32b9652a89ba7da3dc77f7853a 100644
--- a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py
+++ b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py
@@ -172,6 +172,7 @@ class TestDynRNN(unittest.TestCase):
         rnn = fluid.layers.DynamicRNN()
         with rnn.block():
             in_ = rnn.step_input(sentence)
+            assert in_.lod_level == 1, "the lod level of in_ should be 1"
             sent_emb = fluid.layers.embedding(
                 input=in_, size=[len(word_dict), 32], dtype='float32')
             out_ = fluid.layers.fc(input=sent_emb, size=100, act='tanh')
@@ -179,6 +180,7 @@ class TestDynRNN(unittest.TestCase):
             rnn1 = fluid.layers.DynamicRNN()
             with rnn1.block():
                 in_1 = rnn1.step_input(out_)
+                assert in_1.lod_level == 0, "the lod level of in_1 should be 0"
                 out_1 = fluid.layers.fc(input=[in_1], size=100, act='tanh')
                 rnn1.output(out_1)

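
Note: the following is a minimal, hypothetical sketch (not part of the patch above) of how another compile-time InferShape functor could adopt the new InferShapeContext::DecreaseLoDLevel hook, following the same pattern as ShrinkRNNMemoryInferShape and LoDTensorToArrayInferShape in this diff. The class name MyDropSeqLevelInferShape and the input/output names are illustrative assumptions only.

// Hypothetical example -- mirrors the pattern used in this patch.
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/shape_inference.h"

namespace paddle {
namespace operators {

class MyDropSeqLevelInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *context) const override {
    PADDLE_ENFORCE(context->HasInput("X"));
    PADDLE_ENFORCE(context->HasOutput("Out"));
    context->SetOutputDim("Out", context->GetInputDim("X"));
    // The concrete LoD is only known at run time, but the LoD *level* can be
    // recorded while the program is being built: Out gets one level less
    // than X. DecreaseLoDLevel must not be called at run time, since
    // RuntimeInferShapeContext throws for it.
    if (!context->IsRuntime()) {
      context->DecreaseLoDLevel("X", /*->*/ "Out");
    }
  }
};

}  // namespace operators
}  // namespace paddle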