From 66ae0a8cb28fdd725edd8e10cf2e63a3bb44e761 Mon Sep 17 00:00:00 2001
From: yangyaming
Date: Wed, 3 Jan 2018 22:32:34 +0800
Subject: [PATCH] Enhance shrink_rnn_memory_op.

---
 paddle/operators/shrink_rnn_memory_op.cc       | 21 ++++++++++++++++-----
 .../v2/fluid/tests/test_shrink_rnn_memory.py   |  8 ++++----
 2 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/paddle/operators/shrink_rnn_memory_op.cc b/paddle/operators/shrink_rnn_memory_op.cc
index b37269b471b..cc9e3f90b42 100644
--- a/paddle/operators/shrink_rnn_memory_op.cc
+++ b/paddle/operators/shrink_rnn_memory_op.cc
@@ -46,8 +46,19 @@ class ShrinkRNNMemoryOp : public ArrayOp {
     auto *out_var = scope.FindVar(Output("Out"));
     PADDLE_ENFORCE(out_var != nullptr, "Output Out must be set");
     auto &out_tensor = *out_var->GetMutable<framework::LoDTensor>();
+
+    // should consider multiple levels
+    size_t height = dst_num_rows;
+    auto lod_level = lod_rank_table.level();
+    if (x_tensor.lod().size() > lod_level &&
+        x_tensor.lod()[lod_level].size() < dst_num_rows) {
+      auto lod_offset = framework::GetSubLoDAndAbsoluteOffset(
+          x_tensor.lod(), 0, dst_num_rows + 1, lod_level);
+      height = lod_offset.second.second;
+    }
+
     if (dst_num_rows != 0) {
-      out_tensor.ShareDataWith(x_tensor.Slice(0, dst_num_rows));
+      out_tensor.ShareDataWith(x_tensor.Slice(0, height));
     }
   }
 };
@@ -64,11 +75,11 @@ class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Out", "(LoDTensor) The shrinked RNN step memory.");
     AddComment(
         R"DOC(
-  In dynamic RNN, we are able to handle sequences of different lengths. 
-  Because of the multiple lengths, the size of each step input can be 
+  In dynamic RNN, we are able to handle sequences of different lengths.
+  Because of the multiple lengths, the size of each step input can be
   different, which may lead to a mismatching between the input of
-  the current step and the memory generated by the previous one. This 
-  operator shrinks memory according to the size of the next step input, 
+  the current step and the memory generated by the previous one. This
+  operator shrinks memory according to the size of the next step input,
   to make sure that they can match each other.
 )DOC");
   }
diff --git a/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py b/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py
index be1588fc2d0..707dbd793a0 100644
--- a/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py
+++ b/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py
@@ -26,13 +26,13 @@ class TestShrinkRNNMemory(unittest.TestCase):
         cpu = core.CPUPlace()
         tensor = core.LoDTensor()
         tensor.set_lod([[0, 2, 5, 6]])
-        tensor_np = numpy.random.random(size=(3, 100)).astype('float32')
+        tensor_np = numpy.random.random(size=(6, 100)).astype('float32')
         tensor.set(tensor_np, cpu)
         exe = Executor(cpu)
         outs = exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3])
-        self.assertTrue(numpy.allclose(tensor_np[0:3], outs[0]))
-        self.assertTrue(numpy.allclose(tensor_np[0:2], outs[1]))
-        self.assertTrue(numpy.allclose(tensor_np[0:1], outs[2]))
+        self.assertTrue(numpy.allclose(tensor_np[0:6], outs[0]))
+        self.assertTrue(numpy.allclose(tensor_np[0:5], outs[1]))
+        self.assertTrue(numpy.allclose(tensor_np[0:2], outs[2]))
         mem3_mean = layers.mean(x=mem3)
         append_backward(loss=mem3_mean)
 
-- 
GitLab
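
Reviewer note, outside the patch: the updated test expectations follow directly from the new LoD-aware slicing. With an offset-style LoD such as [[0, 2, 5, 6]], shrinking the memory to the first k sequences must keep all rows up to the k-th LoD offset, not merely k rows. The following is a minimal NumPy sketch of that rule only; the helper name shrink_height and the plain-list LoD representation are illustrative assumptions, not PaddlePaddle API.

    import numpy

    def shrink_height(lod_level, dst_num_rows):
        # lod_level[k] is the absolute row offset where sequence k starts,
        # so keeping the first dst_num_rows sequences keeps
        # lod_level[dst_num_rows] rows of the underlying tensor.
        return lod_level[dst_num_rows]

    x = numpy.random.random(size=(6, 100)).astype('float32')
    lod = [0, 2, 5, 6]  # three sequences of lengths 2, 3 and 1

    # Consistent with the assertions in test_shrink_rnn_memory.py:
    assert shrink_height(lod, 3) == 6  # all 3 sequences   -> rows 0:6 (mem1)
    assert shrink_height(lod, 2) == 5  # first 2 sequences -> rows 0:5 (mem2)
    assert shrink_height(lod, 1) == 2  # first sequence    -> rows 0:2 (mem3)

    shrunk = x[0:shrink_height(lod, 2)]  # keeps rows 0..4, like Slice(0, height)

This also explains why the test tensor grows from shape (3, 100) to (6, 100): the LoD [[0, 2, 5, 6]] describes 6 rows of data, so the old 3-row tensor did not actually match its own LoD.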