From fe43b2ee2376f2e10578cff4f541dd774d70b166 Mon Sep 17 00:00:00 2001
From: wawltor <980627148@qq.com>
Date: Thu, 13 Jun 2019 13:53:54 +0800
Subject: [PATCH] Fix bug in sequence_unpad op when allocating the output memory

Fix #17955

In sequence_unpad op, the output memory was allocated before the output
shape was set, so the allocated size did not match the actual output size
and the memory check failed. Fix this by allocating the output memory in
the correct position, after the output tensor has been resized.
---
 paddle/fluid/operators/sequence_ops/sequence_unpad_op.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.h b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.h
index fe8ca41b69..70f26055b7 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.h
@@ -33,7 +33,6 @@ class SequenceUnpadOpKernel : public framework::OpKernel<T> {
     auto* x_t = ctx.Input<LoDTensor>("X");
     auto* len_t = ctx.Input<LoDTensor>("Length");
     auto* out_t = ctx.Output<LoDTensor>("Out");
-    out_t->mutable_data<T>(ctx.GetPlace());
 
     const int64_t* seq_len_ptr = nullptr;
     if (platform::is_gpu_place(ctx.GetPlace())) {
@@ -67,6 +66,9 @@ class SequenceUnpadOpKernel : public framework::OpKernel<T> {
     }
     out_t->Resize(framework::make_ddim(out_dims_vec));
 
+    // after setting the LoD of the output, allocate the memory
+    out_t->mutable_data<T>(ctx.GetPlace());
+
     int64_t padded_length = x_t->dims()[1];
     math::UnpaddingLoDTensorFunctor<DeviceContext, T>()(
         ctx.template device_context<DeviceContext>(), *x_t, out_t,
--
GitLab
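
For readers less familiar with the resize/allocate interplay behind this fix, below is a minimal standalone C++ sketch of the failure mode. The ToyTensor type and its Resize/MutableData/MemorySizeMatchesDims methods are hypothetical illustrations, not Paddle APIs, and the placeholder shapes are made up only to make the size mismatch visible: the point is that allocating before the final Resize can leave the buffer out of sync with the tensor's dims, which is exactly what a post-hoc memory check catches.

    #include <cassert>
    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    // Toy stand-in for a tensor: a shape plus a separately allocated buffer.
    struct ToyTensor {
      std::vector<int64_t> dims;
      std::vector<float> data;

      void Resize(const std::vector<int64_t>& new_dims) { dims = new_dims; }

      // Allocate exactly as many elements as the *current* dims describe.
      void MutableData() {
        data.resize(std::accumulate(dims.begin(), dims.end(), int64_t{1},
                                    std::multiplies<int64_t>()));
      }

      // Mimics the kind of check that fails when the buffer was sized from
      // stale dims: the buffer must cover every element implied by dims.
      bool MemorySizeMatchesDims() const {
        int64_t numel = std::accumulate(dims.begin(), dims.end(), int64_t{1},
                                        std::multiplies<int64_t>());
        return static_cast<int64_t>(data.size()) >= numel;
      }
    };

    int main() {
      // Buggy order: allocate from a placeholder shape, then resize to the
      // real output shape afterwards (hypothetical sizes for illustration).
      ToyTensor buggy;
      buggy.Resize({2, 3});                    // shape known up front
      buggy.MutableData();                     // allocates 6 elements
      buggy.Resize({4, 5});                    // actual shape computed later
      assert(!buggy.MemorySizeMatchesDims());  // 6 < 20: check fails

      // Fixed order, as in the patch: resize first, then allocate.
      ToyTensor fixed;
      fixed.Resize({4, 5});
      fixed.MutableData();                     // allocates 20 elements
      assert(fixed.MemorySizeMatchesDims());
      return 0;
    }

This mirrors the patch's change of moving out_t->mutable_data<T>(ctx.GetPlace()) to after out_t->Resize(...), so the allocation sees the final output dimensions.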