From 2a36ad1a9655c3f618d4f77ca753f8b0fb214399 Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Wed, 22 Aug 2018 06:48:00 +0000
Subject: [PATCH] Handle LoD for concat & seq_softmax ops

---
 paddle/fluid/operators/concat_op.h           | 16 ++++++++++++++--
 paddle/fluid/operators/math/concat.cc        |  2 +-
 paddle/fluid/operators/math/concat.cu        |  2 +-
 paddle/fluid/operators/math/concat.h         |  4 ++--
 paddle/fluid/operators/sequence_softmax_op.h |  3 +++
 5 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h
index a496301526..78be2e1e1f 100644
--- a/paddle/fluid/operators/concat_op.h
+++ b/paddle/fluid/operators/concat_op.h
@@ -62,9 +62,21 @@ class ConcatGradKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const {
     auto* out_grad =
         ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
-    auto ins = ctx.MultiInput<framework::Tensor>("X");
+    auto ins = ctx.MultiInput<framework::LoDTensor>("X");
     auto out_var_names = ctx.Outputs(framework::GradVarName("X"));
-    auto outs = ctx.MultiOutput<framework::Tensor>(framework::GradVarName("X"));
+    auto outs =
+        ctx.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));
+
+    {
+      auto dx = outs;
+      auto x = ins;
+      for (size_t i = 0; i < dx.size(); ++i) {
+        if (dx[i] != nullptr) {
+          dx[i]->set_lod(x[i]->lod());
+        }
+      }
+    }
+
     int64_t axis = static_cast<int64_t>(ctx.Attr<int>("axis"));
 
     // get output tensor that the name is not kEmptyVarName
diff --git a/paddle/fluid/operators/math/concat.cc b/paddle/fluid/operators/math/concat.cc
index 55c8a472ac..fbe7c29783 100644
--- a/paddle/fluid/operators/math/concat.cc
+++ b/paddle/fluid/operators/math/concat.cc
@@ -71,7 +71,7 @@ class ConcatGradFunctor<platform::CPUDeviceContext, T> {
  public:
   void operator()(const platform::CPUDeviceContext& context,
                   const framework::Tensor& input,
-                  const std::vector<const framework::Tensor*>& ref_inputs,
+                  const std::vector<const framework::LoDTensor*>& ref_inputs,
                   const int axis, std::vector<framework::Tensor*>* outputs) {
     // TODO(zcd): Add input data validity checking
     size_t num = outputs->size();
diff --git a/paddle/fluid/operators/math/concat.cu b/paddle/fluid/operators/math/concat.cu
index 5863d74fca..820e73e779 100644
--- a/paddle/fluid/operators/math/concat.cu
+++ b/paddle/fluid/operators/math/concat.cu
@@ -189,7 +189,7 @@ class ConcatGradFunctor<platform::CUDADeviceContext, T> {
  public:
   void operator()(const platform::CUDADeviceContext& context,
                   const framework::Tensor& input,
-                  const std::vector<const framework::Tensor*>& ref_inputs,
+                  const std::vector<const framework::LoDTensor*>& ref_inputs,
                   const int axis, std::vector<framework::Tensor*>* outputs) {
     // TODO(zcd): Add input data validity checking
     int o_num = outputs->size();
diff --git a/paddle/fluid/operators/math/concat.h b/paddle/fluid/operators/math/concat.h
index 9e080f2e8b..e5d7d860b3 100644
--- a/paddle/fluid/operators/math/concat.h
+++ b/paddle/fluid/operators/math/concat.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include <vector>
 #include "paddle/fluid/framework/data_type.h"
-#include "paddle/fluid/framework/tensor.h"
+#include "paddle/fluid/framework/lod_tensor.h"
 
 namespace paddle {
 namespace operators {
@@ -57,7 +57,7 @@ template <typename DeviceContext, typename T>
 class ConcatGradFunctor {
  public:
   void operator()(const DeviceContext& context, const framework::Tensor& input,
-                  const std::vector<const framework::Tensor*>& ref_inputs,
+                  const std::vector<const framework::LoDTensor*>& ref_inputs,
                   const int axis, std::vector<framework::Tensor*>* outputs);
 };
 
diff --git a/paddle/fluid/operators/sequence_softmax_op.h b/paddle/fluid/operators/sequence_softmax_op.h
index cb93a02b83..bca564e16f 100644
--- a/paddle/fluid/operators/sequence_softmax_op.h
+++ b/paddle/fluid/operators/sequence_softmax_op.h
@@ -66,6 +66,9 @@ class SequenceSoftmaxGradKernel : public framework::OpKernel<T> {
     auto* out_grad = ctx.Input<LoDTensor>(framework::GradVarName("Out"));
     auto* x = ctx.Input<LoDTensor>("X");
     auto* x_grad = ctx.Output<LoDTensor>(framework::GradVarName("X"));
+    if (x_grad) {
+      x_grad->set_lod(x->lod());
+    }
 
     auto lod = x->lod();
     const size_t level = lod.size() - 1;
--
GitLab
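
Note: the core of this patch is LoD propagation (LoD = "level of details", Paddle's offset-based encoding of variable-length sequence boundaries) in the two backward kernels: each output gradient receives its input's LoD before values are written, so downstream sequence ops still see correct boundaries. Below is a minimal standalone C++ sketch of that pattern. The LoD/LoDTensor types here are simplified stand-ins, and PropagateLoD is an illustrative name, not Paddle's actual API.

#include <cstddef>
#include <iostream>
#include <vector>

// Simplified stand-in for framework::LoD: each level holds offsets that
// mark sequence boundaries along the tensor's first dimension.
using LoD = std::vector<std::vector<std::size_t>>;

// Simplified stand-in for framework::LoDTensor: values plus their LoD.
struct LoDTensor {
  std::vector<float> data;
  LoD lod_;
  void set_lod(const LoD& lod) { lod_ = lod; }
  const LoD& lod() const { return lod_; }
};

// The pattern both gradient kernels in the patch apply: copy each input's
// LoD onto the matching output gradient, skipping gradients that are not
// wired up (nullptr), before the gradient values are computed.
void PropagateLoD(const std::vector<const LoDTensor*>& x,
                  const std::vector<LoDTensor*>& dx) {
  for (std::size_t i = 0; i < dx.size(); ++i) {
    if (dx[i] != nullptr) {
      dx[i]->set_lod(x[i]->lod());
    }
  }
}

int main() {
  LoDTensor x0;
  x0.set_lod({{0, 2, 5}});  // two sequences: rows [0, 2) and [2, 5)
  LoDTensor dx0;
  std::vector<const LoDTensor*> xs = {&x0};
  std::vector<LoDTensor*> dxs = {&dx0};
  PropagateLoD(xs, dxs);
  // dx0 now carries the same sequence boundaries as x0.
  std::cout << "offsets copied: " << dx0.lod()[0].size() << "\n";  // prints 3
  return 0;
}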