diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h
index a496301526f58875ff51aeaa5b2094c3c656531c..78be2e1e1f06c7a518e35a770c1dc9581b2d10fe 100644
--- a/paddle/fluid/operators/concat_op.h
+++ b/paddle/fluid/operators/concat_op.h
@@ -62,9 +62,21 @@ class ConcatGradKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const {
     auto* out_grad =
         ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
-    auto ins = ctx.MultiInput<framework::Tensor>("X");
+    auto ins = ctx.MultiInput<framework::LoDTensor>("X");
     auto out_var_names = ctx.Outputs(framework::GradVarName("X"));
-    auto outs = ctx.MultiOutput<framework::Tensor>(framework::GradVarName("X"));
+    auto outs =
+        ctx.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));
+
+    {
+      auto dx = outs;
+      auto x = ins;
+      for (size_t i = 0; i < dx.size(); ++i) {
+        if (dx[i] != nullptr) {
+          dx[i]->set_lod(x[i]->lod());
+        }
+      }
+    }
+
     int64_t axis = static_cast<int64_t>(ctx.Attr<int>("axis"));
 
     // get output tensor that the name is not kEmptyVarName
diff --git a/paddle/fluid/operators/math/concat.cc b/paddle/fluid/operators/math/concat.cc
index 55c8a472aca7fe700ef6a3f96bed1496d7b12b80..fbe7c2978385401b35765101c87387ff727be4e0 100644
--- a/paddle/fluid/operators/math/concat.cc
+++ b/paddle/fluid/operators/math/concat.cc
@@ -71,7 +71,7 @@ class ConcatGradFunctor<platform::CPUDeviceContext, T> {
  public:
   void operator()(const platform::CPUDeviceContext& context,
                   const framework::Tensor& input,
-                  const std::vector<const framework::Tensor*>& ref_inputs,
+                  const std::vector<const framework::LoDTensor*>& ref_inputs,
                   const int axis, std::vector<framework::Tensor*>* outputs) {
     // TODO(zcd): Add input data validity checking
     size_t num = outputs->size();
diff --git a/paddle/fluid/operators/math/concat.cu b/paddle/fluid/operators/math/concat.cu
index 5863d74fca21de8b77bc208fb95d8fd52562f7a7..820e73e779720e4f76168e0a84a254ef645784ee 100644
--- a/paddle/fluid/operators/math/concat.cu
+++ b/paddle/fluid/operators/math/concat.cu
@@ -189,7 +189,7 @@ class ConcatGradFunctor<platform::CUDADeviceContext, T> {
  public:
   void operator()(const platform::CUDADeviceContext& context,
                   const framework::Tensor& input,
-                  const std::vector<const framework::Tensor*>& ref_inputs,
+                  const std::vector<const framework::LoDTensor*>& ref_inputs,
                   const int axis, std::vector<framework::Tensor*>* outputs) {
     // TODO(zcd): Add input data validity checking
     int o_num = outputs->size();
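Note (reviewer sketch, not part of the patch): the concat_op.h hunk above is the heart of the fix. The gradient outputs are now fetched as LoDTensors, and each non-null dX inherits the LoD of its forward input before ConcatGradFunctor splits out_grad along `axis`; previously dX came back with an empty LoD, so sequence-aware consumers of the gradient saw no sequence boundaries. A minimal standalone sketch of that propagation pattern follows; the free function PropagateLoD is hypothetical (only lod()/set_lod() are real framework::LoDTensor API):

    #include <vector>
    #include "paddle/fluid/framework/lod_tensor.h"

    namespace f = paddle::framework;

    // Copy each forward input's LoD onto the matching gradient output.
    // Gradient slots bound to kEmptyVarName arrive as nullptr, hence the check.
    void PropagateLoD(const std::vector<const f::LoDTensor*>& x,
                      const std::vector<f::LoDTensor*>* dx) {
      for (size_t i = 0; i < dx->size(); ++i) {
        if ((*dx)[i] != nullptr) {
          (*dx)[i]->set_lod(x[i]->lod());
        }
      }
    }
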
diff --git a/paddle/fluid/operators/math/concat.h b/paddle/fluid/operators/math/concat.h
index 9e080f2e8be23768dcea47b577043beef37b2eaf..e5d7d860b371677b3cfc67a57390bdee0d0ecc37 100644
--- a/paddle/fluid/operators/math/concat.h
+++ b/paddle/fluid/operators/math/concat.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include <vector>
 #include "paddle/fluid/framework/data_type.h"
-#include "paddle/fluid/framework/tensor.h"
+#include "paddle/fluid/framework/lod_tensor.h"
 
 namespace paddle {
 namespace operators {
@@ -57,7 +57,7 @@ template <typename DeviceContext, typename T>
 class ConcatGradFunctor {
  public:
   void operator()(const DeviceContext& context, const framework::Tensor& input,
-                  const std::vector<const framework::Tensor*>& ref_inputs,
+                  const std::vector<const framework::LoDTensor*>& ref_inputs,
                   const int axis, std::vector<framework::Tensor*>* outputs);
 };
diff --git a/paddle/fluid/operators/sequence_softmax_op.h b/paddle/fluid/operators/sequence_softmax_op.h
index cb93a02b8386ed50ff176fc25b88449b7eb16902..bca564e16f9951519eefe25126aadebb4c1326b6 100644
--- a/paddle/fluid/operators/sequence_softmax_op.h
+++ b/paddle/fluid/operators/sequence_softmax_op.h
@@ -66,6 +66,9 @@ class SequenceSoftmaxGradKernel : public framework::OpKernel<T> {
     auto* out_grad = ctx.Input<LoDTensor>(framework::GradVarName("Out"));
     auto* x = ctx.Input<LoDTensor>("X");
     auto* x_grad = ctx.Output<LoDTensor>(framework::GradVarName("X"));
+    if (x_grad) {
+      x_grad->set_lod(x->lod());
+    }
     auto lod = x->lod();
     const size_t level = lod.size() - 1;
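Note (reviewer sketch, not part of the patch): the sequence_softmax change mirrors the concat one — x_grad must carry x's LoD before the kernel's per-sequence loop runs and before any downstream op reads dX as a sequence. The self-compiling toy below shows how a consumer walks sequences from the LoD offsets this fix now propagates; a plain std::vector<size_t> stands in for one LoD level, and the offsets are made up for illustration:

    #include <cstdio>
    #include <vector>

    int main() {
      // One LoD level: 3 sequences packed into a 9-row tensor,
      // occupying rows [0,2), [2,5), [5,9).
      std::vector<size_t> lod_level = {0, 2, 5, 9};
      for (size_t i = 0; i + 1 < lod_level.size(); ++i) {
        size_t start = lod_level[i];
        size_t end = lod_level[i + 1];
        // The real kernel slices this row range out of x / out_grad / dX
        // and applies the softmax gradient to the sequence in isolation.
        std::printf("sequence %zu: rows [%zu, %zu)\n", i, start, end);
      }
      return 0;
    }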