diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h
index 1b1b8bf5ed959dd9c2ce8c9f5c905a75b81865fd..a496301526f58875ff51aeaa5b2094c3c656531c 100644
--- a/paddle/fluid/operators/concat_op.h
+++ b/paddle/fluid/operators/concat_op.h
@@ -60,34 +60,45 @@ template <typename DeviceContext, typename T>
 class ConcatGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const {
-    auto* in = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
+    auto* out_grad =
+        ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
+    auto ins = ctx.MultiInput<framework::Tensor>("X");
+    auto out_var_names = ctx.Outputs(framework::GradVarName("X"));
     auto outs = ctx.MultiOutput<framework::Tensor>(framework::GradVarName("X"));
     int64_t axis = static_cast<int64_t>(ctx.Attr<int>("axis"));
 
+    // get output tensor that the name is not kEmptyVarName
+    std::vector<framework::Tensor*> outputs;
+    for (size_t j = 0; j < outs.size(); ++j) {
+      if (out_var_names[j] != framework::kEmptyVarName) {
+        outs[j]->mutable_data<T>(ctx.GetPlace());
+        outputs.push_back(outs[j]);
+      } else {
+        outputs.push_back(nullptr);
+      }
+    }
+
     // Sometimes direct copies will be faster, this maybe need deeply analysis.
     if (axis == 0 && outs.size() < 10) {
       size_t input_offset = 0;
-      auto in_stride = framework::stride_numel(in->dims());
+      const auto in_stride = framework::stride_numel(out_grad->dims());
 
-      for (auto& out : outs) {
-        out->mutable_data<T>(ctx.GetPlace());
-        auto out_stride = framework::stride_numel(out->dims());
-        StridedNumelCopyWithAxis<T>(ctx.device_context(), axis, out->data<T>(),
-                                    out_stride, in->data<T>() + input_offset,
-                                    in_stride, out_stride[axis]);
+      for (size_t i = 0; i < outs.size(); ++i) {
+        auto out_stride = framework::stride_numel(ins[i]->dims());
+        auto* out = outputs[i];
+        if (out != nullptr) {
+          StridedNumelCopyWithAxis<T>(
+              ctx.device_context(), axis, out->data<T>(), out_stride,
+              out_grad->data<T>() + input_offset, in_stride, out_stride[axis]);
+        }
         input_offset += out_stride[axis];
       }
     } else {
-      std::vector<framework::Tensor> outputs(outs.size());
-      for (size_t j = 0; j < outs.size(); ++j) {
-        outs[j]->mutable_data<T>(ctx.GetPlace());
-        outputs[j] = *outs[j];
-      }
-
       auto& dev_ctx = ctx.template device_context<DeviceContext>();
       paddle::operators::math::ConcatGradFunctor<DeviceContext, T>
           concat_grad_functor;
-      concat_grad_functor(dev_ctx, *in, static_cast<int>(axis), &outputs);
+      concat_grad_functor(dev_ctx, *out_grad, ins, static_cast<int>(axis),
+                          &outputs);
     }
   }
 };
diff --git a/paddle/fluid/operators/math/concat.cc b/paddle/fluid/operators/math/concat.cc
index cc69212466b72f3fa82e8f5f58b4f3229dab28ec..c10cff9c9b193f8fd330b4a25eb4d7c02f5fc1cc 100644
--- a/paddle/fluid/operators/math/concat.cc
+++ b/paddle/fluid/operators/math/concat.cc
@@ -70,35 +70,40 @@ template <typename T>
 class ConcatGradFunctor<platform::CPUDeviceContext, T> {
  public:
   void operator()(const platform::CPUDeviceContext& context,
-                  const framework::Tensor& input, const int axis,
-                  std::vector<framework::Tensor>* outputs) {
+                  const framework::Tensor& input,
+                  const std::vector<const framework::Tensor*>& ref_inputs,
+                  const int axis, std::vector<framework::Tensor*>* outputs) {
     // TODO(zcd): Add input data validity checking
-    int num = outputs->size();
+    size_t num = outputs->size();
     int input_rows = 1;
-    auto dim_0 = outputs->at(0).dims();
+    auto dim_0 = ref_inputs[0]->dims();
     for (int i = 0; i < axis; ++i) {
       input_rows *= dim_0[i];
     }
+
     int input_cols = 0;
 
     std::vector<int64_t> output_cols(outputs->size());
-    for (int i = 0; i < num; ++i) {
-      int t_cols = outputs->at(i).numel() / input_rows;
+    for (size_t i = 0; i < num; ++i) {
+      int t_cols = ref_inputs[i]->numel() / input_rows;
       input_cols += t_cols;
       output_cols[i] = t_cols;
     }
     auto cpu_place = boost::get<platform::CPUPlace>(context.GetPlace());
 
     // computation
-    for (int k = 0; k < input_rows; ++k) {
+    for (size_t k = 0; k < input_rows; ++k) {
       const T* src_ptr = input.data<T>() + k * input_cols;
       int col_idx = 0;
       for (int j = 0; j < num; ++j) {
         int col_len = output_cols[j];
-        T* dst_ptr = outputs->at(j).data<T>() + k * col_len;
-        memory::Copy(cpu_place, dst_ptr, cpu_place, src_ptr + col_idx,
-                     sizeof(T) * col_len);
+        auto* out_tensor = (*outputs)[j];
+        if (out_tensor != nullptr) {
+          T* dst_ptr = out_tensor->data<T>() + k * col_len;
+          memory::Copy(cpu_place, dst_ptr, cpu_place, src_ptr + col_idx,
+                       sizeof(T) * col_len);
+        }
         col_idx += col_len;
       }
     }
   }
diff --git a/paddle/fluid/operators/math/concat.h b/paddle/fluid/operators/math/concat.h
index 041ce8bf8a2e9528a004c076ead4471a3837c1a6..9e080f2e8be23768dcea47b577043beef37b2eaf 100644
--- a/paddle/fluid/operators/math/concat.h
+++ b/paddle/fluid/operators/math/concat.h
@@ -57,7 +57,8 @@ template <typename DeviceContext, typename T>
 class ConcatGradFunctor {
  public:
   void operator()(const DeviceContext& context, const framework::Tensor& input,
-                  const int axis, std::vector<framework::Tensor>* outputs);
+                  const std::vector<const framework::Tensor*>& ref_inputs,
+                  const int axis, std::vector<framework::Tensor*>* outputs);
 };
 
 }  // namespace math
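
Note on the new interface (illustrative, not part of the patch): ConcatGradFunctor now receives the forward inputs (ref_inputs) so each gradient slice's shape can be recovered even when its output tensor is absent, and outputs may contain nullptr entries for gradients whose variable name is framework::kEmptyVarName; the functor skips those column ranges. A minimal call-site sketch, assuming hypothetical tensors x0, x1, dx0, out_grad and a CPU context dev_ctx:

    // Sketch only; all tensor names here are hypothetical.
    // dx0 must already be allocated (mutable_data<float>) with x0's shape;
    // x1's gradient is not needed, so its slot is nullptr and the functor
    // skips that column range, mirroring the kEmptyVarName handling in
    // ConcatGradKernel above.
    std::vector<const framework::Tensor*> ref_inputs = {&x0, &x1};
    std::vector<framework::Tensor*> outputs = {&dx0, nullptr};
    paddle::operators::math::ConcatGradFunctor<platform::CPUDeviceContext,
                                               float>
        concat_grad_functor;
    concat_grad_functor(dev_ctx, out_grad, ref_inputs, /*axis=*/0, &outputs);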