/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <utility>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"

namespace paddle {
namespace operators {

// Normalize `axis` for a tensor of rank `rank`: a negative axis counts back
// from the end (axis + rank), and the result is clamped to be non-negative.
static inline int64_t ComputeAxis(int64_t axis, int64_t rank) {
  if (axis < 0) {
    axis = axis + rank;
  }
  return axis > 0 ? axis : 0;
}
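
// Illustrative behavior of ComputeAxis (examples only, not exercised here):
//   ComputeAxis(-1, 4) == 3  // a negative axis counts back from rank
//   ComputeAxis(2, 4)  == 2  // a valid non-negative axis passes through
//   ComputeAxis(-5, 4) == 0  // out-of-range negatives clamp to 0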

template <typename DeviceContext, typename T>
class ConcatKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto ins = ctx.MultiInput<framework::Tensor>("X");
    framework::Tensor* out = ctx.Output<framework::Tensor>("Out");
    PADDLE_ENFORCE(ins[0], "The first input of concat should not be null.");
    auto axis = ComputeAxis(static_cast<int64_t>(ctx.Attr<int>("axis")),
                            static_cast<int64_t>(ins[0]->dims().size()));
    auto place = ctx.GetPlace();
    out->mutable_data<T>(place);

    // Sometimes direct copies are faster than the generic concat functor;
    // whether this heuristic is always a win may need deeper analysis.
    if (axis == 0 && ins.size() < 10) {
      size_t output_offset = 0;
      for (auto* in : ins) {
        if (!in || in->numel() == 0UL) {
          continue;
        }
        auto in_stride = framework::stride_numel(in->dims());
        auto out_stride = framework::stride_numel(out->dims());
        StridedNumelCopyWithAxis<T>(ctx.device_context(), axis,
                                    out->data<T>() + output_offset, out_stride,
                                    in->data<T>(), in_stride, in_stride[axis]);
        output_offset += in_stride[axis];
      }
    } else {
      std::vector<framework::Tensor> inputs;
      for (size_t j = 0; j < ins.size(); ++j) {
        // Skip null or empty inputs; only non-empty tensors take part in the
        // concatenation.
        if (ins[j] && ins[j]->numel() > 0) {
          inputs.push_back(*ins[j]);
        }
      }
      auto& dev_ctx = ctx.template device_context<DeviceContext>();
      paddle::operators::math::ConcatFunctor<DeviceContext, T> concat_functor;
      concat_functor(dev_ctx, inputs, static_cast<int>(axis), out);
    }
  }
};
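
// A sketch of how this kernel is typically made available to the framework.
// Registration actually lives in concat_op.cc / concat_op.cu; the exact type
// list there may differ from this illustration:
//
//   REGISTER_OP_CPU_KERNEL(
//       concat,
//       paddle::operators::ConcatKernel<paddle::platform::CPUDeviceContext,
//                                       float>,
//       paddle::operators::ConcatKernel<paddle::platform::CPUDeviceContext,
//                                       double>);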

template <typename DeviceContext, typename T>
class ConcatGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out_grad =
        ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto ins = ctx.MultiInput<framework::LoDTensor>("X");
    auto out_var_names = ctx.Outputs(framework::GradVarName("X"));
    auto outs =
        ctx.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));

    // Propagate each input's LoD to its corresponding gradient output.
    {
      auto dx = outs;
      auto x = ins;
      for (size_t i = 0; i < dx.size(); ++i) {
        if (dx[i] != nullptr) {
          dx[i]->set_lod(x[i]->lod());
        }
      }
    }
    PADDLE_ENFORCE(ins[0], "The first input of concat should not be null.");
    auto axis = ComputeAxis(static_cast<int64_t>(ctx.Attr<int>("axis")),
                            static_cast<int64_t>(ins[0]->dims().size()));

    // Collect gradient outputs whose variable names are not kEmptyVarName;
    // keep nullptr placeholders for the rest so indices stay aligned.
    std::vector<framework::Tensor*> outputs;
    for (size_t j = 0; j < outs.size(); ++j) {
      if (out_var_names[j] != framework::kEmptyVarName &&
          outs[j]->numel() != 0UL) {
        outs[j]->mutable_data<T>(ctx.GetPlace());
        outputs.push_back(outs[j]);
      } else {
        outputs.push_back(nullptr);
      }
    }
    auto& dev_ctx = ctx.template device_context<DeviceContext>();

    // Sometimes direct copies are faster than the generic split functor;
    // whether this heuristic is always a win may need deeper analysis.
    if (axis == 0 && outs.size() < 10) {
      std::vector<const framework::Tensor*> ref_shape;
      ref_shape.insert(ref_shape.begin(), ins.begin(), ins.end());
      StridedMemcpyWithAxis0<T>(dev_ctx, *out_grad, ref_shape, &outputs);
    } else {
      math::SplitFunctor<DeviceContext, T> split_functor;
      split_functor(dev_ctx, *out_grad, ctx.MultiInput<framework::Tensor>("X"),
                    static_cast<int>(axis), &outputs);
    }
  }
};
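
// Worked example (illustrative): concatenating inputs of shapes [2, 3] and
// [2, 5] along axis 1 yields Out of shape [2, 8]. ConcatGradKernel reverses
// this by splitting dOut ([2, 8]) back into dX0 ([2, 3]) and dX1 ([2, 5]) via
// SplitFunctor, or via the strided-memcpy fast path when axis == 0.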

}  // namespace operators
}  // namespace paddle