/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <utility>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"

namespace paddle {
namespace operators {

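// Concatenates the input tensors "X" along the "axis" attribute and writes
// the result to "Out".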
template <typename DeviceContext, typename T>
class ConcatKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto ins = ctx.MultiInput<framework::Tensor>("X");
    framework::Tensor* out = ctx.Output<framework::Tensor>("Out");
    int64_t axis = static_cast<int64_t>(ctx.Attr<int>("axis"));
    auto place = ctx.GetPlace();
    out->mutable_data<T>(place);

    // Sometimes direct copies are faster; this heuristic may need deeper
    // analysis.
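    // When axis == 0, each input occupies one contiguous block of the output,
    // so a strided copy per input avoids the generic concat kernel.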
    if (axis == 0 && ins.size() < 10) {
      size_t output_offset = 0;
      for (auto* in : ins) {
        if (!in || in->numel() == 0UL) {
          continue;
        }
        auto in_stride = framework::stride_numel(in->dims());
        auto out_stride = framework::stride_numel(out->dims());
        StridedNumelCopyWithAxis<T>(ctx.device_context(), axis,
                                    out->data<T>() + output_offset, out_stride,
                                    in->data<T>(), in_stride, in_stride[axis]);
        output_offset += in_stride[axis];
      }
    } else {
      std::vector<framework::Tensor> inputs;
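      // Gather only the non-empty inputs; null or zero-sized tensors are
      // skipped.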
      for (size_t j = 0; j < ins.size(); ++j) {
        if (ins[j] && ins[j]->numel() > 0) {
          inputs.push_back(*ins[j]);
        }
      }
      auto& dev_ctx = ctx.template device_context<DeviceContext>();
      paddle::operators::math::ConcatFunctor<DeviceContext, T> concat_functor;
      concat_functor(dev_ctx, inputs, static_cast<int>(axis), out);
    }
  }
};

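// Splits the gradient of "Out" along "axis" back into gradients of the
// original inputs "X".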
template <typename DeviceContext, typename T>
class ConcatGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out_grad =
        ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto ins = ctx.MultiInput<framework::LoDTensor>("X");
    auto out_var_names = ctx.Outputs(framework::GradVarName("X"));
    auto outs =
        ctx.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));

    {
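      // Propagate each forward input's LoD to the matching gradient output so
      // that sequence information is preserved.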
      auto dx = outs;
      auto x = ins;
      for (size_t i = 0; i < dx.size(); ++i) {
        if (dx[i] != nullptr) {
          dx[i]->set_lod(x[i]->lod());
        }
      }
    }

    int64_t axis = static_cast<int64_t>(ctx.Attr<int>("axis"));

    // Collect the output tensors whose names are not kEmptyVarName.
    std::vector<framework::Tensor*> outputs;
    for (size_t j = 0; j < outs.size(); ++j) {
      if (out_var_names[j] != framework::kEmptyVarName &&
          outs[j]->numel() != 0UL) {
        outs[j]->mutable_data<T>(ctx.GetPlace());
        outputs.push_back(outs[j]);
      } else {
        outputs.push_back(nullptr);
      }
    }
    auto& dev_ctx = ctx.template device_context<DeviceContext>();

    // Sometimes direct copies are faster; this heuristic may need deeper
    // analysis.
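    // StridedMemcpyWithAxis0 copies successive axis-0 slices of out_grad into
    // the collected outputs; the forward inputs supply the slice shapes.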
    if (axis == 0 && outs.size() < 10) {
      std::vector<const framework::Tensor*> ref_shape;
      ref_shape.insert(ref_shape.begin(), ins.begin(), ins.end());
      StridedMemcpyWithAxis0<T>(dev_ctx, *out_grad, ref_shape, &outputs);
    } else {
      math::SplitFunctor<DeviceContext, T> split_functor;
      split_functor(dev_ctx, *out_grad, ctx.MultiInput<framework::Tensor>("X"),
                    static_cast<int>(axis), &outputs);
    }
  }
};

}  // namespace operators
}  // namespace paddle