/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/operators/concat_op.h"
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/platform/xpu_header.h"

namespace paddle {
namespace operators {
using Tensor = framework::Tensor;

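// Concat kernel for Baidu Kunlun (XPU) devices: concatenates all non-empty
// input LoDTensors along `axis` by delegating to the XPU runtime's concat.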
template <typename DeviceContext, typename T>
class ConcatXPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto ins = ctx.MultiInput<framework::LoDTensor>("X");
    framework::LoDTensor* out = ctx.Output<framework::LoDTensor>("Out");
    int axis = ctx.Attr<int>("axis");
    PADDLE_ENFORCE_NE(ins[0], nullptr, platform::errors::InvalidArgument(
                                           "The input should not be null."));
    PADDLE_ENFORCE_NE(ctx.HasInput("AxisTensor"), true,
                      platform::errors::InvalidArgument(
                          "XPU does not support AxisTensor for now."));
    axis = ComputeAxis(static_cast<int64_t>(axis),
                       static_cast<int64_t>(ins[0]->dims().size()));
    PADDLE_ENFORCE_GE(
        axis, 0,
        platform::errors::InvalidArgument("concat: axis should >= 0!"));
    PADDLE_ENFORCE_LT(axis, ins[0]->dims().size(),
                      platform::errors::InvalidArgument(
                          "concat: axis should < ins[0]->dims()!"));

    auto place = ctx.GetPlace();
    out->mutable_data<T>(place);
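    // Record which inputs are non-null and non-empty; only these take part
    // in the concatenation.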
    std::vector<int> choose_idx;
    int n = 0;
    for (unsigned int i = 0; i < ins.size(); ++i) {
      if (ins[i] && ins[i]->numel() > 0) {
        choose_idx.push_back(i);
        n++;
      }
    }
    PADDLE_ENFORCE_GT(n, 0, platform::errors::InvalidArgument(
                                "There is no tensor that needs concatenation."));

    // If axis is 0, the lod of the output is not the same as inputs.
    if (axis == 0 && ins[0]->lod().size() > 0) {
      size_t lod_size_0 = ins[0]->lod().size();
      size_t lod_size = lod_size_0;
      for (size_t i = 1; i < ins.size(); ++i) {
        if (ins[i]->lod().size() > 0) {
          PADDLE_ENFORCE_EQ(
              ins[i]->lod().size(), lod_size_0,
              platform::errors::Unimplemented(
                  "The lod level of all input LoDTensors should be the same. "
                  "Concatenating LoDTensors with different lod levels is not "
                  "supported currently. The lod level of the %dth input is %d "
                  "and that of the first input is %d.",
                  i, ins[i]->lod().size(), lod_size_0));
        } else {
          lod_size = 0;
          break;
        }
      }
      if (lod_size) {
        auto* out_lod = out->mutable_lod();
        for (size_t i = 1; i < ins.size(); ++i) {
          auto in_lod = ConvertToLengthBasedLoD(ins[i]->lod());
          AppendLoD(out_lod, in_lod);
        }
      }
    }

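    // Build the per-input shape list expected by xpu::concat, one int
    // vector per chosen input.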
    auto input_dims = ins[0]->dims();
    std::vector<std::vector<int>> xdims_list(n);
    for (int i = 0; i < n; ++i) {
      std::vector<int> tmp_dims(input_dims.size());
      for (int j = 0; j < input_dims.size(); ++j) {
        // Index through choose_idx so the shapes stay aligned with the data
        // pointers gathered below when empty inputs are skipped.
        tmp_dims[j] = ins[choose_idx[i]]->dims()[j];
      }
      xdims_list[i] = tmp_dims;
    }

    auto& dev_ctx = ctx.template device_context<DeviceContext>();
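    // Gather the device data pointers of the chosen inputs, in order.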
    std::vector<const T*> ptrs;
    for (int i = 0; i < n; ++i) {
      ptrs.push_back(ins[choose_idx[i]]->data<T>());
    }
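    // Launch the concatenation on the XPU context; r holds the runtime
    // status code.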
    int r = xpu::concat<T>(dev_ctx.x_context(), ptrs, out->data<T>(),
                           xdims_list, axis);
    PADDLE_ENFORCE_EQ(
        r, XPU_SUCCESS,
        platform::errors::External(
            "The XPU API returned wrong value [%d]; please check whether "
            "the Baidu Kunlun card is properly installed.",
            r));
  }
};

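// Gradient kernel: splits the upstream gradient dOut back into per-input
// gradients dX along the concat axis via the XPU runtime's split.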
template <typename DeviceContext, typename T>
class ConcatGradXPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out_grad =
        ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto ins = ctx.MultiInput<framework::LoDTensor>("X");
    auto out_var_names = ctx.OutputNames(framework::GradVarName("X"));
    auto outs =
        ctx.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));
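    // Propagate each forward input's lod to the corresponding gradient
    // output.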
    {
      auto dx = outs;
      auto x = ins;
      for (size_t i = 0; i < dx.size(); ++i) {
        if (dx[i] != nullptr) {
          dx[i]->set_lod(x[i]->lod());
        }
      }
    }
    PADDLE_ENFORCE_NE(ins[0], nullptr, platform::errors::InvalidArgument(
                                           "The input should not be null."));
    auto axis = ctx.Attr<int>("axis");
    if (ctx.HasInput("AxisTensor")) {
      auto* axis_tensor = ctx.Input<framework::Tensor>("AxisTensor");
      axis = GetDataFromTensor<int>(axis_tensor)[0];
    }
    axis = ComputeAxis(static_cast<int64_t>(axis),
                       static_cast<int64_t>(ins[0]->dims().size()));
    // Collect the gradient outputs whose names are not kEmptyVarName.
    std::vector<framework::Tensor*> outputs;
    std::vector<int> choose_idx;
    int n = 0;
    for (size_t j = 0; j < outs.size(); ++j) {
      if (out_var_names[j] != framework::kEmptyVarName &&
          outs[j]->numel() != 0UL) {
        outs[j]->mutable_data<T>(ctx.GetPlace());
        outputs.push_back(outs[j]);
        choose_idx.push_back(j);
        n++;
      }
    }
    PADDLE_ENFORCE_GE(axis, 0, platform::errors::InvalidArgument(
                                   "concat_grad: axis should >= 0!"));
    PADDLE_ENFORCE_LT(axis, out_grad->dims().size(),
                      platform::errors::InvalidArgument(
                          "concat_grad: axis should < out_grad->dims()!"));

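    // Compute the split sizes along `axis` and reconstruct the concatenated
    // shape that xpu::split expects.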
    auto input_dims = ins[0]->dims();
    std::vector<int> split_list(n);
    std::vector<int> xdims_list(input_dims.size());
    int total_length = 0;
    for (int i = 0; i < n; ++i) {
      // Index through choose_idx so the split sizes stay aligned with the
      // chosen gradient outputs.
      split_list[i] = ins[choose_idx[i]]->dims()[axis];
      total_length += ins[choose_idx[i]]->dims()[axis];
    }
    for (int i = 0; i < input_dims.size(); ++i) {
      if (i == axis) {
        continue;
      }
      xdims_list[i] = input_dims[i];
    }
    xdims_list[axis] = total_length;

    std::vector<T*> ptrs(n);
    for (int i = 0; i < n; ++i) {
      ptrs[i] = outputs[i]->data<T>();
    }

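    // xpu::split scatters slices of out_grad back into each chosen dX
    // tensor.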
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    int r = xpu::split<T>(dev_ctx.x_context(), out_grad->data<T>(), ptrs,
                          xdims_list, split_list, axis);
    PADDLE_ENFORCE_EQ(
        r, XPU_SUCCESS,
        platform::errors::External(
            "The XPU API returned wrong value [%d]; please check whether "
            "the Baidu Kunlun card is properly installed.",
            r));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_XPU_KERNEL(
    concat, ops::ConcatXPUKernel<paddle::platform::XPUDeviceContext, float>);
REGISTER_OP_XPU_KERNEL(
    concat_grad,
    ops::ConcatGradXPUKernel<paddle::platform::XPUDeviceContext, float>);

#endif