/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/concat_op.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h"
#include "paddle/phi/core/tensor_utils.h"

namespace paddle {
namespace operators {

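// Forward kernel: concatenates all "X" inputs along `axis` on the MLU by
// building one CNNL tensor descriptor per input and issuing a single
// MLUCnnl::Concat call.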
template <typename T>
class ConcatMLUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto ins = ctx.MultiInput<phi::DenseTensor>("X");
    phi::DenseTensor* out = ctx.Output<phi::DenseTensor>("Out");
    PADDLE_ENFORCE_NOT_NULL(ins[0],
                            platform::errors::NotFound(
                                "The first input tensor is not initialized."));
    auto axis = ctx.Attr<int>("axis");
    auto ins_size = ins.size();
    bool need_resize_out_dims = false;
    if (ctx.HasInput("AxisTensor")) {
      auto* axis_tensor = ctx.Input<phi::DenseTensor>("AxisTensor");
      axis = phi::GetVectorFromTensor<int>(axis_tensor)[0];
      need_resize_out_dims = true;
    }
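    // ComputeAxis wraps a negative axis into [0, rank): with rank-4 inputs,
    // for example, axis = -1 resolves to 3.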
    axis = ComputeAxis(static_cast<int64_t>(axis),
                       static_cast<int64_t>(ins[0]->dims().size()));

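    // When axis comes from AxisTensor it is only known at run time, so the
    // output shape must be recomputed here: concatenating shapes [2, 3] and
    // [2, 5] along axis 1, for example, yields [2, 8].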
    if (need_resize_out_dims) {
      const size_t n = ins.size();
      std::vector<framework::DDim> ins_dims(n);
      for (size_t i = 0; i < n; i++) {
        ins_dims[i] = ins[i]->dims();
      }

      framework::DDim out_dims =
          phi::funcs::ComputeAndCheckShape(true, ins_dims, axis);
      out->Resize(out_dims);
    }
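    // The MLUCnnl::Concat wrapper takes int parameters, so narrow the axis
    // and the input count before the call.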
    const int axis_t = axis;
    const int ins_size_t = ins_size;
    auto place = ctx.GetPlace();
    out->mutable_data<T>(place);

    // Prepare the inputs: one CNNL tensor descriptor and one raw device
    // pointer per input tensor.
    std::vector<const void*> inputs;
    std::vector<MLUCnnlTensorDesc> input_descs;
    std::vector<cnnlTensorDescriptor_t> desc_vector;
    for (size_t i = 0; i < ins_size; i++) {
      input_descs.emplace_back(MLUCnnlTensorDesc(
          *ins[i], CNNL_LAYOUT_ARRAY, ToCnnlDataType(ins[i]->dtype())));
      desc_vector.push_back(input_descs.back().get());
      inputs.push_back(GetBasePtr(ins[i]));
    }
    // Prepare the output descriptor.
    MLUCnnlTensorDesc output_desc(
        *out, CNNL_LAYOUT_ARRAY, ToCnnlDataType(out->dtype()));

    // Launch the MLU concat kernel.
    MLUCnnl::Concat(ctx,
                    ins_size_t,
                    axis_t,
                    desc_vector.data(),
                    inputs.data(),
                    output_desc.get(),
                    GetBasePtr(out));
  }
};

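// Backward kernel: the gradient of concat is a split, so out_grad is cut
// back into one slice per input along the same axis via MLUCnnl::Split.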
template <typename T>
class ConcatGradMLUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out_grad = ctx.Input<phi::DenseTensor>(framework::GradVarName("Out"));
    auto ins = ctx.MultiInput<phi::DenseTensor>("X");
    auto out_var_names = ctx.OutputNames(framework::GradVarName("X"));
    auto outs = ctx.MultiOutput<phi::DenseTensor>(framework::GradVarName("X"));
    auto axis = ctx.Attr<int>("axis");
    int split_num = ins.size();

    PADDLE_ENFORCE_NOT_NULL(ins[0],
                            platform::errors::NotFound(
                                "The first input tensor is not initialized."));

    if (ctx.HasInput("AxisTensor")) {
      auto* axis_tensor = ctx.Input<phi::DenseTensor>("AxisTensor");
      axis = phi::GetVectorFromTensor<int>(axis_tensor)[0];
    }

    axis = ComputeAxis(static_cast<int64_t>(axis),
                       static_cast<int64_t>(ins[0]->dims().size()));
    PADDLE_ENFORCE_GE(axis,
                      0,
                      platform::errors::InvalidArgument(
                          "concat_grad: axis should be larger than or "
                          "equal to 0, but received axis is %d.",
                          axis));
    PADDLE_ENFORCE_LT(
        axis,
        out_grad->dims().size(),
        platform::errors::InvalidArgument(
            "concat_grad: axis should be less than ins[0]->dims()! "
            "But received axis is %d, while ins[0]->dims() "
            "size is %d.",
            axis,
            out_grad->dims().size()));
    // Collect the output tensors whose names are not kEmptyVarName; every
    // other slot gets a temporary tensor so Split still has a buffer to
    // write into.
    std::vector<void*> outputs_vec;
    std::vector<phi::DenseTensor> tmp_outputs_vec;
    std::vector<MLUCnnlTensorDesc> output_descs;
    std::vector<cnnlTensorDescriptor_t> descs_vec;
    for (size_t j = 0; j < outs.size(); ++j) {
      if (out_var_names[j] != framework::kEmptyVarName &&
          outs[j]->numel() != 0UL) {
        outs[j]->mutable_data<T>(ctx.GetPlace());
        output_descs.emplace_back(MLUCnnlTensorDesc(*outs[j]));
        outputs_vec.push_back(GetBasePtr(outs[j]));
      } else {
        phi::DenseTensor tmp_tensor;
        tmp_tensor.mutable_data<T>(ins[j]->dims(), ctx.GetPlace());
        tmp_outputs_vec.push_back(tmp_tensor);
        output_descs.emplace_back(MLUCnnlTensorDesc(*ins[j]));
        outputs_vec.push_back(GetBasePtr(&(tmp_outputs_vec.back())));
      }
      descs_vec.push_back(output_descs.back().get());
    }

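    // MLUCnnl::Split partitions out_grad along `axis` into split_num pieces,
    // one per input, writing each slice into the buffers gathered above.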
    MLUCnnlTensorDesc out_grad_desc(*out_grad);
    MLUCnnl::Split(ctx,
                   static_cast<int>(split_num),
                   static_cast<int>(axis),
                   out_grad_desc.get(),
                   GetBasePtr(out_grad),
                   descs_vec.data(),
                   outputs_vec.data());
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OP_MLU_KERNEL(concat,
                       ops::ConcatMLUKernel<float>,
                       ops::ConcatMLUKernel<paddle::platform::float16>,
                       ops::ConcatMLUKernel<int64_t>,
                       ops::ConcatMLUKernel<bool>,
                       ops::ConcatMLUKernel<int>,
                       ops::ConcatMLUKernel<uint8_t>);
REGISTER_OP_MLU_KERNEL(concat_grad,
                       ops::ConcatGradMLUKernel<float>,
                       ops::ConcatGradMLUKernel<paddle::platform::float16>,
                       ops::ConcatGradMLUKernel<int64_t>,
                       ops::ConcatGradMLUKernel<bool>,
                       ops::ConcatGradMLUKernel<int>,
                       ops::ConcatGradMLUKernel<uint8_t>);