/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"
#include "paddle/fluid/operators/utils.h"

namespace paddle {
namespace operators {
static inline framework::DDim ComputeAndCheckShape(
    const bool is_runtime, const std::vector<framework::DDim>& inputs_dims,
    const size_t axis) {
  const size_t n = inputs_dims.size();
  auto out_dims = inputs_dims[0];
  size_t in_zero_dims_size = out_dims.size();
  for (size_t i = 1; i < n; i++) {
    for (size_t j = 0; j < in_zero_dims_size; j++) {
      if (j == axis) {
        if (is_runtime) {
          out_dims[axis] += inputs_dims[i][j];
        } else {
          if (inputs_dims[i][j] == -1 || out_dims[axis] == -1) {
            out_dims[axis] = -1;
          } else {
            out_dims[axis] += inputs_dims[i][j];
          }
        }
      } else {
        bool check_shape =
            is_runtime || (out_dims[j] > 0 && inputs_dims[i][j] > 0);
        if (check_shape) {
          // At run time all shapes are checked; at compile time only
          // dimensions that are already known (> 0) are.
          PADDLE_ENFORCE_EQ(inputs_dims[0][j], inputs_dims[i][j],
                            platform::errors::InvalidArgument(
                                "The %d-th dimension of input[0] and input[%d] "
                                "is expected to be equal. But received "
                                "input[0]'s shape = [%s], input[%d]'s shape = "
                                "[%s].",
                                j, i, inputs_dims[0], i, inputs_dims[i]));
        }
      }
    }
  }
  return out_dims;
}
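
// Example: ComputeAndCheckShape(true, {[2, 3], [4, 3]}, 0) returns [6, 3].
// At compile time (is_runtime == false) an unknown extent (-1) on the concat
// axis of any input makes the output extent unknown, e.g. {[2, 3], [-1, 3]}
// gives [-1, 3]; unknown extents on the other axes are not checked until run
// time.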

static inline int64_t ComputeAxis(int64_t axis, int64_t rank) {
  PADDLE_ENFORCE_EQ(
      axis >= -rank && axis < rank, true,
      platform::errors::InvalidArgument(
          "The axis is expected to be in range of [%d, %d), but got %d", -rank,
          rank, axis));
  if (axis < 0) {
    axis = axis + rank;
  }
  return axis > 0 ? axis : 0;
}
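
// Example: for a rank-4 tensor, ComputeAxis(-1, 4) returns 3 and
// ComputeAxis(2, 4) returns 2; out-of-range values such as 4 or -5 trigger
// the InvalidArgument error above.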

template <typename DeviceContext, typename T>
class ConcatKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto ins = ctx.MultiInput<framework::LoDTensor>("X");
    framework::LoDTensor* out = ctx.Output<framework::LoDTensor>("Out");
    PADDLE_ENFORCE_NOT_NULL(ins[0],
                            platform::errors::NotFound(
                                "The first input tensor is not initialized."));
    auto axis = ctx.Attr<int>("axis");
    bool need_resize_out_dims = false;
    if (ctx.HasInput("AxisTensor")) {
      auto* axis_tensor = ctx.Input<framework::Tensor>("AxisTensor");
      axis = GetDataFromTensor<int>(axis_tensor)[0];
      need_resize_out_dims = true;
    }
    axis = ComputeAxis(static_cast<int64_t>(axis),
                       static_cast<int64_t>(ins[0]->dims().size()));

    if (need_resize_out_dims) {
      const size_t n = ins.size();
      std::vector<framework::DDim> ins_dims(n);
      for (size_t i = 0; i < n; i++) {
        ins_dims[i] = ins[i]->dims();
      }

      framework::DDim out_dims = ComputeAndCheckShape(true, ins_dims, axis);
      out->Resize(out_dims);
    }
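
    // For example, with input shapes [2, 3] and [4, 3] and axis 0 read from
    // AxisTensor, out is resized to [6, 3] here before its memory is
    // allocated below.
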
    auto place = ctx.GetPlace();
    out->mutable_data<T>(place);

    // If axis is 0, the lod of the output is not the same as inputs.
    if (axis == 0 && ins[0]->lod().size() > 0) {
      size_t lod_size_0 = ins[0]->lod().size();
      size_t lod_size = lod_size_0;
      for (size_t i = 1; i < ins.size(); ++i) {
        if (ins[i]->lod().size() > 0) {
          PADDLE_ENFORCE_EQ(
              ins[i]->lod().size(), lod_size_0,
              platform::errors::Unimplemented(
                  "The lod level of all input LoDTensors should be the same. "
                  "Concatenating LoDTensors with different lod levels is not "
                  "supported currently. The lod level of the %d-th input is "
                  "%d while that of the first input is %d.",
                  i, ins[i]->lod().size(), lod_size_0));
        } else {
          lod_size = 0;
          break;
        }
      }
      if (lod_size) {
        auto* out_lod = out->mutable_lod();
        // Seed with the first input's (offset-based) lod, then append the
        // remaining inputs' length-based lods.
        *out_lod = ins[0]->lod();
        for (size_t i = 1; i < ins.size(); ++i) {
          auto in_lod = ConvertToLengthBasedLoD(ins[i]->lod());
          AppendLoD(out_lod, in_lod);
        }
      }
    }
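    // For example, concatenating two level-1 LoDTensors with lods {{0, 2, 4}}
    // and {{0, 3}} along axis 0 produces an output lod of {{0, 2, 4, 7}}: the
    // second input's offsets become lengths ({3}) and are appended after the
    // first input's offsets.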

    // When axis == 0, each input occupies one contiguous block of the output,
    // so a strided copy per input can be faster than the generic
    // ConcatFunctor for a small number of inputs; whether 10 is the right
    // threshold may need deeper analysis.
    if (axis == 0 && ins.size() < 10) {
      size_t output_offset = 0;
      for (auto* in : ins) {
        if (!in || in->numel() == 0UL) {
          continue;
        }
        auto in_stride = framework::stride_numel(in->dims());
        auto out_stride = framework::stride_numel(out->dims());
        StridedNumelCopyWithAxis<T>(ctx.device_context(), axis,
                                    out->data<T>() + output_offset, out_stride,
                                    in->data<T>(), in_stride, in_stride[axis]);
        output_offset += in_stride[axis];
      }
    } else {
      std::vector<framework::Tensor> inputs;
      for (size_t j = 0; j < ins.size(); ++j) {
153 154 155 156 157
        if (ins[j] && ins[j]->numel() > 0) {
          inputs.push_back(*ins[j]);
        }
      }
      auto& dev_ctx = ctx.template device_context<DeviceContext>();
      paddle::operators::math::ConcatFunctor<DeviceContext, T> concat_functor;
      concat_functor(dev_ctx, inputs, static_cast<int>(axis), out);
    }
  }
};
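
// A minimal sketch of how this kernel is typically registered; the actual
// registration lives in concat_op.cc and lists more data types:
//
//   namespace ops = paddle::operators;
//   REGISTER_OP_CPU_KERNEL(
//       concat, ops::ConcatKernel<paddle::platform::CPUDeviceContext, float>,
//       ops::ConcatKernel<paddle::platform::CPUDeviceContext, double>);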

template <typename DeviceContext, typename T>
class ConcatGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out_grad =
        ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto ins = ctx.MultiInput<framework::LoDTensor>("X");
    auto out_var_names = ctx.OutputNames(framework::GradVarName("X"));
    auto outs =
        ctx.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));

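    // The gradient of each input keeps the lod of the input itself, so
    // propagate x's lod to dx before splitting out_grad.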
    {
      auto dx = outs;
      auto x = ins;
      for (size_t i = 0; i < dx.size(); ++i) {
        if (dx[i] != nullptr) {
          dx[i]->set_lod(x[i]->lod());
        }
      }
    }
    PADDLE_ENFORCE_NOT_NULL(ins[0],
                            platform::errors::NotFound(
                                "The first input tensor is not initialized."));

    auto axis = ctx.Attr<int>("axis");
    if (ctx.HasInput("AxisTensor")) {
      auto* axis_tensor = ctx.Input<framework::Tensor>("AxisTensor");
      axis = GetDataFromTensor<int>(axis_tensor)[0];
    }
    axis = ComputeAxis(static_cast<int64_t>(axis),
                       static_cast<int64_t>(ins[0]->dims().size()));
    // Collect the output tensors whose names are not kEmptyVarName.
    std::vector<framework::Tensor*> outputs;
    for (size_t j = 0; j < outs.size(); ++j) {
      if (out_var_names[j] != framework::kEmptyVarName &&
          outs[j]->numel() != 0UL) {
        outs[j]->mutable_data<T>(ctx.GetPlace());
        outputs.push_back(outs[j]);
      } else {
        outputs.push_back(nullptr);
      }
    }
    auto& dev_ctx = ctx.template device_context<DeviceContext>();

    // When axis == 0, a direct strided copy per output can be faster than the
    // generic SplitFunctor for a small number of outputs; whether 10 is the
    // right threshold may need deeper analysis.
    if (axis == 0 && outs.size() < 10) {
      std::vector<const framework::Tensor*> ref_shape;
      ref_shape.insert(ref_shape.begin(), ins.begin(), ins.end());
      StridedMemcpyWithAxis0<T>(dev_ctx, *out_grad, ref_shape, &outputs);
    } else {
      math::SplitFunctor<DeviceContext, T> split_functor;
      split_functor(dev_ctx, *out_grad, ctx.MultiInput<framework::Tensor>("X"),
                    static_cast<int>(axis), &outputs);
    }
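    // For example, if X held tensors of shapes [2, 3] and [4, 3] and axis is
    // 0, Out@GRAD of shape [6, 3] is sliced back into a [2, 3] and a [4, 3]
    // gradient here.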
  }
};
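
// Likewise, a sketch of the gradient kernel's registration in concat_op.cc:
//
//   REGISTER_OP_CPU_KERNEL(
//       concat_grad,
//       ops::ConcatGradKernel<paddle::platform::CPUDeviceContext, float>,
//       ops::ConcatGradKernel<paddle::platform::CPUDeviceContext, double>);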

}  // namespace operators
}  // namespace paddle