/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/concat_op.h"

#include <paddle/fluid/platform/complex.h>

#include <memory>
#include <string>
#include <vector>

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/phi/infermeta/multiary.h"
#include "paddle/phi/kernels/funcs/concat_funcs.h"

#ifdef PADDLE_WITH_MKLDNN
#include <paddle/fluid/platform/mkldnn_helper.h>
#endif

namespace paddle {
namespace operators {
using Tensor = framework::Tensor;

class ConcatOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
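    // Pick the kernel dtype from the first initialized, non-empty input;
    // uninitialized and zero-sized inputs are skipped.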
    auto inputs = ctx.MultiInput<Tensor>("X");
    auto input_data_type = framework::proto::VarType::Type(0);
    bool flag = false;
    for (auto *input : inputs) {
      if (input->IsInitialized() && input->numel() > 0) {
        input_data_type = framework::TransToProtoVarType(input->dtype());
        flag = true;
        break;
      }
    }
    if (!flag) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "All Inputs of Concat OP are Empty!"));
    }
#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

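  // AxisTensor only carries the axis value, so it is exempt from the
  // usual data transform; every other input keeps its own place and
  // layout.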
  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (var_name == "AxisTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class ConcatOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input tensors of concat operator.").AsDuplicable();
    AddOutput("Out", "Output tensor of concat operator.");
    AddAttr<bool>(
        "use_mkldnn",
        "(bool, default false) Indicates if MKL-DNN kernel will be used")
        .SetDefault(false)
        .AsExtra();
    AddAttr<int>("axis",
                 "The axis along which the input tensors will be concatenated."
                 "The axis could also be negative numbers. Negative axis is "
                 "interpreted as counting from the end of the rank."
                 "i.e., axis + rank(X) th dimension.")
        .SetDefault(0);
    AddInput("AxisTensor",
             "(Tensor) The axis along which the input tensors will be "
             "concatenated.  "
             "It has higher priority than Attr(axis). "
             "The shape of AxisTensor must be [1].")
        .AsDispensable();
    AddAttr<bool>(
        "use_quantizer",
        "(bool, default false) "
        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "int8", "bfloat16"})
        .AsExtra();
    AddComment(R"DOC(
Concat Operator.

Concatenate the input tensors along the given axis.
Examples:
  Input[0] = [[1,2],[3,4]]
  Input[1] = [[5,6]]
  axis = 0
  Output = [[1,2],
            [3,4],
            [5,6]]
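
A second example, concatenating along axis = 1 (all other dimensions
of the inputs must match):
  Input[0] = [[1,2],[3,4]]
  Input[1] = [[5],[6]]
  axis = 1
  Output = [[1,2,5],
            [3,4,6]]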

)DOC");
  }
};

class ConcatOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
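    // Each X@GRAD output takes the shape (and LoD) of the matching
    // forward input X.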
    auto in_x = "X";
    auto out_x_g_n = framework::GradVarName(in_x);
    ctx->SetOutputsDim(out_x_g_n, ctx->GetInputsDim(in_x));

    ctx->ShareAllLoD(in_x, out_x_g_n);
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(
        ctx, framework::GradVarName("Out"));

#ifdef PADDLE_WITH_MKLDNN
    // extra checking if attr "use_mkldnn" exist is needed because
    // test_reverse_op is calling concat_grad kernel without setting
    // "use_mkldnn" to any value
    if (ctx.HasAttr("use_mkldnn") &&
        this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

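  // Same AxisTensor special case as in the forward op: skip the data
  // transform for it.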
  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (var_name == "AxisTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

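// concat_grad reads only the metadata (dims and LoD) of X, never its
// data buffers, so X does not need to be kept in memory for the
// backward pass.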
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ConcatOpGradNoNeedBufferVarInferer, "X");

template <typename T>
class ConcatGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
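    // Wire the concat_grad node: forward inputs X (and AxisTensor, if
    // present) plus Out@GRAD in, X@GRAD out, attributes copied as-is.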
    op->SetType("concat_grad");
    op->SetInput("X", this->Input("X"));
    if (this->HasInput("AxisTensor")) {
      op->SetInput("AxisTensor", this->Input("AxisTensor"));
    }
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X", false));
    op->SetAttrMap(this->Attrs());
  }
};

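// The backward of concat is a split, and the backward of that split is
// again a concat of the incoming gradients, so the double-grad node
// simply reuses the forward concat op.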
template <typename T>
class ConcatDoubleGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("concat");
    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
    grad_op->SetAttrMap(this->Attrs());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

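// Forward shape inference is delegated to phi's ConcatInferMeta through
// the functor below.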
DECLARE_INFER_SHAPE_FUNCTOR(concat, ConcatInferShapeFunctor,
                            PD_INFER_META(phi::ConcatInferMeta));

REGISTER_OPERATOR(concat, ops::ConcatOp, ops::ConcatOpMaker,
                  ops::ConcatGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConcatGradOpMaker<paddle::imperative::OpBase>,
                  ConcatInferShapeFunctor);
REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad,
                  ops::ConcatDoubleGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConcatDoubleGradOpMaker<paddle::imperative::OpBase>,
                  ops::ConcatOpGradNoNeedBufferVarInferer);