/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/concat_op.h"
#include <memory>
#include <string>
#include <vector>

#ifdef PADDLE_WITH_MKLDNN
#include <paddle/fluid/platform/mkldnn_helper.h>
#endif

namespace paddle {
namespace operators {
using Tensor = framework::Tensor;

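// ConcatOp infers the shape of Out: dims along "axis" are summed over the
// inputs, all other dims must match, and the LoD of X is shared with Out.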
class ConcatOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_GE(ctx->Inputs("X").size(), 1UL,
                      "Inputs(X) of ConcatOp should not be empty.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of ConcatOp should not be null.");

    auto ins = ctx->GetInputsDim("X");
    size_t axis =
        ComputeAxis(static_cast<int64_t>(ctx->Attrs().Get<int>("axis")),
                    static_cast<int64_t>(ins[0].size()));

    const size_t n = ins.size();

    PADDLE_ENFORCE_GT(n, 0, "Input tensors count should be greater than 0.");
    if (n == 1) {
      VLOG(3) << "Warning: concat op has only one input, which may waste memory";
    }

    auto out_dims = ins[0];
    size_t in_zero_dims_size = out_dims.size();
    for (size_t i = 1; i < n; i++) {
      for (size_t j = 0; j < in_zero_dims_size; j++) {
        if (j == axis) {
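          // At compile time a dim of -1 means "unknown", so the concat axis
          // becomes -1 as soon as any input is unknown along it.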
          if (ctx->IsRuntime()) {
            out_dims[axis] += ins[i][j];
          } else {
            if (ins[i][j] == -1) {
              out_dims[axis] = -1;
            } else {
              out_dims[axis] += ins[i][j];
            }
          }
        } else {
          bool check_shape =
              ctx->IsRuntime() || (out_dims[j] > 0 && ins[i][j] > 0);
          if (check_shape) {
            // Check that the non-concat dimensions match at runtime (or at
            // compile time when both dims are known).
            PADDLE_ENFORCE_EQ(out_dims[j], ins[i][j],
                              "Input tensors should have the same "
                              "shape except along the concat axis.");
          }
        }
      }
    }
    if (out_dims[axis] < 0) {
      out_dims[axis] = -1;
    }
    ctx->SetOutputDim("Out", out_dims);
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
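  // Pick the kernel data type from the first initialized, non-empty input;
  // all-empty inputs are an error. When MKL-DNN is available and enabled,
  // prefer the MKL-DNN kernel for that type.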
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto inputs = ctx.MultiInput<Tensor>("X");
    auto input_data_type = framework::proto::VarType::Type(0);
    bool flag = false;
    for (auto *input : inputs) {
      if (input->IsInitialized() && input->numel() > 0) {
        input_data_type = input->type();
        flag = true;
        break;
      }
    }
    if (!flag) {
      PADDLE_THROW("All inputs of the concat OP are empty!");
    }

#ifdef PADDLE_WITH_MKLDNN
    if (platform::CanMKLDNNBeUsed(ctx)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

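// ConcatOpMaker declares the duplicable input X, the output Out, and the
// axis / use_mkldnn / use_quantizer attributes of the concat operator.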
class ConcatOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input tensors of concat operator.").AsDuplicable();
    AddOutput("Out", "Output tensor of concat operator.");
    AddAttr<bool>(
        "use_mkldnn",
        "(bool, default false) Indicates if MKL-DNN kernel will be used")
        .SetDefault(false);
    AddAttr<int>("axis",
                 "The axis along which the input tensors will be concatenated."
                 "The axis could also be negative numbers. Negative axis is "
                 "interpreted as counting from the end of the rank."
                 "i.e., axis + rank(X) th dimension.")
        .SetDefault(0);
    AddAttr<bool>("use_quantizer",
                  "(bool, default false) "
                  "Set to true for operators that should be quantized and use "
                  "int8 kernel. "
                  "Only used on CPU.")
        .SetDefault(false);
    AddComment(R"DOC(
Concat Operator.

Concatenate the input tensors along dimension axis.
Examples:
  Input[0] = [[1,2],[3,4]]
  Input[1] = [[5,6]]
  axis = 0
  Output = [[1,2],
            [3,4],
            [5,6]]
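
A second example, concatenating along axis 1:
  Input[0] = [[1,2],[3,4]]
  Input[1] = [[5],[6]]
  axis = 1
  Output = [[1,2,5],
            [3,4,6]]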

)DOC");
  }
};

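// ConcatOpGrad gives each gradient slot Grad(X[i]) the shape of the
// corresponding input X[i] and shares its LoD.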
class ConcatOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    auto in_x = "X";
    auto out_x_g_n = framework::GradVarName(in_x);
    ctx->SetOutputsDim(out_x_g_n, ctx->GetInputsDim(in_x));
    auto &in_names = ctx->Inputs(in_x);
    auto &out_names = ctx->Outputs(out_x_g_n);
    PADDLE_ENFORCE_EQ(
        in_names.size(), out_names.size(),
        "The number of arguments in %s[%d] and %s[%d] is not equal.", in_x,
        in_names.size(), out_x_g_n, out_names.size());
    for (size_t i = 0; i < in_names.size(); ++i) {
      if (out_names[i] != framework::kEmptyVarName) {
        ctx->ShareLoD(in_x, out_x_g_n, i, i);
      }
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        ctx.Input<Tensor>(framework::GradVarName("Out"))->type(),
        ctx.GetPlace());
  }
};

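// The backward pass only needs the metadata (dims/LoD) of X, not its data,
// so X can be marked no-need-buffer to let the framework free it early.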
DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(ConcatOpGradNoNeedBufferVarInference,
                                      "X");

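// ConcatGradOpDescMaker builds the concat_grad op desc: inputs are X and
// Grad(Out), the output is Grad(X), and the forward attributes are reused.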
class ConcatGradOpDescMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;

 protected:
  std::unique_ptr<framework::OpDesc> Apply() const override {
    std::unique_ptr<framework::OpDesc> op(new framework::OpDesc());
    op->SetType("concat_grad");
    op->SetInput("X", Input("X"));
    op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), InputGrad("X", false));
    op->SetAttrMap(Attrs());
    return op;
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
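// Register the forward/backward operators, the grad-op maker, the
// no-need-buffer hint, and CPU kernels for float, double, int, and int64_t.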
REGISTER_OPERATOR(concat, ops::ConcatOp, ops::ConcatOpMaker,
                  ops::ConcatGradOpDescMaker);
REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad,
                  ops::ConcatOpGradNoNeedBufferVarInference);
REGISTER_OP_CPU_KERNEL(
    concat, ops::ConcatKernel<paddle::platform::CPUDeviceContext, double>,
    ops::ConcatKernel<paddle::platform::CPUDeviceContext, float>,
    ops::ConcatKernel<paddle::platform::CPUDeviceContext, int64_t>,
    ops::ConcatKernel<paddle::platform::CPUDeviceContext, int>);
REGISTER_OP_CPU_KERNEL(
    concat_grad,
    ops::ConcatGradKernel<paddle::platform::CPUDeviceContext, double>,
    ops::ConcatGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::ConcatGradKernel<paddle::platform::CPUDeviceContext, int64_t>,
    ops::ConcatGradKernel<paddle::platform::CPUDeviceContext, int>);