/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/sequence_concat_op.h"

namespace paddle {
namespace operators {

class SequenceConcatOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInputs("X"),
                   "Inputs(X) of SequenceConcatOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of SequenceConcatOp should not be null.");
    const size_t level = static_cast<size_t>(ctx->Attrs().Get<int>("level"));
    const size_t axis = static_cast<size_t>(ctx->Attrs().Get<int>("axis"));
    PADDLE_ENFORCE(level == 0UL || level == 1UL,
                   "The sequence_concat operator only accepts sequence "
                   "or a nested sequence as its input.");
    auto ins_dims = ctx->GetInputsDim("X");
    framework::DDim out_dims = ins_dims[0];
    const size_t n = ins_dims.size();
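    // The output keeps the shape of the first input, except along the
    // concatenation axis, where the sizes of all inputs are accumulated.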
    for (size_t i = 1; i < n; ++i) {
      out_dims[axis] += ins_dims[i][axis];
    }
    ctx->SetOutputDim("Out", out_dims);
  }
};

class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  SequenceConcatOpMaker(framework::OpProto* proto,
                        framework::OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X",
             "(vector<LoDTensor>) Input is a vector of LoDTensor, "
             "each of which is a variable-length sequence or nested sequence.")
        .AsDuplicable();
    AddOutput("Out",
              "(LoDTensor), Variable-length output of "
              "sequence_concat Op.");
    AddAttr<int>("axis",
                 "(int, default 0) "
                 "The axis along which the inputs will be joined. "
                 "If axis is 0, the inputs will be joined with LoD index.")
        .SetDefault(0);
    AddAttr<int>("level",
                 "(int, default 0) "
                 "The level at which the inputs will be joined. "
                 "If the level is 0, the inputs will be joined at the nested "
                 "sequence level. "
                 "If the level is 1, the inputs will be joined at the "
                 "sequence level. "
                 "The level should be less than the level number of inputs.")
        .SetDefault(0);
    AddComment(R"DOC(
Sequence Concat operator

The sequence_concat operator concatenates multiple LoDTensors.
It only supports a sequence (LoDTensor with a LoD level of 1)
or a nested sequence (LoDTensor with a LoD level of 2) as its input.
- Case1:
  If the axis is other than 0 (here, axis is 1 and level is 1),
  each input should have the same LoD information, and the LoD
  information of the output is the same as that of the inputs.

    LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
    LoD(x1) = {{0,2,4}, {0,1,2,3,4}}; Dims(x1) = (4,4,4)
    LoD(Out) = {{0,2,4}, {0,1,2,3,4}}; Dims(Out) = (4,7,4)

- Case2:
  If the axis is 0 (here, level is 0), the inputs are concatenated along
  time steps, and the LoD information of the output needs to be re-computed.

    LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
    LoD(x1) = {{0,3,5}, {0,1,2,3,5}}; Dims(x1) = (5,3,4)
    LoD(Out) = {{0,5,9}, {0,1,2,3,4,5,6,7,9}}; Dims(Out) = (9,3,4)

- Case3:
  If the axis is 0 (here, level is 1), the inputs are concatenated at the
  sequence level, and the LoD information of the output needs to be
  re-computed.

    LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
    LoD(x1) = {{0,3,5}, {0,1,3,4,5}}; Dims(x1) = (5,3,4)
    LoD(Out) = {{0,5,9}, {0,2,5,7,9}}; Dims(Out) = (9,3,4)

NOTE: The LoD levels of all the inputs should be the same.
    )DOC");
  }
};

class SequenceConcatGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "The gradient of Out should not be null.");
    PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName("X")),
                   "The gradient of X should not be null.");
    ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
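// Register the forward and backward operators together with their CPU kernels.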
REGISTER_OP(sequence_concat, ops::SequenceConcatOp, ops::SequenceConcatOpMaker,
            sequence_concat_grad, ops::SequenceConcatGradOp);
REGISTER_OP_CPU_KERNEL(
    sequence_concat,
    ops::SequenceConcatOpKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
    sequence_concat_grad,
    ops::SequenceConcatGradOpKernel<paddle::platform::CPUPlace, float>);