/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/expand_op.h"
#include <vector>

namespace paddle {
namespace operators {

using framework::Tensor;

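// Compile-time definition of the expand operator: validates Input(X),
// Output(Out) and Attr(expand_times), and infers the output shape.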
class ExpandOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null.");

    std::vector<int> expand_times =
        ctx->Attrs().Get<std::vector<int>>("expand_times");
    auto x_dims = ctx->GetInputDim("X");

    PADDLE_ENFORCE_EQ(static_cast<size_t>(x_dims.size()), expand_times.size(),
                      "The number of Attr(expand_times)'s value must be equal "
                      "to the rank of Input(X).");
    PADDLE_ENFORCE_LE(x_dims.size(), 6,
                      "The rank of Input(X) must not be greater than 6.");

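    // Each output dimension is the corresponding input dimension tiled
    // expand_times[i] times.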
    std::vector<int64_t> out_shape(x_dims.size());
    for (size_t i = 0; i < expand_times.size(); ++i) {
      PADDLE_ENFORCE_GE(expand_times[i], 1,
                        "Each value of Attr(expand_times) should not be "
                        "less than 1.");
      out_shape[i] = x_dims[i] * expand_times[i];
    }

    ctx->SetOutputDim("Out", framework::make_ddim(out_shape));
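    // Share the LoD only when the first dimension is not expanded.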
    if (out_shape[0] == x_dims[0]) {
      ctx->ShareLoD("X", "Out");
    }
  }
};

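// Declares the inputs, outputs and attributes of the expand operator for the
// op registry and the generated documentation.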
class ExpandOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  ExpandOpMaker(OpProto* proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X",
             "(Tensor, default Tensor<float>). A tensor with rank in [1, 6]."
             "X is the input to be expanded.");
    AddOutput("Out",
              "(Tensor, default Tensor<float>). A tensor with rank in [1, 6]."
              "The rank of Output(Out) have the same with Input(X). "
              "After expanding, size of each dimension of Output(Out) is equal "
              "to size of the corresponding dimension of Input(X) multiplying "
              "the corresponding value given by Attr(expand_times).");
    AddAttr<std::vector<int>>("expand_times",
                              "Expand times number for each dimension.");
    AddComment(R"DOC(
Expand operator tiles the input by the given number of times. You should set
the number of times for each dimension by providing the attribute
'expand_times'. The rank of X should be in [1, 6]. Please note that the size
of 'expand_times' must be the same as X's rank. Following is a usage example:

Input(X) is a 3-D tensor with shape [2, 3, 1]:

        [
           [[1], [2], [3]],
           [[4], [5], [6]]
        ]

Attr(expand_times):  [1, 2, 2]

Output(Out) is a 3-D tensor with shape [2, 6, 2]:

        [
            [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
            [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
        ]

)DOC");
  }
};

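// Compile-time definition of the expand gradient operator: checks that
// Input(Out@GRAD) has the expanded shape of Input(X) and gives Output(X@GRAD)
// the same shape as Input(X).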
class ExpandGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@GRAD) should not be null.");

    auto x_dims = ctx->GetInputDim("X");
    std::vector<int> expand_times =
        ctx->Attrs().Get<std::vector<int>>("expand_times");
    auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));

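    // Out@GRAD must have exactly the shape produced by the forward expand.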
    for (size_t i = 0; i < expand_times.size(); ++i) {
      PADDLE_ENFORCE_EQ(x_dims[i] * expand_times[i], out_dims[i],
                        "Each dimension size of Input(Out@GRAD) should be "
                        "equal to multiplication of crroresponding dimension "
119
                        "size of Input(X) and Attr(expand_times) value.");
    }

    auto x_grad_name = framework::GradVarName("X");

    if (ctx->HasOutput(x_grad_name)) {
      ctx->SetOutputDim(x_grad_name, x_dims);
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
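
// DefaultGradOpDescMaker derives the expand_grad op description from the
// forward op, so no custom GradOpDescMaker is registered here.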
REGISTER_OPERATOR(expand, ops::ExpandOp, ops::ExpandOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(expand_grad, ops::ExpandGradOp);
REGISTER_OP_CPU_KERNEL(
    expand, ops::ExpandKernel<paddle::platform::CPUDeviceContext, float>);
REGISTER_OP_CPU_KERNEL(
    expand_grad,
    ops::ExpandGradKernel<paddle::platform::CPUDeviceContext, float>);