// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/stack_op.h"
#include <memory>
#include <vector>

namespace plat = paddle::platform;
namespace ops = paddle::operators;

namespace paddle {
namespace operators {

class StackOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_GT(ctx->Inputs("X").size(), 0,
                      platform::errors::InvalidArgument(
                          "Number of Inputs(X) must be larger than 0, but"
                          " received value is:%d.",
                          ctx->Inputs("X").size()));
    PADDLE_ENFORCE_EQ(ctx->HasOutput("Y"), true,
                      platform::errors::InvalidArgument(
                          "Output(Y) of stack_op should not be null."));

    auto input_dims = ctx->GetInputsDim("X");
    for (size_t i = 1; i < input_dims.size(); ++i) {
      PADDLE_ENFORCE_EQ(input_dims[i], input_dims[0],
                        platform::errors::InvalidArgument(
                            "Dims of all Inputs(X) must be the same, but"
                            " received input %d dim is:%d not equal to input 0"
                            " dim:%d.",
                            i, input_dims[i], input_dims[0]));
    }

    // Only lod of X[0] would be shared with Y
    ctx->ShareLoD("X", /*->*/ "Y");

    int axis = ctx->Attrs().Get<int>("axis");
    int rank = input_dims[0].size();
    PADDLE_ENFORCE_GE(
        axis, -(rank + 1),
        platform::errors::InvalidArgument(
            "Attr(axis) must be inside [-(rank+1), rank+1), where rank = %d, "
            "but received axis is:%d.",
            rank, axis));

    PADDLE_ENFORCE_LT(
        axis, rank + 1,
        platform::errors::InvalidArgument(
            "Attr(axis) must be inside [-(rank+1), rank+1), where rank = %d, "
            "but received axis is:%d",
            rank, axis));

    if (axis < 0) axis += (rank + 1);

    auto vec = framework::vectorize<int>(input_dims[0]);
    vec.insert(vec.begin() + axis, input_dims.size());
    ctx->SetOutputDim("Y", framework::make_ddim(vec));
  }
74 75 76 77 78 79 80 81 82 83 84 85 86 87 88

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
89 90 91 92 93 94 95 96 97 98
};

// Declares the proto of the stack op: duplicable input X, output Y, and the
// `axis`/`use_mkldnn` attributes.
class StackOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "The input of stack op.").AsDuplicable();
    AddOutput("Y", "The output of stack op.");
    AddAttr<int>("axis",
                 "The axis along which all of the Inputs(X) should be stacked.")
        .SetDefault(0);
    AddAttr<bool>(
        "use_mkldnn",
        "(bool, default false) Indicates if MKL-DNN kernel will be used")
        .SetDefault(false)
        .AsExtra();
    AddComment(R"DOC(
Stack Operator.
Stack all of the Inputs(X) into one tensor along Attr(axis). The dims of all Inputs(X) must be the same.
)DOC");
  }
};

// Gradient op of stack: splits Y@GRAD back into one X@GRAD per stacked input.
class StackOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  // Infers the shapes of Outputs(X@GRAD) from Input(Y@GRAD): each output is
  // dy's shape with the stacked axis removed.
  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_EQ(
        ctx->HasInput(framework::GradVarName("Y")), true,
        platform::errors::InvalidArgument("Input(Y@Grad) not exist."));

    int axis = ctx->Attrs().Get<int>("axis");
    auto dy_dim = ctx->GetInputDim(framework::GradVarName("Y"));
    // Here `rank` is dy's rank (input rank + 1), so axis is checked against
    // [-rank, rank), unlike the forward op which uses [-(rank+1), rank+1).
    int rank = dy_dim.size();
    PADDLE_ENFORCE_GE(
        axis, -rank,
        platform::errors::InvalidArgument(
            "Attr(axis) must be inside [-rank, rank), where rank = %d, "
            "but received axis is:%d.",
            rank, axis));
    PADDLE_ENFORCE_LT(
        axis, rank,
        platform::errors::InvalidArgument(
            "Attr(axis) must be inside [-rank, rank), where rank = %d, "
            "but received axis is:%d.",
            rank, axis));

    // Normalize a negative axis to its non-negative equivalent.
    if (axis < 0) axis += rank;
    // dy_dim[axis] is the number of tensors that were stacked; it must equal
    // the number of gradient outputs the caller declared.
    PADDLE_ENFORCE_EQ(
        ctx->Outputs(framework::GradVarName("X")).size(),
        static_cast<size_t>(dy_dim[axis]),
        platform::errors::InvalidArgument(
            "Number of Outputs(X@Grad) is equal to dy dim at axis, but"
            " received outputs size is:%d, dy dims is:%d.",
            ctx->Outputs(framework::GradVarName("X")).size(),
            static_cast<size_t>(dy_dim[axis])));

    // Drop the stacked axis and assign the same shape to every X@GRAD.
    auto vec = framework::vectorize<int>(dy_dim);
    vec.erase(vec.begin() + axis);
    ctx->SetOutputsDim(
        framework::GradVarName("X"),
        std::vector<framework::DDim>(dy_dim[axis], framework::make_ddim(vec)));
  }
};

// Describes how to build the stack_grad op from the forward stack op:
// Y@GRAD flows in, and one X@GRAD is produced per forward input.
template <typename T>
class StackGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("stack_grad");
    op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));
    // `false`: do not drop empty grads; keep one X@GRAD slot per input.
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X", false));
    op->SetAttrMap(this->Attrs());
  }
};

}  // namespace operators
}  // namespace paddle

// Operator registration: forward op with its proto maker and grad makers for
// both the static graph (OpDesc) and imperative (OpBase) modes.
REGISTER_OPERATOR(stack, ops::StackOp, ops::StackOpMaker,
                  ops::StackGradOpMaker<paddle::framework::OpDesc>,
                  ops::StackGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(stack_grad, ops::StackOpGrad);

// CPU kernels for the forward op.
REGISTER_OP_CPU_KERNEL(
    stack, ops::StackKernel<plat::CPUDeviceContext, float>,
    ops::StackKernel<plat::CPUDeviceContext, double>,
    ops::StackKernel<plat::CPUDeviceContext, int>,
    ops::StackKernel<plat::CPUDeviceContext, int64_t>,
    ops::StackKernel<plat::CPUDeviceContext, paddle::platform::bfloat16>);

// CPU kernels for the gradient op.
REGISTER_OP_CPU_KERNEL(
    stack_grad, ops::StackGradKernel<plat::CPUDeviceContext, float>,
    ops::StackGradKernel<plat::CPUDeviceContext, double>,
    ops::StackGradKernel<plat::CPUDeviceContext, int>,
    ops::StackGradKernel<plat::CPUDeviceContext, int64_t>,
    ops::StackGradKernel<plat::CPUDeviceContext, paddle::platform::bfloat16>);