roi_pool_op.cc
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/roi_pool_op.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

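// Each ROI row is [batch_id, x1, y1, x2, y2], hence five elements.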
static constexpr int kROISize = 5;

class ROIPoolOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of ROIPoolOp should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("ROIs"),
                   "Input(ROIs) of ROIPoolOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of ROIPoolOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Argmax"),
                   "Output(Argmax) of ROIPoolOp should not be null.");
    auto input_dims = ctx->GetInputDim("X");
    auto rois_dims = ctx->GetInputDim("ROIs");

    PADDLE_ENFORCE(input_dims.size() == 4,
                   "The input of ROIPoolOp should be a 4-D tensor "
                   "in NCHW format.");
    PADDLE_ENFORCE(rois_dims.size() == 2,
                   "ROIs should be a 2-D tensor of shape (num_rois, 5) "
                   "given as [[batch_id, x1, y1, x2, y2], …].");
    PADDLE_ENFORCE(rois_dims[1] == kROISize,
                   "ROIs should be a 2-D tensor of shape (num_rois, 5) "
                   "given as [[batch_id, x1, y1, x2, y2], …].");

    int pooled_height = ctx->Attrs().Get<int>("pooled_height");
    int pooled_width = ctx->Attrs().Get<int>("pooled_width");
    float spatial_scale = ctx->Attrs().Get<float>("spatial_scale");

    PADDLE_ENFORCE_GT(pooled_height, 0,
                      "The pooled output height must be greater than 0.");
    PADDLE_ENFORCE_GT(pooled_width, 0,
                      "The pooled output width must be greater than 0.");
    PADDLE_ENFORCE_GT(spatial_scale, 0.0f,
                      "The spatial scale must be greater than 0.");

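    // Each ROI yields one (channels, pooled_height, pooled_width) feature
    // map, so the output has num_rois entries along the first dimension.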
    auto out_dims = input_dims;
    out_dims[0] = rois_dims[0];
    out_dims[1] = input_dims[1];
    out_dims[2] = pooled_height;
    out_dims[3] = pooled_width;

    ctx->SetOutputDim("Out", out_dims);
    ctx->SetOutputDim("Argmax", out_dims);
  }

 protected:
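  // The kernel is selected according to the data type of input X.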
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
        ctx.device_context());
  }
};

class ROIPoolGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "The gradient of Out should not be null.");
    PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName("X")),
                   "The gradient of X should not be null.");
    ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
  }

 protected:
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
        ctx.device_context());
  }
};

class ROIPoolOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  ROIPoolOpMaker(OpProto* proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X",
             "(Tensor), "
             "the input of ROIPoolOp. "
             "The format of input tensor is NCHW. Where N is batch size, "
             "C is the number of input channels, "
             "H is the height of the feature, and "
             "W is the width of the feature.");
    AddInput("ROIs",
             "(Tensor), "
             "ROIs (Regions of Interest) to pool over. "
             "should be a 2-D tensor of shape (num_rois, 5)"
             "given as [[batch_id, x1, y1, x2, y2], …]. "
             "Where batch_id is the id of the data, "
             "(x1, y1) is the top left coordinates, and "
             "(x2, y2) is the bottom right coordinates.");
    AddOutput("Out",
              "(Tensor), "
              "The output of ROIPoolOp is a 4-D tensor with shape "
              "(num_rois, channels, pooled_h, pooled_w).");
    AddOutput("Argmax",
              "(Tensor), "
              "Argmaxes corresponding to indices in X used "
              "for gradient computation. Only output "
              "if arg “is_test” is false.")
        .AsIntermediate();
    AddAttr<float>("spatial_scale",
                   "(float, default 1.0), "
                   "Multiplicative spatial scale factor "
                   "to translate ROI coords from their input scale "
                   "to the scale used when pooling.")
        .SetDefault(1.0);
    AddAttr<int>("pooled_height",
                 "(int, default 1), "
                 "The pooled output height.")
        .SetDefault(1);
    AddAttr<int>("pooled_width",
                 "(int, default 1), "
                 "The pooled output width.")
        .SetDefault(1);
    AddComment(R"DOC(
ROIPool operator

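The operator maps each ROI onto the input feature map using spatial_scale,
divides it into a pooled_height x pooled_width grid of bins, and max-pools
the features inside each bin, so every ROI produces a fixed-size output
regardless of its original size. The indices of the pooled maxima are kept
in Argmax for the backward pass.
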
ROI Pooling for Faster R-CNN. See the link below for a further introduction:
https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn
    )DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP(roi_pool, ops::ROIPoolOp, ops::ROIPoolOpMaker, roi_pool_grad,
            ops::ROIPoolGradOp);
REGISTER_OP_CPU_KERNEL(
    roi_pool,
    ops::CPUROIPoolOpKernel<paddle::platform::CPUDeviceContext, float>,
    ops::CPUROIPoolOpKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    roi_pool_grad,
    ops::CPUROIPoolGradOpKernel<paddle::platform::CPUDeviceContext, float>,
    ops::CPUROIPoolGradOpKernel<paddle::platform::CPUDeviceContext, double>);