/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/roi_pool_op.h"
#include <memory>

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
22
using LoDTensor = framework::LoDTensor;
W
wanghaox 已提交
23

W
wanghaox 已提交
24
class ROIPoolOp : public framework::OperatorWithKernel {
W
wanghaox 已提交
25 26 27 28 29
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
W
wanghaox 已提交
30 31 32
                   "Input(X) of ROIPoolOp should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("ROIs"),
                   "Input(ROIs) of ROIPoolOp should not be null.");
W
wanghaox 已提交
33
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
W
wanghaox 已提交
34
                   "Output(Out) of ROIPoolOp should not be null.");
W
wanghaox 已提交
35
    PADDLE_ENFORCE(ctx->HasOutput("Argmax"),
W
wanghaox 已提交
36
                   "Output(Argmax) of ROIPoolOp should not be null.");
W
wanghaox 已提交
37
    auto input_dims = ctx->GetInputDim("X");
W
wanghaox 已提交
38
    auto rois_dims = ctx->GetInputDim("ROIs");
F
FDInSky 已提交
39 40 41 42
    if (ctx->HasInput("RoisLod")) {
      auto rois_lod_dims = ctx->GetInputDim("RoisLod");
      PADDLE_ENFORCE(rois_lod_dims.size() == 1, "");
    }
W
wanghaox 已提交
43 44 45
    PADDLE_ENFORCE(input_dims.size() == 4,
                   "The format of input tensor is NCHW.");
    PADDLE_ENFORCE(rois_dims.size() == 2,
46
                   "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)"
W
wopeizl 已提交
47
                   "given as [[x1, y1, x2, y2], ...].");
W
wanghaox 已提交
48
    PADDLE_ENFORCE(rois_dims[1] == kROISize,
49
                   "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)"
W
wopeizl 已提交
50
                   "given as [[x1, y1, x2, y2], ...].");
W
wanghaox 已提交
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70

    int pooled_height = ctx->Attrs().Get<int>("pooled_height");
    int pooled_width = ctx->Attrs().Get<int>("pooled_width");
    float spatial_scale = ctx->Attrs().Get<float>("spatial_scale");

    PADDLE_ENFORCE_GT(pooled_height, 0,
                      "The pooled output height must greater than 0");
    PADDLE_ENFORCE_GT(pooled_width, 0,
                      "The pooled output width must greater than 0");
    PADDLE_ENFORCE_GT(spatial_scale, 0.0f,
                      "The spatial scale must greater than 0");

    auto out_dims = input_dims;
    out_dims[0] = rois_dims[0];
    out_dims[1] = input_dims[1];
    out_dims[2] = pooled_height;
    out_dims[3] = pooled_width;

    ctx->SetOutputDim("Out", out_dims);
    ctx->SetOutputDim("Argmax", out_dims);
71
  }
W
wanghaox 已提交
72 73

 protected:
74
  framework::OpKernelType GetExpectedKernelType(
W
wanghaox 已提交
75
      const framework::ExecutionContext& ctx) const override {
76 77 78
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "X"),
        ctx.device_context());
W
wanghaox 已提交
79 80 81
  }
};

class ROIPoolGradOp : public framework::OperatorWithKernel {
W
wanghaox 已提交
83 84 85 86 87 88 89 90 91 92 93 94
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "The gradient of Out should not be null.");
    PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName("X")),
                   "The gradient of X should not be null.");
    ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
  }

 protected:
95
  framework::OpKernelType GetExpectedKernelType(
W
wanghaox 已提交
96
      const framework::ExecutionContext& ctx) const override {
97 98 99
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "X"),
        ctx.device_context());
W
wanghaox 已提交
100 101 102
  }
};

class ROIPoolOpMaker : public framework::OpProtoAndCheckerMaker {
W
wanghaox 已提交
104
 public:
Y
Yu Yang 已提交
105
  void Make() override {
W
wanghaox 已提交
106 107
    AddInput("X",
             "(Tensor), "
W
wanghaox 已提交
108 109 110 111 112 113
             "the input of ROIPoolOp. "
             "The format of input tensor is NCHW. Where N is batch size, "
             "C is the number of input channels, "
             "H is the height of the feature, and "
             "W is the width of the feature.");
    AddInput("ROIs",
114
             "(LoDTensor), "
W
wanghaox 已提交
115
             "ROIs (Regions of Interest) to pool over. "
116
             "should be a 2-D LoDTensor of shape (num_rois, 4)"
W
wopeizl 已提交
117
             "given as [[x1, y1, x2, y2], ...]. "
W
wanghaox 已提交
118 119 120
             "Where batch_id is the id of the data, "
             "(x1, y1) is the top left coordinates, and "
             "(x2, y2) is the bottom right coordinates.");
F
FDInSky 已提交
121
    AddInput("RoisLod", "(Tensor), The lod info of rois.").AsDispensable();
W
wanghaox 已提交
122 123
    AddOutput("Out",
              "(Tensor), "
W
wanghaox 已提交
124 125
              "The output of ROIPoolOp is a 4-D tensor with shape "
              "(num_rois, channels, pooled_h, pooled_w).");
W
wanghaox 已提交
126 127 128 129
    AddOutput("Argmax",
              "(Tensor), "
              "Argmaxes corresponding to indices in X used "
              "for gradient computation. Only output "
P
peizhilin 已提交
130
              "if arg \"is_test\" is false.")
131
        .AsIntermediate();
W
wanghaox 已提交
132
    AddAttr<float>("spatial_scale",
W
wanghaox 已提交
133 134 135 136
                   "(float, default 1.0), "
                   "Multiplicative spatial scale factor "
                   "to translate ROI coords from their input scale "
                   "to the scale used when pooling.")
137
        .SetDefault(1.0);
W
wanghaox 已提交
138
    AddAttr<int>("pooled_height",
W
wanghaox 已提交
139 140
                 "(int, default 1), "
                 "The pooled output height.")
141
        .SetDefault(1);
W
wanghaox 已提交
142
    AddAttr<int>("pooled_width",
W
wanghaox 已提交
143 144
                 "(int, default 1), "
                 "The pooled output width.")
145
        .SetDefault(1);
W
wanghaox 已提交
146
    AddComment(R"DOC(
Y
yi.wu 已提交
147
**ROIPool Operator**
W
wanghaox 已提交
148

Y
yi.wu 已提交
149 150 151 152 153
Region of interest pooling (also known as RoI pooling) is to perform
is to perform max pooling on inputs of nonuniform sizes to obtain
fixed-size feature maps (e.g. 7*7).

The operator has three steps:
Y
yi.wu 已提交
154

Y
yi.wu 已提交
155 156
1. Dividing each region proposal into equal-sized sections with
   the pooled_width and pooled_height
Y
update  
yi.wu 已提交
157

Y
yi.wu 已提交
158
2. Finding the largest value in each section
Y
update  
yi.wu 已提交
159

Y
yi.wu 已提交
160 161
3. Copying these max values to the output buffer

W
wanghaox 已提交
162 163 164 165 166 167
ROI Pooling for Faster-RCNN. The link below is a further introduction: 
https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn
    )DOC");
  }
};

// Builds the grad-op description for roi_pool: wires the forward inputs
// (X, ROIs, RoisLod), the forward output Argmax, and the incoming gradient
// of Out into a "roi_pool_grad" op, and forwards all attributes unchanged.
// Templated on the descriptor type so it serves both the static graph
// (framework::OpDesc) and imperative (imperative::OpBase) modes.
template <typename T>
class ROIPoolGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("roi_pool_grad");
    op->SetInput("X", this->Input("X"));
    op->SetInput("ROIs", this->Input("ROIs"));
    // NOTE(review): RoisLod is declared AsDispensable on the forward op,
    // yet it is wired here unconditionally — confirm the grad op tolerates
    // an absent RoisLod input.
    op->SetInput("RoisLod", this->Input("RoisLod"));
    // Argmax (max-element indices saved by the forward pass) is what the
    // backward kernel uses to route gradients.
    op->SetInput("Argmax", this->Output("Argmax"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetAttrMap(this->Attrs());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
// Register the forward op together with its maker and the grad-op makers
// for both static-graph and imperative modes.
REGISTER_OPERATOR(roi_pool, ops::ROIPoolOp, ops::ROIPoolOpMaker,
                  ops::ROIPoolGradMaker<paddle::framework::OpDesc>,
                  ops::ROIPoolGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(roi_pool_grad, ops::ROIPoolGradOp);
// CPU kernels: float, double, and int element types for forward...
REGISTER_OP_CPU_KERNEL(
    roi_pool,
    ops::CPUROIPoolOpKernel<paddle::platform::CPUDeviceContext, float>,
    ops::CPUROIPoolOpKernel<paddle::platform::CPUDeviceContext, double>,
    ops::CPUROIPoolOpKernel<paddle::platform::CPUDeviceContext, int>);
// ...and the matching backward kernels.
REGISTER_OP_CPU_KERNEL(
    roi_pool_grad,
    ops::CPUROIPoolGradOpKernel<paddle::platform::CPUDeviceContext, float>,
    ops::CPUROIPoolGradOpKernel<paddle::platform::CPUDeviceContext, double>,
    ops::CPUROIPoolGradOpKernel<paddle::platform::CPUDeviceContext, int>);