/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <math.h>
#include <algorithm>
#include <random>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/detection/bbox_util.h"
#include "paddle/fluid/operators/gather.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/math/math_function.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
const int kBoxDim = 4;

template <typename T>
void AppendRois(LoDTensor* out, int64_t offset, Tensor* to_add) {
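  // Copy the contents of `to_add` into `out` starting at element `offset`;
  // the caller must have allocated `out` large enough to hold the data.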
  auto* out_data = out->data<T>();
  auto* to_add_data = to_add->data<T>();
  memcpy(out_data + offset, to_add_data, to_add->numel() * sizeof(T));
}

class GenerateProposalLabelsOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("RpnRois"),
                   "Input(RpnRois) shouldn't be null.");
    PADDLE_ENFORCE(ctx->HasInput("GtClasses"),
                   "Input(GtClasses) shouldn't be null.");
    PADDLE_ENFORCE(ctx->HasInput("IsCrowd"),
                   "Input(IsCrowd) shouldn't be null.");
    PADDLE_ENFORCE(ctx->HasInput("GtBoxes"),
                   "Input(GtBoxes) shouldn't be null.");
    PADDLE_ENFORCE(ctx->HasInput("ImInfo"), "Input(ImInfo) shouldn't be null.");

    PADDLE_ENFORCE(
        ctx->HasOutput("Rois"),
        "Output(Rois) of GenerateProposalLabelsOp should not be null");
54 55
    PADDLE_ENFORCE(
        ctx->HasOutput("LabelsInt32"),
        "Output(LabelsInt32) of GenerateProposalLabelsOp should not be null");
    PADDLE_ENFORCE(
        ctx->HasOutput("BboxTargets"),
        "Output(BboxTargets) of GenerateProposalLabelsOp should not be null");
    PADDLE_ENFORCE(ctx->HasOutput("BboxInsideWeights"),
                   "Output(BboxInsideWeights) of GenerateProposalLabelsOp "
                   "should not be null");
    PADDLE_ENFORCE(ctx->HasOutput("BboxOutsideWeights"),
                   "Output(BboxOutsideWeights) of GenerateProposalLabelsOp "
                   "should not be null");

    auto rpn_rois_dims = ctx->GetInputDim("RpnRois");
    auto gt_boxes_dims = ctx->GetInputDim("GtBoxes");
    auto im_info_dims = ctx->GetInputDim("ImInfo");

    PADDLE_ENFORCE_EQ(rpn_rois_dims.size(), 2,
                      "The rank of Input(RpnRois) must be 2.");
    PADDLE_ENFORCE_EQ(gt_boxes_dims.size(), 2,
                      "The rank of Input(GtBoxes) must be 2.");
    PADDLE_ENFORCE_EQ(im_info_dims.size(), 2,
                      "The rank of Input(ImInfo) must be 2.");

    int class_nums = ctx->Attrs().Get<int>("class_nums");

    ctx->SetOutputDim("Rois", {-1, 4});
    ctx->SetOutputDim("LabelsInt32", {-1, 1});
    ctx->SetOutputDim("BboxTargets", {-1, 4 * class_nums});
    ctx->SetOutputDim("BboxInsideWeights", {-1, 4 * class_nums});
    ctx->SetOutputDim("BboxOutsideWeights", {-1, 4 * class_nums});
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "RpnRois");
    return framework::OpKernelType(data_type, platform::CPUPlace());
  }
};

template <typename T>
void Concat(const platform::CPUDeviceContext& context,
            const Tensor& in_tensor_a, const Tensor& in_tensor_b,
            Tensor* out_tensor) {
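  // Concatenate two tensors along axis 0 (rows).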
  int axis = 0;
  std::vector<Tensor> inputs;
  inputs.emplace_back(in_tensor_a);
  inputs.emplace_back(in_tensor_b);
  math::ConcatFunctor<platform::CPUDeviceContext, T> concat_functor;
  concat_functor(context, inputs, axis, out_tensor);
}

template <typename T>
std::vector<std::vector<int>> SampleFgBgGt(
    const platform::CPUDeviceContext& context, Tensor* iou,
    const Tensor& is_crowd, const int batch_size_per_im,
    const float fg_fraction, const float fg_thresh, const float bg_thresh_hi,
    const float bg_thresh_lo, std::minstd_rand engine, const bool use_random,
    const bool is_cascade_rcnn, const Tensor& rpn_rois) {
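  // Classify each proposal by its best overlap with the groundtruth:
  // foreground if the max IoU is at least fg_thresh, background if the max
  // IoU lies in [bg_thresh_lo, bg_thresh_hi). For every foreground proposal
  // record the index of its matched groundtruth box. Unless is_cascade_rcnn
  // is set, both sets are then subsampled with reservoir sampling.
  // Returns {fg_inds, bg_inds, mapped_gt_inds}.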
  std::vector<int> fg_inds;
  std::vector<int> bg_inds;
  std::vector<int> mapped_gt_inds;
  int64_t gt_num = is_crowd.numel();
  const int* crowd_data = is_crowd.data<int>();
  T* proposal_to_gt_overlaps = iou->data<T>();
  int64_t row = iou->dims()[0];
  int64_t col = iou->dims()[1];
  float epsilon = 0.00001;
  const T* rpn_rois_dt = rpn_rois.data<T>();
  // Follow the Faster RCNN's implementation
  for (int64_t i = 0; i < row; ++i) {
    const T* v = proposal_to_gt_overlaps + i * col;
    T max_overlap = *std::max_element(v, v + col);
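    // Rows corresponding to crowd groundtruth boxes are excluded from
    // sampling by forcing their overlap to -1.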
    if ((i < gt_num) && (crowd_data[i])) {
      max_overlap = -1.0;
    }
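    // In cascade RCNN mode, skip degenerate proposals whose width or height
    // (x2 - x1 + 1, y2 - y1 + 1) is not positive.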
    if (is_cascade_rcnn &&
        ((rpn_rois_dt[i * 4 + 2] - rpn_rois_dt[i * 4 + 0] + 1) <= 0 ||
         (rpn_rois_dt[i * 4 + 3] - rpn_rois_dt[i * 4 + 1] + 1) <= 0)) {
      continue;
    }
    if (max_overlap >= fg_thresh) {
      // fg mapped gt label index
      for (int64_t j = 0; j < col; ++j) {
        T val = proposal_to_gt_overlaps[i * col + j];
        auto diff = std::abs(max_overlap - val);
        if (diff < epsilon) {
          fg_inds.emplace_back(i);
          mapped_gt_inds.emplace_back(j);
          break;
        }
      }
    } else if ((max_overlap >= bg_thresh_lo) && (max_overlap < bg_thresh_hi)) {
      bg_inds.emplace_back(i);
    } else {
      continue;
    }
  }

  std::vector<std::vector<int>> res;
  if (is_cascade_rcnn) {
    res.emplace_back(fg_inds);
    res.emplace_back(bg_inds);
    res.emplace_back(mapped_gt_inds);
  } else {
    // Reservoir Sampling
    // sampling fg
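    // Each candidate beyond the reservoir is swapped into a random reservoir
    // slot with probability k / i, where k is the reservoir size and i the
    // candidate's index, which keeps a uniform random sample.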
    std::uniform_real_distribution<float> uniform(0, 1);
    int fg_rois_per_im = std::floor(batch_size_per_im * fg_fraction);
    int fg_rois_this_image = fg_inds.size();
    int fg_rois_per_this_image = std::min(fg_rois_per_im, fg_rois_this_image);
    if (use_random) {
      const int64_t fg_size = static_cast<int64_t>(fg_inds.size());
      if (fg_size > fg_rois_per_this_image) {
        for (int64_t i = fg_rois_per_this_image; i < fg_size; ++i) {
          int rng_ind = std::floor(uniform(engine) * i);
          if (rng_ind < fg_rois_per_this_image) {
            std::iter_swap(fg_inds.begin() + rng_ind, fg_inds.begin() + i);
            std::iter_swap(mapped_gt_inds.begin() + rng_ind,
                           mapped_gt_inds.begin() + i);
          }
        }
      }
    }
    std::vector<int> new_fg_inds(fg_inds.begin(),
                                 fg_inds.begin() + fg_rois_per_this_image);
    std::vector<int> new_gt_inds(
        mapped_gt_inds.begin(),
        mapped_gt_inds.begin() + fg_rois_per_this_image);
    // sampling bg
    int bg_rois_per_image = batch_size_per_im - fg_rois_per_this_image;
    int bg_rois_this_image = bg_inds.size();
    int bg_rois_per_this_image =
        std::min(bg_rois_per_image, bg_rois_this_image);
    if (use_random) {
      const int64_t bg_size = static_cast<int64_t>(bg_inds.size());
      if (bg_size > bg_rois_per_this_image) {
        for (int64_t i = bg_rois_per_this_image; i < bg_size; ++i) {
          int rng_ind = std::floor(uniform(engine) * i);
          if (rng_ind < bg_rois_per_this_image)
            std::iter_swap(bg_inds.begin() + rng_ind, bg_inds.begin() + i);
        }
      }
    }
    std::vector<int> new_bg_inds(bg_inds.begin(),
                                 bg_inds.begin() + bg_rois_per_this_image);
    //
    res.emplace_back(new_fg_inds);
    res.emplace_back(new_bg_inds);
    res.emplace_back(new_gt_inds);
  }

  return res;
}

template <typename T>
void GatherBoxesLabels(const platform::CPUDeviceContext& context,
                       const Tensor& boxes, const Tensor& gt_boxes,
                       const Tensor& gt_classes,
                       const std::vector<int>& fg_inds,
                       const std::vector<int>& bg_inds,
                       const std::vector<int>& gt_inds, Tensor* sampled_boxes,
                       Tensor* sampled_labels, Tensor* sampled_gts) {
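  // Gather the sampled foreground and background boxes, the groundtruth box
  // matched to each foreground roi, and the class labels. Background rois are
  // assigned label 0.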
  int fg_num = fg_inds.size();
  int bg_num = bg_inds.size();
  Tensor fg_inds_t, bg_inds_t, gt_box_inds_t, gt_label_inds_t;
  int* fg_inds_data = fg_inds_t.mutable_data<int>({fg_num}, context.GetPlace());
  int* bg_inds_data = bg_inds_t.mutable_data<int>({bg_num}, context.GetPlace());
  int* gt_box_inds_data =
      gt_box_inds_t.mutable_data<int>({fg_num}, context.GetPlace());
  int* gt_label_inds_data =
      gt_label_inds_t.mutable_data<int>({fg_num}, context.GetPlace());
  std::copy(fg_inds.begin(), fg_inds.end(), fg_inds_data);
  std::copy(bg_inds.begin(), bg_inds.end(), bg_inds_data);
  std::copy(gt_inds.begin(), gt_inds.end(), gt_box_inds_data);
  std::copy(gt_inds.begin(), gt_inds.end(), gt_label_inds_data);

  Tensor fg_boxes, bg_boxes, fg_labels, bg_labels;
  fg_boxes.mutable_data<T>({fg_num, kBoxDim}, context.GetPlace());
  CPUGather<T>(context, boxes, fg_inds_t, &fg_boxes);
  bg_boxes.mutable_data<T>({bg_num, kBoxDim}, context.GetPlace());
  CPUGather<T>(context, boxes, bg_inds_t, &bg_boxes);
  Concat<T>(context, fg_boxes, bg_boxes, sampled_boxes);
  CPUGather<T>(context, gt_boxes, gt_box_inds_t, sampled_gts);
  fg_labels.mutable_data<int>({fg_num}, context.GetPlace());
  CPUGather<int>(context, gt_classes, gt_label_inds_t, &fg_labels);
  bg_labels.mutable_data<int>({bg_num}, context.GetPlace());
  math::set_constant(context, &bg_labels, 0);
  Concat<int>(context, fg_labels, bg_labels, sampled_labels);
}

template <typename T>
std::vector<Tensor> SampleRoisForOneImage(
    const platform::CPUDeviceContext& context, const Tensor& rpn_rois_in,
    const Tensor& gt_classes, const Tensor& is_crowd, const Tensor& gt_boxes,
    const Tensor& im_info, const int batch_size_per_im, const float fg_fraction,
    const float fg_thresh, const float bg_thresh_hi, const float bg_thresh_lo,
    const std::vector<float>& bbox_reg_weights, const int class_nums,
    std::minstd_rand engine, bool use_random, bool is_cascade_rcnn,
    bool is_cls_agnostic) {
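  // Sample rois for a single image:
  //   1. map the proposals back to the original image scale,
  //   2. compute proposal-to-groundtruth overlaps and pick fg/bg samples,
  //   3. gather the sampled boxes and labels and compute regression deltas,
  //   4. expand the deltas into per-class columns with inside/outside weights.
  // Returns {rois, labels, bbox_targets, bbox_inside_weights,
  // bbox_outside_weights}.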
  // 1.1 map to original image
  auto im_scale = im_info.data<T>()[2];
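  // Proposals arrive in the scaled image's coordinate system; dividing by
  // im_scale maps them back to the original image. In cascade RCNN mode the
  // leading gt_num elements are copied unchanged.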
  Tensor rpn_rois;
  rpn_rois.mutable_data<T>(rpn_rois_in.dims(), context.GetPlace());
  const T* rpn_rois_in_dt = rpn_rois_in.data<T>();
  T* rpn_rois_dt = rpn_rois.data<T>();
  int gt_num = gt_boxes.dims()[0] * 4;
  for (int i = 0; i < rpn_rois.numel(); ++i) {
    if (i < gt_num && is_cascade_rcnn) {
      rpn_rois_dt[i] = rpn_rois_in_dt[i];
    } else {
      rpn_rois_dt[i] = rpn_rois_in_dt[i] / im_scale;
    }
  }

  // 1.2 compute overlaps
  int proposals_num = rpn_rois.dims()[0];
  if (!is_cascade_rcnn) {
    proposals_num += gt_boxes.dims()[0];
  }
  Tensor proposal_to_gt_overlaps;
  proposal_to_gt_overlaps.mutable_data<T>({proposals_num, gt_boxes.dims()[0]},
                                          context.GetPlace());

  Tensor boxes;
  boxes.mutable_data<T>({proposals_num, kBoxDim}, context.GetPlace());
  if (!is_cascade_rcnn) {
    Concat<T>(context, gt_boxes, rpn_rois, &boxes);
  } else {
    T* boxes_dt = boxes.data<T>();
    for (int i = 0; i < boxes.numel(); ++i) {
      boxes_dt[i] = rpn_rois_dt[i];
    }
  }
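  // proposal_to_gt_overlaps[i][j] is the IoU between box i and groundtruth j.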
  BboxOverlaps<T>(boxes, gt_boxes, &proposal_to_gt_overlaps);
  // Generate proposal index
  std::vector<std::vector<int>> fg_bg_gt =
      SampleFgBgGt<T>(context, &proposal_to_gt_overlaps, is_crowd,
                      batch_size_per_im, fg_fraction, fg_thresh, bg_thresh_hi,
                      bg_thresh_lo, engine, use_random, is_cascade_rcnn, boxes);
  std::vector<int> fg_inds = fg_bg_gt[0];
  std::vector<int> bg_inds = fg_bg_gt[1];
  std::vector<int> mapped_gt_inds = fg_bg_gt[2];  // mapped_gt_labels

  // Gather boxes and labels
  Tensor sampled_boxes, sampled_labels, sampled_gts;
  int fg_num = fg_inds.size();
  int bg_num = bg_inds.size();
  int boxes_num = fg_num + bg_num;
  framework::DDim bbox_dim({boxes_num, kBoxDim});
  sampled_boxes.mutable_data<T>(bbox_dim, context.GetPlace());
  sampled_labels.mutable_data<int>({boxes_num}, context.GetPlace());
  sampled_gts.mutable_data<T>({fg_num, kBoxDim}, context.GetPlace());
  GatherBoxesLabels<T>(context, boxes, gt_boxes, gt_classes, fg_inds, bg_inds,
                       mapped_gt_inds, &sampled_boxes, &sampled_labels,
                       &sampled_gts);

  // Compute targets
  Tensor bbox_targets_single;
  bbox_targets_single.mutable_data<T>(bbox_dim, context.GetPlace());
  BoxToDelta<T>(fg_num, sampled_boxes, sampled_gts, bbox_reg_weights.data(),
                false, &bbox_targets_single);

  // Scale rois
  Tensor sampled_rois;
  sampled_rois.mutable_data<T>(sampled_boxes.dims(), context.GetPlace());
  auto sampled_rois_et = framework::EigenTensor<T, 2>::From(sampled_rois);
  auto sampled_boxes_et = framework::EigenTensor<T, 2>::From(sampled_boxes);
  sampled_rois_et = sampled_boxes_et * im_scale;

  // Expand box targets
  Tensor bbox_targets, bbox_inside_weights, bbox_outside_weights;
  framework::DDim bbox_expand_dim({boxes_num, kBoxDim * class_nums});
  bbox_targets.mutable_data<T>(bbox_expand_dim, context.GetPlace());
  bbox_inside_weights.mutable_data<T>(bbox_expand_dim, context.GetPlace());
  bbox_outside_weights.mutable_data<T>(bbox_expand_dim, context.GetPlace());
  math::set_constant(context, &bbox_targets, 0.0);
  math::set_constant(context, &bbox_inside_weights, 0.0);
  math::set_constant(context, &bbox_outside_weights, 0.0);

  auto* bbox_targets_single_data = bbox_targets_single.data<T>();
  auto* sampled_labels_data = sampled_labels.data<int>();
  auto* bbox_targets_data = bbox_targets.data<T>();
  auto* bbox_inside_weights_data = bbox_inside_weights.data<T>();
  auto* bbox_outside_weights_data = bbox_outside_weights.data<T>();
  int width = kBoxDim * class_nums;
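  // Scatter each positive roi's 4 regression targets into the columns
  // reserved for its class, [kBoxDim * label, kBoxDim * (label + 1)), and set
  // the matching inside/outside weights to 1.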
  for (int64_t i = 0; i < boxes_num; ++i) {
    int label = sampled_labels_data[i];
    if (label > 0) {
      if (is_cls_agnostic) {
        label = 1;
      }
      int dst_idx = i * width + kBoxDim * label;
      int src_idx = kBoxDim * i;
      bbox_targets_data[dst_idx] = bbox_targets_single_data[src_idx];
      bbox_targets_data[dst_idx + 1] = bbox_targets_single_data[src_idx + 1];
      bbox_targets_data[dst_idx + 2] = bbox_targets_single_data[src_idx + 2];
      bbox_targets_data[dst_idx + 3] = bbox_targets_single_data[src_idx + 3];
      bbox_inside_weights_data[dst_idx] = 1;
      bbox_inside_weights_data[dst_idx + 1] = 1;
      bbox_inside_weights_data[dst_idx + 2] = 1;
      bbox_inside_weights_data[dst_idx + 3] = 1;
      bbox_outside_weights_data[dst_idx] = 1;
      bbox_outside_weights_data[dst_idx + 1] = 1;
      bbox_outside_weights_data[dst_idx + 2] = 1;
      bbox_outside_weights_data[dst_idx + 3] = 1;
    }
  }
  std::vector<Tensor> res;
  res.emplace_back(sampled_rois);
  res.emplace_back(sampled_labels);
  res.emplace_back(bbox_targets);
  res.emplace_back(bbox_inside_weights);
  res.emplace_back(bbox_outside_weights);
  return res;
}

template <typename T>
class GenerateProposalLabelsKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* rpn_rois = context.Input<LoDTensor>("RpnRois");
    auto* gt_classes = context.Input<LoDTensor>("GtClasses");
    auto* is_crowd = context.Input<LoDTensor>("IsCrowd");
    auto* gt_boxes = context.Input<LoDTensor>("GtBoxes");
    auto* im_info = context.Input<LoDTensor>("ImInfo");

    auto* rois = context.Output<LoDTensor>("Rois");
    auto* labels_int32 = context.Output<LoDTensor>("LabelsInt32");
    auto* bbox_targets = context.Output<LoDTensor>("BboxTargets");
    auto* bbox_inside_weights = context.Output<LoDTensor>("BboxInsideWeights");
    auto* bbox_outside_weights =
        context.Output<LoDTensor>("BboxOutsideWeights");

    int batch_size_per_im = context.Attr<int>("batch_size_per_im");
    float fg_fraction = context.Attr<float>("fg_fraction");
    float fg_thresh = context.Attr<float>("fg_thresh");
    float bg_thresh_hi = context.Attr<float>("bg_thresh_hi");
    float bg_thresh_lo = context.Attr<float>("bg_thresh_lo");
    std::vector<float> bbox_reg_weights =
        context.Attr<std::vector<float>>("bbox_reg_weights");
    int class_nums = context.Attr<int>("class_nums");
    bool use_random = context.Attr<bool>("use_random");
    bool is_cascade_rcnn = context.Attr<bool>("is_cascade_rcnn");
    bool is_cls_agnostic = context.Attr<bool>("is_cls_agnostic");
    PADDLE_ENFORCE_EQ(rpn_rois->lod().size(), 1UL,
                      "GenerateProposalLabelsOp rpn_rois needs 1 level of LoD");
    PADDLE_ENFORCE_EQ(
        gt_classes->lod().size(), 1UL,
        "GenerateProposalLabelsOp gt_classes needs 1 level of LoD");
    PADDLE_ENFORCE_EQ(is_crowd->lod().size(), 1UL,
                      "GenerateProposalLabelsOp is_crowd needs 1 level of LoD");
    PADDLE_ENFORCE_EQ(gt_boxes->lod().size(), 1UL,
                      "GenerateProposalLabelsOp gt_boxes needs 1 level of LoD");
    int64_t n = static_cast<int64_t>(rpn_rois->lod().back().size() - 1);
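    // Allocate the outputs for the maximum possible number of rois
    // (batch_size_per_im per image); they are resized to the actual count
    // after sampling.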

    rois->mutable_data<T>({n * batch_size_per_im, kBoxDim}, context.GetPlace());
    labels_int32->mutable_data<int>({n * batch_size_per_im, 1},
                                    context.GetPlace());
    bbox_targets->mutable_data<T>({n * batch_size_per_im, kBoxDim * class_nums},
                                  context.GetPlace());
    bbox_inside_weights->mutable_data<T>(
        {n * batch_size_per_im, kBoxDim * class_nums}, context.GetPlace());
    bbox_outside_weights->mutable_data<T>(
        {n * batch_size_per_im, kBoxDim * class_nums}, context.GetPlace());

    std::random_device rnd;
    std::minstd_rand engine;
    int seed = rnd();
    engine.seed(seed);

    framework::LoD lod;
    std::vector<size_t> lod0(1, 0);

    int64_t num_rois = 0;
    auto& dev_ctx = context.device_context<platform::CPUDeviceContext>();

    auto rpn_rois_lod = rpn_rois->lod().back();
    auto gt_classes_lod = gt_classes->lod().back();
    auto is_crowd_lod = is_crowd->lod().back();
    auto gt_boxes_lod = gt_boxes->lod().back();
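    // Sample rois image by image; the LoD offsets delimit each image's
    // proposals and groundtruth.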
    for (int i = 0; i < n; ++i) {
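      // An image with no proposals contributes an empty segment to the
      // output LoD.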
      if (rpn_rois_lod[i] == rpn_rois_lod[i + 1]) {
        lod0.emplace_back(num_rois);
        continue;
      }
      Tensor rpn_rois_slice =
          rpn_rois->Slice(rpn_rois_lod[i], rpn_rois_lod[i + 1]);
      Tensor gt_classes_slice =
          gt_classes->Slice(gt_classes_lod[i], gt_classes_lod[i + 1]);
      Tensor is_crowd_slice =
          is_crowd->Slice(is_crowd_lod[i], is_crowd_lod[i + 1]);
      Tensor gt_boxes_slice =
          gt_boxes->Slice(gt_boxes_lod[i], gt_boxes_lod[i + 1]);
      Tensor im_info_slice = im_info->Slice(i, i + 1);
      std::vector<Tensor> tensor_output = SampleRoisForOneImage<T>(
          dev_ctx, rpn_rois_slice, gt_classes_slice, is_crowd_slice,
          gt_boxes_slice, im_info_slice, batch_size_per_im, fg_fraction,
          fg_thresh, bg_thresh_hi, bg_thresh_lo, bbox_reg_weights, class_nums,
          engine, use_random, is_cascade_rcnn, is_cls_agnostic);
      Tensor sampled_rois = tensor_output[0];
      Tensor sampled_labels_int32 = tensor_output[1];
      Tensor sampled_bbox_targets = tensor_output[2];
      Tensor sampled_bbox_inside_weights = tensor_output[3];
      Tensor sampled_bbox_outside_weights = tensor_output[4];

      AppendRois<T>(rois, kBoxDim * num_rois, &sampled_rois);
      AppendRois<int>(labels_int32, num_rois, &sampled_labels_int32);
      AppendRois<T>(bbox_targets, kBoxDim * num_rois * class_nums,
                    &sampled_bbox_targets);
      AppendRois<T>(bbox_inside_weights, kBoxDim * num_rois * class_nums,
                    &sampled_bbox_inside_weights);
      AppendRois<T>(bbox_outside_weights, kBoxDim * num_rois * class_nums,
                    &sampled_bbox_outside_weights);

      num_rois += sampled_rois.dims()[0];
      lod0.emplace_back(num_rois);
    }

    lod.emplace_back(lod0);
    rois->set_lod(lod);
    labels_int32->set_lod(lod);
    bbox_targets->set_lod(lod);
    bbox_inside_weights->set_lod(lod);
    bbox_outside_weights->set_lod(lod);
    rois->Resize({num_rois, kBoxDim});
    labels_int32->Resize({num_rois, 1});
    bbox_targets->Resize({num_rois, kBoxDim * class_nums});
    bbox_inside_weights->Resize({num_rois, kBoxDim * class_nums});
    bbox_outside_weights->Resize({num_rois, kBoxDim * class_nums});
  }
};

class GenerateProposalLabelsOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput(
        "RpnRois",
        "(LoDTensor), This input is a 2D LoDTensor with shape [N, 4]. "
        "N is the number of the GenerateProposalOp's output, "
        "each element is a bounding box with [xmin, ymin, xmax, ymax] format.");
    AddInput("GtClasses",
             "(LoDTensor), This input is a 2D LoDTensor with shape [M, 1]. "
             "M is the number of groundtruth, "
             "each element is a class label of groundtruth.");
    AddInput(
        "IsCrowd",
        "(LoDTensor), This input is a 2D LoDTensor with shape [M, 1]. "
        "M is the number of groundtruth, "
        "each element is a flag indicates whether a groundtruth is crowd.");
    AddInput(
        "GtBoxes",
        "(LoDTensor), This input is a 2D LoDTensor with shape [M, 4]. "
        "M is the number of groundtruth, "
        "each element is a bounding box with [xmin, ymin, xmax, ymax] format.");
    AddInput("ImInfo",
             "(Tensor), This input is a 2D Tensor with shape [B, 3]. "
             "B is the number of input images, "
             "each element consists of im_height, im_width, im_scale.");

    AddOutput(
        "Rois",
        "(LoDTensor), This output is a 2D LoDTensor with shape [P, 4]. "
        "P usuall equal to  batch_size_per_im * batch_size, "
        "each element is a bounding box with [xmin, ymin, xmax, ymax] format.");
    AddOutput("LabelsInt32",
              "(LoDTensor), This output is a 2D LoDTensor with shape [P, 1], "
              "each element represents a class label of a roi");
    AddOutput("BboxTargets",
              "(LoDTensor), This output is a 2D LoDTensor with shape [P, 4 * "
              "class_nums], "
              "each element represents a box label of a roi");
    AddOutput(
        "BboxInsideWeights",
        "(LoDTensor), This output is a 2D LoDTensor with shape [P, 4 * "
        "class_nums], "
        "each element indicates whether a box should contribute to loss.");
    AddOutput(
        "BboxOutsideWeights",
        "(LoDTensor), This output is a 2D LoDTensor with shape [P, 4 * "
        "class_nums], "
        "each element indicates whether a box should contribute to loss.");

    AddAttr<int>("batch_size_per_im", "Batch size of rois per images.");
    AddAttr<float>("fg_fraction",
                   "Foreground fraction in total batch_size_per_im.");
    AddAttr<float>(
        "fg_thresh",
        "Overlap threshold which is used to chose foreground sample.");
    AddAttr<float>("bg_thresh_hi",
                   "Overlap threshold upper bound which is used to chose "
                   "background sample.");
    AddAttr<float>("bg_thresh_lo",
                   "Overlap threshold lower bound which is used to chose "
                   "background sample.");
    AddAttr<std::vector<float>>("bbox_reg_weights", "Box regression weights.");
    AddAttr<int>("class_nums", "Class number.");
    AddAttr<bool>(
        "use_random",
        "Use random sampling to choose foreground and background boxes.")
        .SetDefault(true);
    AddAttr<bool>("is_cascade_rcnn",
                  "cascade rcnn sampling policy changed from stage 2.")
        .SetDefault(false);
    AddAttr<bool>(
        "is_cls_agnostic",
        "the box regress will only include fg and bg locations if set true ")
        .SetDefault(false);

    AddComment(R"DOC(
This operator samples foreground and background boxes from the bounding boxes
produced by GenerateProposalOp and the groundtruth, and computes the loss
targets used for training.

RpnRois are the output boxes of the RPN, processed by generate_proposal_op.
These boxes are combined with the groundtruth boxes and sampled according to
batch_size_per_im and fg_fraction. A proposal whose overlap with a groundtruth
box is at least fg_thresh is taken as a foreground sample; a proposal whose
overlap is at least bg_thresh_lo and lower than bg_thresh_hi is taken as a
background sample. After all foreground and background boxes are chosen (the
so-called Rois), random sampling ensures that the number of foreground boxes
is no more than batch_size_per_im * fg_fraction.

For each box in Rois, we assign a classification target (class label) and
regression targets (box label). Finally, BboxInsideWeights and
BboxOutsideWeights are used to specify whether a box contributes to the
training loss.
    )DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(
    generate_proposal_labels, ops::GenerateProposalLabelsOp,
    ops::GenerateProposalLabelsOpMaker,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OP_CPU_KERNEL(generate_proposal_labels,
                       ops::GenerateProposalLabelsKernel<float>,
                       ops::GenerateProposalLabelsKernel<double>);