/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

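// Average precision accumulation modes: kIntegral integrates the
// precision-recall curve, k11point uses VOC2007-style 11-point interpolation.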
enum APType { kNone = 0, kIntegral, k11point };

inline APType GetAPType(std::string str) {
  if (str == "integral") {
    return APType::kIntegral;
  } else if (str == "11point") {
    return APType::k11point;
  } else {
    return APType::kNone;
  }
}

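// Comparator for sorting (score, item) pairs in descending score order.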
template <typename T>
inline bool SortScorePairDescend(const std::pair<float, T>& pair1,
                                 const std::pair<float, T>& pair2) {
  return pair1.first > pair2.first;
}

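// Sorts (score, count) pairs by descending score and accumulates the counts,
// producing the running totals used to build precision/recall curves.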
template <typename T>
inline void GetAccumulation(std::vector<std::pair<T, int>> in_pairs,
                            std::vector<int>* accu_vec) {
  std::stable_sort(in_pairs.begin(), in_pairs.end(), SortScorePairDescend<int>);
  accu_vec->clear();
  size_t sum = 0;
  for (size_t i = 0; i < in_pairs.size(); ++i) {
    auto count = in_pairs[i].second;
    sum += count;
    accu_vec->push_back(sum);
  }
}

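// CPU kernel that evaluates detection results (DetectRes) against ground-truth
// labels (Label), writes the mean Average Precision to MAP, and optionally
// accumulates true/false positive statistics across mini-batches.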
template <typename Place, typename T>
class DetectionMAPOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in_detect = ctx.Input<framework::LoDTensor>("DetectRes");
    auto* in_label = ctx.Input<framework::LoDTensor>("Label");
    auto* out_map = ctx.Output<framework::Tensor>("MAP");

    auto* in_pos_count = ctx.Input<framework::Tensor>("PosCount");
    auto* in_true_pos = ctx.Input<framework::LoDTensor>("TruePos");
    auto* in_false_pos = ctx.Input<framework::LoDTensor>("FalsePos");

    auto* out_pos_count = ctx.Output<framework::Tensor>("AccumPosCount");
    auto* out_true_pos = ctx.Output<framework::LoDTensor>("AccumTruePos");
    auto* out_false_pos = ctx.Output<framework::LoDTensor>("AccumFalsePos");

    float overlap_threshold = ctx.Attr<float>("overlap_threshold");
    bool evaluate_difficult = ctx.Attr<bool>("evaluate_difficult");
    auto ap_type = GetAPType(ctx.Attr<std::string>("ap_type"));
    int class_num = ctx.Attr<int>("class_num");

    auto label_lod = in_label->lod();
    auto detect_lod = in_detect->lod();
    PADDLE_ENFORCE_EQ(label_lod.size(), 1UL,
                      "Only support one level sequence now.");
    PADDLE_ENFORCE_EQ(label_lod[0].size(), detect_lod[0].size(),
                      "The batch_size of input(Label) and input(Detection) "
                      "must be the same.");

    std::vector<std::map<int, std::vector<Box>>> gt_boxes;
    std::vector<std::map<int, std::vector<std::pair<T, Box>>>> detect_boxes;

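    // Group ground-truth and detected boxes by image and class label.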
    GetBoxes(*in_label, *in_detect, gt_boxes, detect_boxes);

    std::map<int, int> label_pos_count;
    std::map<int, std::vector<std::pair<T, int>>> true_pos;
    std::map<int, std::vector<std::pair<T, int>>> false_pos;

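    // If a non-zero HasState flag and the accumulated state inputs are
    // provided, load the statistics from previous mini-batches so that
    // evaluation continues across batches.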
    auto* has_state = ctx.Input<framework::LoDTensor>("HasState");
    int state = 0;
    if (has_state) {
      state = has_state->data<int>()[0];
    }

    if (in_pos_count != nullptr && state) {
      GetInputPos(*in_pos_count, *in_true_pos, *in_false_pos, label_pos_count,
                  true_pos, false_pos, class_num);
    }

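    // Match this batch's detections against the ground truth.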
    CalcTrueAndFalsePositive(gt_boxes, detect_boxes, evaluate_difficult,
                             overlap_threshold, label_pos_count, true_pos,
                             false_pos);

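    // Compute mAP from the accumulated statistics.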
    int background_label = ctx.Attr<int>("background_label");
    T map = CalcMAP(ap_type, label_pos_count, true_pos, false_pos,
                    background_label);

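    // Write the accumulated statistics out so that later batches can resume
    // from them.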
    GetOutputPos(ctx, label_pos_count, true_pos, false_pos, *out_pos_count,
                 *out_true_pos, *out_false_pos, class_num);

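    // MAP is a single-element tensor holding the resulting mAP value.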
    T* map_data = out_map->mutable_data<T>(ctx.GetPlace());
    map_data[0] = map;
  }

 protected:
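  // Axis-aligned bounding box with a flag marking "difficult" ground truth.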
  struct Box {
    Box(T xmin, T ymin, T xmax, T ymax)
        : xmin(xmin), ymin(ymin), xmax(xmax), ymax(ymax), is_difficult(false) {}

    T xmin, ymin, xmax, ymax;
    bool is_difficult;
  };

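  // Jaccard (IoU) overlap of two boxes; returns 0 when they do not intersect.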
  inline T JaccardOverlap(const Box& box1, const Box& box2) const {
    if (box2.xmin > box1.xmax || box2.xmax < box1.xmin ||
        box2.ymin > box1.ymax || box2.ymax < box1.ymin) {
      return 0.0;
    } else {
      T inter_xmin = std::max(box1.xmin, box2.xmin);
      T inter_ymin = std::max(box1.ymin, box2.ymin);
      T inter_xmax = std::min(box1.xmax, box2.xmax);
      T inter_ymax = std::min(box1.ymax, box2.ymax);

      T inter_width = inter_xmax - inter_xmin;
      T inter_height = inter_ymax - inter_ymin;
      T inter_area = inter_width * inter_height;

      T bbox_area1 = (box1.xmax - box1.xmin) * (box1.ymax - box1.ymin);
      T bbox_area2 = (box2.xmax - box2.xmin) * (box2.ymax - box2.ymin);

      return inter_area / (bbox_area1 + bbox_area2 - inter_area);
    }
  }

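  // Clips box coordinates to the normalized range [0, 1].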
  inline void ClipBBox(const Box& bbox, Box* clipped_bbox) const {
    T one = static_cast<T>(1.0);
    T zero = static_cast<T>(0.0);
    clipped_bbox->xmin = std::max(std::min(bbox.xmin, one), zero);
    clipped_bbox->ymin = std::max(std::min(bbox.ymin, one), zero);
    clipped_bbox->xmax = std::max(std::min(bbox.xmax, one), zero);
    clipped_bbox->ymax = std::max(std::min(bbox.ymax, one), zero);
  }

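  // Splits the per-image LoD sequences into per-class box maps. Label rows are
  // (label, is_difficult, xmin, ymin, xmax, ymax); detection rows are
  // (label, score, xmin, ymin, xmax, ymax).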
  void GetBoxes(const framework::LoDTensor& input_label,
                const framework::LoDTensor& input_detect,
                std::vector<std::map<int, std::vector<Box>>>& gt_boxes,
                std::vector<std::map<int, std::vector<std::pair<T, Box>>>>&
                    detect_boxes) const {
    auto labels = framework::EigenTensor<T, 2>::From(input_label);
    auto detect = framework::EigenTensor<T, 2>::From(input_detect);

    auto label_lod = input_label.lod();
    auto detect_lod = input_detect.lod();

    int batch_size = label_lod[0].size() - 1;
    auto label_index = label_lod[0];

    for (int n = 0; n < batch_size; ++n) {
      std::map<int, std::vector<Box>> boxes;
      for (size_t i = label_index[n]; i < label_index[n + 1]; ++i) {
        Box box(labels(i, 2), labels(i, 3), labels(i, 4), labels(i, 5));
        int label = labels(i, 0);
        auto is_difficult = labels(i, 1);
        if (std::abs(is_difficult - 0.0) < 1e-6)
          box.is_difficult = false;
        else
          box.is_difficult = true;
        boxes[label].push_back(box);
      }
      gt_boxes.push_back(boxes);
    }

    auto detect_index = detect_lod[0];
    for (int n = 0; n < batch_size; ++n) {
      std::map<int, std::vector<std::pair<T, Box>>> boxes;
      for (size_t i = detect_index[n]; i < detect_index[n + 1]; ++i) {
        Box box(detect(i, 2), detect(i, 3), detect(i, 4), detect(i, 5));
        int label = detect(i, 0);
        auto score = detect(i, 1);
        boxes[label].push_back(std::make_pair(score, box));
      }
      detect_boxes.push_back(boxes);
    }
  }

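  // Serializes the accumulated statistics: AccumPosCount is [class_num, 1],
  // while AccumTruePos/AccumFalsePos store (score, flag) rows with one LoD
  // segment per class.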
  void GetOutputPos(
      const framework::ExecutionContext& ctx,
      const std::map<int, int>& label_pos_count,
      const std::map<int, std::vector<std::pair<T, int>>>& true_pos,
      const std::map<int, std::vector<std::pair<T, int>>>& false_pos,
      framework::Tensor& output_pos_count,
      framework::LoDTensor& output_true_pos,
      framework::LoDTensor& output_false_pos, const int class_num) const {
    int true_pos_count = 0;
    int false_pos_count = 0;
    for (auto it = true_pos.begin(); it != true_pos.end(); ++it) {
      auto tp = it->second;
      true_pos_count += tp.size();
    }
    for (auto it = false_pos.begin(); it != false_pos.end(); ++it) {
      auto fp = it->second;
      false_pos_count += fp.size();
    }

    int* pos_count_data = output_pos_count.mutable_data<int>(
        framework::make_ddim({class_num, 1}), ctx.GetPlace());

    T* true_pos_data = output_true_pos.mutable_data<T>(
        framework::make_ddim({true_pos_count, 2}), ctx.GetPlace());
    T* false_pos_data = output_false_pos.mutable_data<T>(
        framework::make_ddim({false_pos_count, 2}), ctx.GetPlace());
    true_pos_count = 0;
    false_pos_count = 0;
    std::vector<size_t> true_pos_starts = {0};
    std::vector<size_t> false_pos_starts = {0};
    for (int i = 0; i < class_num; ++i) {
      auto it_count = label_pos_count.find(i);
      pos_count_data[i] = 0;
      if (it_count != label_pos_count.end()) {
        pos_count_data[i] = it_count->second;
      }
      auto it_true_pos = true_pos.find(i);
      if (it_true_pos != true_pos.end()) {
        const std::vector<std::pair<T, int>>& true_pos_vec =
            it_true_pos->second;
        for (const std::pair<T, int>& tp : true_pos_vec) {
          true_pos_data[true_pos_count * 2] = tp.first;
          true_pos_data[true_pos_count * 2 + 1] = static_cast<T>(tp.second);
          true_pos_count++;
        }
      }
      true_pos_starts.push_back(true_pos_count);

      auto it_false_pos = false_pos.find(i);
      if (it_false_pos != false_pos.end()) {
        const std::vector<std::pair<T, int>>& false_pos_vec =
            it_false_pos->second;
        for (const std::pair<T, int>& fp : false_pos_vec) {
          false_pos_data[false_pos_count * 2] = fp.first;
          false_pos_data[false_pos_count * 2 + 1] = static_cast<T>(fp.second);
          false_pos_count++;
        }
      }
      false_pos_starts.push_back(false_pos_count);
    }

    framework::LoD true_pos_lod;
    true_pos_lod.emplace_back(true_pos_starts);
    framework::LoD false_pos_lod;
    false_pos_lod.emplace_back(false_pos_starts);

    output_true_pos.set_lod(true_pos_lod);
    output_false_pos.set_lod(false_pos_lod);
    return;
  }

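  // Restores per-class positive counts and (score, flag) lists from the state
  // tensors produced by a previous batch.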
  void GetInputPos(const framework::Tensor& input_pos_count,
                   const framework::LoDTensor& input_true_pos,
                   const framework::LoDTensor& input_false_pos,
                   std::map<int, int>& label_pos_count,
                   std::map<int, std::vector<std::pair<T, int>>>& true_pos,
                   std::map<int, std::vector<std::pair<T, int>>>& false_pos,
                   const int class_num) const {
    const int* pos_count_data = input_pos_count.data<int>();
    for (int i = 0; i < class_num; ++i) {
      label_pos_count[i] = pos_count_data[i];
    }

    auto SetData = [](const framework::LoDTensor& pos_tensor,
                      std::map<int, std::vector<std::pair<T, int>>>& pos) {
      const T* pos_data = pos_tensor.data<T>();
      auto pos_data_lod = pos_tensor.lod()[0];
      for (size_t i = 0; i < pos_data_lod.size() - 1; ++i) {
        for (size_t j = pos_data_lod[i]; j < pos_data_lod[i + 1]; ++j) {
          T score = pos_data[j * 2];
          int flag = pos_data[j * 2 + 1];
          pos[i].push_back(std::make_pair(score, flag));
        }
      }
    };

    SetData(input_true_pos, true_pos);
    SetData(input_false_pos, false_pos);
    return;
  }

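  // Matches detections to ground-truth boxes per image and per class,
  // appending (score, flag) pairs to true_pos and false_pos; a detection
  // counts as a true positive when its best overlap with a not-yet-matched
  // ground-truth box of the same class exceeds overlap_threshold.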
  void CalcTrueAndFalsePositive(
      const std::vector<std::map<int, std::vector<Box>>>& gt_boxes,
      const std::vector<std::map<int, std::vector<std::pair<T, Box>>>>&
          detect_boxes,
      bool evaluate_difficult, float overlap_threshold,
      std::map<int, int>& label_pos_count,
      std::map<int, std::vector<std::pair<T, int>>>& true_pos,
      std::map<int, std::vector<std::pair<T, int>>>& false_pos) const {
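    // First pass: count ground-truth positives per class, skipping difficult
    // boxes unless evaluate_difficult is set.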
    int batch_size = gt_boxes.size();
    for (int n = 0; n < batch_size; ++n) {
      auto image_gt_boxes = gt_boxes[n];
      for (auto it = image_gt_boxes.begin(); it != image_gt_boxes.end(); ++it) {
        size_t count = 0;
        auto labeled_bboxes = it->second;
        if (evaluate_difficult) {
          count = labeled_bboxes.size();
        } else {
          for (size_t i = 0; i < labeled_bboxes.size(); ++i)
            if (!(labeled_bboxes[i].is_difficult)) ++count;
        }
        if (count == 0) {
          continue;
        }
        int label = it->first;
        if (label_pos_count.find(label) == label_pos_count.end()) {
          label_pos_count[label] = count;
        } else {
          label_pos_count[label] += count;
        }
      }
    }

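    // Second pass: match each image's detections against its ground truth.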
    for (size_t n = 0; n < detect_boxes.size(); ++n) {
      auto image_gt_boxes = gt_boxes[n];
      auto detections = detect_boxes[n];

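      // No ground truth in this image: every detection is a false positive.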
      if (image_gt_boxes.size() == 0) {
        for (auto it = detections.begin(); it != detections.end(); ++it) {
          auto pred_boxes = it->second;
          int label = it->first;
          for (size_t i = 0; i < pred_boxes.size(); ++i) {
            auto score = pred_boxes[i].first;
            true_pos[label].push_back(std::make_pair(score, 0));
            false_pos[label].push_back(std::make_pair(score, 1));
          }
        }
        continue;
      }

      for (auto it = detections.begin(); it != detections.end(); ++it) {
        int label = it->first;
        auto pred_boxes = it->second;
        if (image_gt_boxes.find(label) == image_gt_boxes.end()) {
          for (size_t i = 0; i < pred_boxes.size(); ++i) {
            auto score = pred_boxes[i].first;
            true_pos[label].push_back(std::make_pair(score, 0));
            false_pos[label].push_back(std::make_pair(score, 1));
          }
          continue;
        }

        auto matched_bboxes = image_gt_boxes.find(label)->second;
        std::vector<bool> visited(matched_bboxes.size(), false);
        // Sort detections in descend order based on scores
        std::sort(pred_boxes.begin(), pred_boxes.end(),
                  SortScorePairDescend<Box>);
        for (size_t i = 0; i < pred_boxes.size(); ++i) {
          T max_overlap = -1.0;
          size_t max_idx = 0;
          auto score = pred_boxes[i].first;
          for (size_t j = 0; j < matched_bboxes.size(); ++j) {
            Box& pred_box = pred_boxes[i].second;
            ClipBBox(pred_box, &pred_box);
            T overlap = JaccardOverlap(pred_box, matched_bboxes[j]);
            if (overlap > max_overlap) {
              max_overlap = overlap;
              max_idx = j;
            }
          }
          if (max_overlap > overlap_threshold) {
            bool match_evaluate_difficult =
                evaluate_difficult ||
                (!evaluate_difficult && !matched_bboxes[max_idx].is_difficult);
            if (match_evaluate_difficult) {
              if (!visited[max_idx]) {
                true_pos[label].push_back(std::make_pair(score, 1));
                false_pos[label].push_back(std::make_pair(score, 0));
                visited[max_idx] = true;
              } else {
                true_pos[label].push_back(std::make_pair(score, 0));
                false_pos[label].push_back(std::make_pair(score, 1));
              }
            }
          } else {
            true_pos[label].push_back(std::make_pair(score, 0));
            false_pos[label].push_back(std::make_pair(score, 1));
          }
        }
      }
    }
  }

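  // Computes mean average precision from the accumulated per-class statistics,
  // using either 11-point interpolation (VOC2007) or the natural integral of
  // the precision-recall curve.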
  T CalcMAP(APType ap_type, const std::map<int, int>& label_pos_count,
            const std::map<int, std::vector<std::pair<T, int>>>& true_pos,
            const std::map<int, std::vector<std::pair<T, int>>>& false_pos,
            const int background_label) const {
    T mAP = 0.0;
    int count = 0;
    for (auto it = label_pos_count.begin(); it != label_pos_count.end(); ++it) {
      int label = it->first;
      int label_num_pos = it->second;
      if (label_num_pos == background_label ||
          true_pos.find(label) == true_pos.end()) {
        continue;
      }
      auto label_true_pos = true_pos.find(label)->second;
      auto label_false_pos = false_pos.find(label)->second;
      // Compute average precision.
      std::vector<int> tp_sum;
      GetAccumulation<T>(label_true_pos, &tp_sum);
      std::vector<int> fp_sum;
      GetAccumulation<T>(label_false_pos, &fp_sum);
      std::vector<T> precision, recall;
      size_t num = tp_sum.size();
      // Compute Precision.
      for (size_t i = 0; i < num; ++i) {
        precision.push_back(static_cast<T>(tp_sum[i]) /
                            static_cast<T>(tp_sum[i] + fp_sum[i]));
        recall.push_back(static_cast<T>(tp_sum[i]) / label_num_pos);
      }
      // VOC2007 style
      if (ap_type == APType::k11point) {
        std::vector<T> max_precisions(11, 0.0);
        int start_idx = num - 1;
        for (int j = 10; j >= 0; --j)
          for (int i = start_idx; i >= 0; --i) {
            if (recall[i] < j / 10.) {
              start_idx = i;
              if (j > 0) max_precisions[j - 1] = max_precisions[j];
              break;
            } else {
              if (max_precisions[j] < precision[i])
                max_precisions[j] = precision[i];
            }
          }
        for (int j = 10; j >= 0; --j) mAP += max_precisions[j] / 11;
        ++count;
      } else if (ap_type == APType::kIntegral) {
        // Nature integral
        float average_precisions = 0.;
        float prev_recall = 0.;
        for (size_t i = 0; i < num; ++i) {
          if (fabs(recall[i] - prev_recall) > 1e-6)
            average_precisions += precision[i] * fabs(recall[i] - prev_recall);
          prev_recall = recall[i];
        }
        mAP += average_precisions;
        ++count;
      } else {
        LOG(FATAL) << "Unkown ap version: " << ap_type;
      }
    }
    if (count != 0) mAP /= count;
    return mAP;
  }
};  // class DetectionMAPOpKernel

}  // namespace operators
}  // namespace paddle