/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

W
wanghaox 已提交
21 22 23 24 25 26 27 28
enum MiningType { kNone = 0, kMaxNegative, kHardExample };

// Comparator for std::sort: orders (score, payload) pairs so that higher
// scores come first (descending by score).
template <typename T>
bool SortScoreDescend(const std::pair<float, T>& pair1,
                      const std::pair<float, T>& pair2) {
  return pair2.first < pair1.first;
}

29 30
inline bool IsEligibleMining(const MiningType mining_type,
                             const int match_idx,
W
wanghaox 已提交
31 32 33 34 35 36 37 38 39 40 41
                             const float match_dist,
                             const float neg_dist_threshold) {
  if (mining_type == MiningType::kMaxNegative) {
    return match_idx == -1 && match_dist < neg_dist_threshold;
  } else if (mining_type == MiningType::kHardExample) {
    return true;
  } else {
    return false;
  }
}

W
wanghaox 已提交
42
inline MiningType GetMiningType(std::string str) {
W
wanghaox 已提交
43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
  if (str == "max_negative") {
    return MiningType::kMaxNegative;
  } else if (str == "hard_example") {
    return MiningType::kHardExample;
  } else {
    return MiningType::kNone;
  }
}

// CPU kernel that mines hard negative examples for detection training.
// Per sample it collects eligible negative candidates, keeps the ones with
// the highest loss (classification loss, plus localization loss in
// hard_example mode when LocLoss is provided), and emits:
//  - NegIndices: selected negative prior-box indices as an LoDTensor whose
//    lod[0] delimits each sample's range;
//  - UpdatedMatchIndices: a copy of MatchIndices where, in hard_example
//    mode, matched boxes that were not selected are reset to -1.
template <typename DeviceContext, typename T>
class MineHardExamplesKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in_cls_loss = ctx.Input<framework::Tensor>("ClsLoss");
    auto* in_loc_loss = ctx.Input<framework::Tensor>("LocLoss");
    auto* in_matched_indices = ctx.Input<framework::Tensor>("MatchIndices");
    auto* in_match_dist = ctx.Input<framework::Tensor>("MatchDist");
    float neg_pos_ratio = ctx.Attr<float>("neg_pos_ratio");
    T neg_dist_threshold =
        static_cast<T>(ctx.Attr<float>("neg_dist_threshold"));
    int sample_size = ctx.Attr<int>("sample_size");
    MiningType mining_type =
        GetMiningType(ctx.Attr<std::string>("mining_type"));

    auto out_neg_indices = ctx.Output<framework::LoDTensor>("NegIndices");
    auto out_match_indices =
        ctx.Output<framework::Tensor>("UpdatedMatchIndices");

    // Start from a verbatim copy of MatchIndices; hard_example mode may
    // reset some entries to -1 below.
    framework::TensorCopy(
        *in_matched_indices, ctx.GetPlace(), out_match_indices);

    int batch_size = in_matched_indices->dims()[0];
    int prior_num = in_matched_indices->dims()[1];

    auto match_indices = framework::EigenMatrix<int>::From(*in_matched_indices);
    auto match_indices_et =
        framework::EigenMatrix<int>::From(*out_match_indices);
    auto match_dist = framework::EigenMatrix<T>::From(*in_match_dist);

    const T* cls_loss = in_cls_loss->data<T>();
    // LocLoss is a dispensable input; nullptr when absent.
    const T* loc_loss = nullptr;
    if (in_loc_loss) {
      loc_loss = in_loc_loss->data<T>();
    }

    // Compare losses in T rather than through SortScoreDescend<size_t>,
    // whose std::pair<float, size_t> parameters would force a converting
    // temporary per comparison and truncate double losses to float.
    auto loss_greater = [](const std::pair<T, size_t>& a,
                           const std::pair<T, size_t>& b) {
      return a.first > b.first;
    };

    std::vector<std::vector<int>> all_neg_indices;
    std::vector<size_t> batch_starts = {0};
    for (int n = 0; n < batch_size; ++n) {
      // (loss, prior index) for every eligible negative candidate.
      std::vector<std::pair<T, size_t>> loss_idx;
      int neg_sel = 0;
      for (int m = 0; m < prior_num; ++m) {
        if (IsEligibleMining(mining_type,
                             match_indices(n, m),
                             match_dist(n, m),
                             neg_dist_threshold)) {
          T loss = cls_loss[n * prior_num + m];
          if (mining_type == MiningType::kHardExample && loc_loss != nullptr) {
            loss = cls_loss[n * prior_num + m] + loc_loss[n * prior_num + m];
          }
          loss_idx.emplace_back(loss, m);
          ++neg_sel;
        }
      }

      // Cap the number of kept negatives according to the mining mode.
      if (mining_type == MiningType::kMaxNegative) {
        int num_pos = 0;
        for (int m = 0; m < prior_num; ++m) {
          if (match_indices(n, m) != -1) ++num_pos;
        }
        neg_sel = std::min(static_cast<int>(num_pos * neg_pos_ratio), neg_sel);
      } else if (mining_type == MiningType::kHardExample) {
        neg_sel = std::min(sample_size, neg_sel);
      }

      // Keep the neg_sel candidates with the highest loss.
      std::sort(loss_idx.begin(), loss_idx.end(), loss_greater);
      std::set<int> sel_indices;
      std::vector<int> neg_indices;
      std::transform(loss_idx.begin(),
                     loss_idx.begin() + neg_sel,
                     std::inserter(sel_indices, sel_indices.begin()),
                     [](const std::pair<T, size_t>& l) -> int {
                       return static_cast<int>(l.second);
                     });

      if (mining_type == MiningType::kHardExample) {
        // hard_example: matched boxes outside the selection are demoted to
        // -1; unmatched boxes inside the selection become negatives.
        for (int m = 0; m < prior_num; ++m) {
          if (match_indices(n, m) > -1) {
            if (sel_indices.find(m) == sel_indices.end()) {
              match_indices_et(n, m) = -1;
            }
          } else {
            if (sel_indices.find(m) != sel_indices.end()) {
              neg_indices.push_back(m);
            }
          }
        }
      } else {
        // max_negative: every selected candidate is a negative; std::set
        // iteration yields them in ascending index order.
        neg_indices.assign(sel_indices.begin(), sel_indices.end());
      }

      batch_starts.push_back(batch_starts.back() + neg_indices.size());
      all_neg_indices.push_back(std::move(neg_indices));
    }

    // Flatten per-sample index lists into NegIndices ([total, 1]) with an
    // LoD that records each sample's slice.
    framework::LoD out_neg_indices_lod;
    out_neg_indices_lod.emplace_back(batch_starts);
    int neg_offset = 0;
    auto neg_data = out_neg_indices->mutable_data<int>(
        phi::make_ddim({static_cast<int>(batch_starts.back()), 1}),
        ctx.GetPlace());
    for (const auto& neg_indices : all_neg_indices) {
      std::copy(neg_indices.begin(), neg_indices.end(), neg_data + neg_offset);
      neg_offset += neg_indices.size();
    }
    out_neg_indices->set_lod(out_neg_indices_lod);
  }
};

W
wanghaox 已提交
166 167 168 169 170
class MineHardExamplesOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
W
wanghaox 已提交
171
  void InferShape(framework::InferShapeContext* ctx) const override {
172 173 174 175 176
    OP_INOUT_CHECK(
        ctx->HasInput("ClsLoss"), "Input", "ClsLoss", "mine_hard_examples");
    OP_INOUT_CHECK(ctx->HasInput("MatchIndices"),
                   "Input",
                   "MatchIndices",
177
                   "mine_hard_examples");
178 179 180 181 182
    OP_INOUT_CHECK(
        ctx->HasInput("MatchDist"), "Input", "MatchDist", "mine_hard_examples");
    OP_INOUT_CHECK(ctx->HasOutput("NegIndices"),
                   "Output",
                   "NegIndices",
183
                   "mine_hard_examples");
184 185 186
    OP_INOUT_CHECK(ctx->HasOutput("UpdatedMatchIndices"),
                   "Output",
                   "UpdatedMatchIndices",
187
                   "mine_hard_examples");
W
wanghaox 已提交
188 189

    auto cls_loss_dims = ctx->GetInputDim("ClsLoss");
W
wanghaox 已提交
190 191
    auto idx_dims = ctx->GetInputDim("MatchIndices");
    auto dis_dims = ctx->GetInputDim("MatchDist");
W
wanghaox 已提交
192

193 194
    PADDLE_ENFORCE_EQ(cls_loss_dims.size(),
                      2UL,
195 196 197 198
                      platform::errors::InvalidArgument(
                          "The shape of ClsLoss is [N, Np]. But received %d.",
                          cls_loss_dims.size()));
    PADDLE_ENFORCE_EQ(
199 200
        idx_dims.size(),
        2UL,
201 202 203
        platform::errors::InvalidArgument(
            "The shape of MatchIndices is [N, Np]. But received %d.",
            idx_dims.size()));
204 205
    PADDLE_ENFORCE_EQ(dis_dims.size(),
                      2UL,
206 207 208
                      platform::errors::InvalidArgument(
                          "The shape of MatchDist is [N, Np]. But received %d.",
                          dis_dims.size()));
W
wanghaox 已提交
209 210 211

    if (ctx->HasInput("LocLoss")) {
      auto loc_loss_dims = ctx->GetInputDim("LocLoss");
212 213
      PADDLE_ENFORCE_EQ(loc_loss_dims.size(),
                        2UL,
214 215 216
                        platform::errors::InvalidArgument(
                            "The shape of LocLoss is [N, Np]. But received %d.",
                            loc_loss_dims.size()));
217
      if (ctx->IsRuntime()) {
218 219
        PADDLE_ENFORCE_EQ(cls_loss_dims[0],
                          loc_loss_dims[0],
220 221 222 223
                          platform::errors::InvalidArgument(
                              "Batch size of ClsLoss and LocLoss must be the "
                              "same. But received batch size of ClsLoss was "
                              "%d, batch size of LocLoss was %d.",
224 225 226 227
                              cls_loss_dims[0],
                              loc_loss_dims[0]));
        PADDLE_ENFORCE_EQ(cls_loss_dims[1],
                          loc_loss_dims[1],
228 229 230 231
                          platform::errors::InvalidArgument(
                              "Prior box number of ClsLoss and LocLoss must be "
                              "the same. But received box number of ClsLoss "
                              "was %d, box number of LocLoss was %d.",
232 233
                              cls_loss_dims[1],
                              loc_loss_dims[1]));
234
      }
W
wanghaox 已提交
235 236
    }

237
    if (ctx->IsRuntime()) {
238 239
      PADDLE_ENFORCE_EQ(cls_loss_dims[0],
                        idx_dims[0],
240 241 242 243
                        platform::errors::InvalidArgument(
                            "Batch size of ClsLoss and MatchIndices must be "
                            "the same. But received batch size of ClsLoss was "
                            "%d, batch size of MatchIndices was %d.",
244 245
                            cls_loss_dims[0],
                            idx_dims[0]));
246
      PADDLE_ENFORCE_EQ(
247 248
          cls_loss_dims[1],
          idx_dims[1],
249 250 251 252
          platform::errors::InvalidArgument(
              "Prior box number of ClsLoss and "
              "MatchIndices must be the same. But received box number of "
              "ClsLoss was %d, box number of MatchIndices was %d.",
253 254
              cls_loss_dims[1],
              idx_dims[1]));
255

256 257
      PADDLE_ENFORCE_EQ(cls_loss_dims[0],
                        dis_dims[0],
258 259 260 261
                        platform::errors::InvalidArgument(
                            "Batch size of ClsLoss and MatchDist must be the "
                            "same. But received batch size of ClsLoss was %d, "
                            "batch size of MatchDist was %d.",
262 263 264 265
                            cls_loss_dims[0],
                            dis_dims[0]));
      PADDLE_ENFORCE_EQ(cls_loss_dims[1],
                        idx_dims[1],
266 267 268 269
                        platform::errors::InvalidArgument(
                            "Prior box number of ClsLoss and MatchDist must be "
                            "the same. But received box number of ClsLoss was "
                            "%d, box number of MatchDist was %d.",
270 271
                            cls_loss_dims[1],
                            idx_dims[1]));
272
    }
W
wanghaox 已提交
273 274 275 276

    auto mining_type =
        GetMiningType(ctx->Attrs().Get<std::string>("mining_type"));

277 278
    PADDLE_ENFORCE_NE(mining_type,
                      MiningType::kNone,
279 280
                      platform::errors::InvalidArgument(
                          "mining_type must be hard_example or max_negative"));
W
wanghaox 已提交
281 282 283

    if (mining_type == MiningType::kMaxNegative) {
      auto neg_pos_ratio = ctx->Attrs().Get<float>("neg_pos_ratio");
W
wanghaox 已提交
284
      auto neg_dist_threshold = ctx->Attrs().Get<float>("neg_dist_threshold");
285 286
      PADDLE_ENFORCE_GT(neg_pos_ratio,
                        0.0f,
287 288 289 290
                        platform::errors::InvalidArgument(
                            "neg_pos_ratio must greater than zero in "
                            "max_negative mode. But received %f.",
                            neg_pos_ratio));
291 292
      PADDLE_ENFORCE_LT(neg_dist_threshold,
                        1.0f,
293 294 295 296
                        platform::errors::InvalidArgument(
                            "neg_dist_threshold must less than one in "
                            "max_negative mode. But received %f.",
                            neg_dist_threshold));
297 298
      PADDLE_ENFORCE_GT(neg_dist_threshold,
                        0.0f,
299 300 301 302
                        platform::errors::InvalidArgument(
                            "neg_dist_threshold must greater "
                            "than zero in max_negative mode. But received %f.",
                            neg_dist_threshold));
W
wanghaox 已提交
303 304
    } else if (mining_type == MiningType::kHardExample) {
      auto sample_size = ctx->Attrs().Get<int>("sample_size");
305 306
      PADDLE_ENFORCE_GT(sample_size,
                        0,
307 308 309 310
                        platform::errors::InvalidArgument(
                            "sample_size must greater than zero in "
                            "hard_example mode. But received %d.",
                            sample_size));
W
wanghaox 已提交
311 312
    }

W
wanghaox 已提交
313
    ctx->SetOutputDim("UpdatedMatchIndices", idx_dims);
314 315
    // The first dimension of NegIndices will be set correcttly in Compute.
    ctx->SetOutputDim("NegIndices", {-1, 1});
W
wanghaox 已提交
316 317 318 319
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
W
wanghaox 已提交
320
      const framework::ExecutionContext& ctx) const override {
W
wanghaox 已提交
321
    return framework::OpKernelType(
322 323
        OperatorWithKernel::IndicateVarDataType(ctx, "ClsLoss"),
        platform::CPUPlace());
W
wanghaox 已提交
324 325 326 327 328
  }
};

// Declares the op's inputs, outputs, attributes, and user-facing docs.
// Fixes in the attribute docs: sample_size is an int (was labeled
// "(float)"), mining_type is a string (was labeled "(float)"), MatchDist
// holds distances (was described as "Matched indices"), and the DOC typo
// "an Matcht" is corrected to "a MatchDist".
class MineHardExamplesOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput(
        "ClsLoss",
        "(Tensor, default Tensor<float>), The classification loss with shape "
        "[N, Np], N is the batch size and Np is the number of prior box.");
    AddInput("LocLoss",
             "(Tensor, optional, default Tensor<float>), The localization loss "
             "with shape [N, Np], N is the batch size and Np is the number of "
             "prior box.")
        .AsDispensable();
    AddInput("MatchIndices",
             "(Tensor, Tensor<int>), Matched indices with shape [N, Np], N is "
             "the batch size and Np is the number of prior box. "
             "MatchIndices[i][j] equal -1 means the j-th prior box in i-th "
             "instance does not match any entity, otherwise means it is "
             "matched to row.");
    AddInput("MatchDist",
             "(Tensor, default Tensor<float>) Matched distances with shape [N, "
             "Np], N is the batch size and Np is the number of prior box.");
    AddAttr<float>("neg_pos_ratio",
                   "(float) The ratio of the negative box to the positive "
                   "box. Use only when mining_type is max_negative.")
        .SetDefault(1.0);
    AddAttr<float>("neg_dist_threshold",
                   "(float) The negative overlap upper bound for the unmatched "
                   "predictions. Use only when mining_type is max_negative.")
        .SetDefault(0.5);
    AddAttr<int>("sample_size",
                 "(int) The max sample size of negative box. Use only when "
                 "mining_type is hard_example.")
        .SetDefault(0);
    AddAttr<std::string>("mining_type",
                         "(string) The mining algorithm name, the value is "
                         "hard_example or max_negative.")
        .SetDefault("max_negative")
        .InEnum({"hard_example", "max_negative"});

    AddOutput(
        "NegIndices",
        "(LoDTensor<int>) The output of negative example indices. a LoDTensor "
        "with shape [Neg, 1]. The size of lod[0] minus 1 is batch size, "
        "and each element is the prior box index. "
        "For example, the batch size is 2, the lod is [[0, 1, 2]], "
        "the sample 0's box 1(MatchIndices[0][1]) is selected, "
        "and sample 1's box 0 is selected. The output NegIndices is "
        "[[1], [0]].");

    AddOutput("UpdatedMatchIndices",
              "(Tensor<int>) The output of updated MatchIndices, a tensor with "
              "shape [N, Np]. Only update when mining_type is "
              "hard_example. The input MatchIndices elements will be update to "
              "-1 when it is not in the candidate high loss list of negative "
              "examples.");

    AddComment(R"DOC(
Mine hard examples Operator.
This operator implements hard example mining to select a subset of negative box indices.
For each image, selects the box with highest losses, subject to the condition that the
box cannot have a MatchDist > neg_dist_threshold when mining_type is max_negative.
The selected number is min(sample_size, max_negative_box_number) when mining_type is
hard_example, or min(neg_pos_ratio * positive_box_number, max_negative_box_number)
when mining_type is max_negative, where the max_negative_box_number is the count of
MatchIndices elements with value -1.
)DOC");
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
H
hong 已提交
399
REGISTER_OPERATOR(
400 401 402
    mine_hard_examples,
    ops::MineHardExamplesOp,
    ops::MineHardExamplesOpMaker,
H
hong 已提交
403 404
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
W
wanghaox 已提交
405

L
Leo Chen 已提交
406 407 408
REGISTER_OP_CPU_KERNEL(mine_hard_examples,
                       ops::MineHardExamplesKernel<phi::CPUContext, float>,
                       ops::MineHardExamplesKernel<phi::CPUContext, double>);