/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <string>
#include <tuple>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;

class BipartiteMatchOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("DistMat"),
                   "Input(DistMat) of BipartiteMatch should not be null.");
    PADDLE_ENFORCE(
        ctx->HasOutput("ColToRowMatchIndices"),
        "Output(ColToRowMatchIndices) of BipartiteMatch should not be null.");
    PADDLE_ENFORCE(
        ctx->HasOutput("ColToRowMatchDist"),
        "Output(ColToRowMatchDist) of BipartiteMatch should not be null.");

    auto dims = ctx->GetInputDim("DistMat");
    PADDLE_ENFORCE_EQ(dims.size(), 2, "The rank of Input(DistMat) must be 2.");

    ctx->SetOutputDim("ColToRowMatchIndices", dims);
    ctx->SetOutputDim("ColToRowMatchDist", dims);
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "DistMat"),
        platform::CPUPlace());
  }
};

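// Comparator used to sort (row, col, distance) tuples in descending order of
// distance, so that the pairs with the largest distances are matched first.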
template <class T>
bool DistPairDescend(std::tuple<int, int, T> pair1,
                     std::tuple<int, int, T> pair2) {
  return std::get<2>(pair1) > std::get<2>(pair2);
}

template <typename T>
class BipartiteMatchKernel : public framework::OpKernel<T> {
 public:
  // The match_indices must be initialized to -1 at first.
  // The match_dist must be initialized to 0 at first.
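  // Illustrative example (values are hypothetical): for a single 2x3 instance
  //   dist = [[0.7, 0.2, 0.9],
  //           [0.4, 0.8, 0.1]],
  // the greedy matching pairs column 2 with row 0 (0.9), then column 1 with
  // row 1 (0.8), and leaves column 0 unmatched, so
  //   match_indices = [-1, 1, 0] and match_dist = [0, 0.8, 0.9].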
  void BipartiteMatch(const Tensor& dist, int* match_indices,
                      T* match_dist) const {
    PADDLE_ENFORCE_EQ(dist.dims().size(), 2, "The rank of dist must be 2.");
    int64_t row = dist.dims()[0];
    int64_t col = dist.dims()[1];
    auto* dist_data = dist.data<T>();
    // Test result: when row == 130, the speed of these two methods is
    // almost the same.
    if (row >= 130) {
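      // Large-matrix path: collect every (row, col, distance) triplet, sort
      // them all once in descending order of distance, then greedily accept a
      // pair only if both its row and its column are still unmatched.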
      std::vector<std::tuple<int, int, T>> match_pair;

      for (int64_t i = 0; i < row; ++i) {
        for (int64_t j = 0; j < col; ++j) {
          match_pair.push_back(std::make_tuple(i, j, dist_data[i * col + j]));
        }
      }
      std::sort(match_pair.begin(), match_pair.end(), DistPairDescend<T>);
      std::vector<int> row_indices(row, -1);

      int64_t idx = 0;
      for (int64_t k = 0; k < row * col; ++k) {
        int64_t i = std::get<0>(match_pair[k]);
        int64_t j = std::get<1>(match_pair[k]);
        T dist = std::get<2>(match_pair[k]);

        if (idx >= row) {
          break;
        }
        if (match_indices[j] == -1 && row_indices[i] == -1 && dist > 0) {
          match_indices[j] = i;
          row_indices[i] = j;
          match_dist[j] = dist;
          idx += 1;
        }
      }
    } else {
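      // Small-matrix path: repeatedly scan all unmatched columns and the rows
      // remaining in row_pool for the globally largest distance, match that
      // pair, and remove the matched row from the pool.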
      constexpr T kEPS = static_cast<T>(1e-6);
      std::vector<int> row_pool;
      for (int i = 0; i < row; ++i) {
        row_pool.push_back(i);
      }
      while (row_pool.size() > 0) {
        int max_idx = -1;
        int max_row_idx = -1;
        T max_dist = -1;
        for (int64_t j = 0; j < col; ++j) {
          if (match_indices[j] != -1) {
            continue;
          }
          for (size_t k = 0; k < row_pool.size(); ++k) {
            int m = row_pool[k];
            // distance is 0 between m-th row and j-th column
            if (dist_data[m * col + j] < kEPS) {
              continue;
            }
            if (dist_data[m * col + j] > max_dist) {
              max_idx = j;
              max_row_idx = m;
              max_dist = dist_data[m * col + j];
            }
          }
        }
        if (max_idx == -1) {
          // Cannot find good match.
          break;
        } else {
          PADDLE_ENFORCE_EQ(match_indices[max_idx], -1);
          match_indices[max_idx] = max_row_idx;
          match_dist[max_idx] = max_dist;
          // Erase the row index.
          row_pool.erase(
              std::find(row_pool.begin(), row_pool.end(), max_row_idx));
        }
      }
    }
  }

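  // For columns left unmatched by BipartiteMatch, assign the row with the
  // maximum distance when that distance reaches overlap_threshold. This is
  // the extra 'per_prediction' matching step.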
  void ArgMaxMatch(const Tensor& dist, int* match_indices, T* match_dist,
                   T overlap_threshold) const {
    constexpr T kEPS = static_cast<T>(1e-6);
    int64_t row = dist.dims()[0];
    int64_t col = dist.dims()[1];
    auto* dist_data = dist.data<T>();
    for (int64_t j = 0; j < col; ++j) {
      if (match_indices[j] != -1) {
        // the j-th column has been matched to one entity.
        continue;
      }
      int max_row_idx = -1;
      T max_dist = -1;
      for (int i = 0; i < row; ++i) {
        T dist = dist_data[i * col + j];
        if (dist < kEPS) {
          // distance is 0 between i-th row and j-th column
          continue;
        }
        if (dist >= overlap_threshold && dist > max_dist) {
          max_row_idx = i;
          max_dist = dist;
        }
      }
      if (max_row_idx != -1) {
        PADDLE_ENFORCE_EQ(match_indices[j], -1);
        match_indices[j] = max_row_idx;
        match_dist[j] = max_dist;
      }
    }
  }

  void Compute(const framework::ExecutionContext& context) const override {
    auto* dist_mat = context.Input<LoDTensor>("DistMat");
    auto* match_indices = context.Output<Tensor>("ColToRowMatchIndices");
    auto* match_dist = context.Output<Tensor>("ColToRowMatchDist");

    auto& dev_ctx = context.device_context<platform::CPUDeviceContext>();

    auto col = dist_mat->dims()[1];

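    // n is the number of instances in the mini-batch: the number of LoD
    // sequences when DistMat carries LoD information, otherwise 1.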
    int64_t n = dist_mat->lod().size() == 0UL
                    ? 1
                    : static_cast<int64_t>(dist_mat->lod().back().size() - 1);
    if (dist_mat->lod().size()) {
      PADDLE_ENFORCE_EQ(dist_mat->lod().size(), 1UL,
                        "Only support 1 level of LoD.");
    }
    match_indices->mutable_data<int>({n, col}, context.GetPlace());
    match_dist->mutable_data<T>({n, col}, context.GetPlace());

    math::SetConstant<platform::CPUDeviceContext, int> iset;
    iset(dev_ctx, match_indices, static_cast<int>(-1));
    math::SetConstant<platform::CPUDeviceContext, T> tset;
    tset(dev_ctx, match_dist, static_cast<T>(0));

    int* indices = match_indices->data<int>();
    T* dist = match_dist->data<T>();
    auto type = context.Attr<std::string>("match_type");
    auto threshold = context.Attr<float>("dist_threshold");
    if (n == 1) {
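      // A single instance: match the whole distance matrix at once.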
      BipartiteMatch(*dist_mat, indices, dist);
      if (type == "per_prediction") {
        ArgMaxMatch(*dist_mat, indices, dist, threshold);
      }
    } else {
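      // DistMat carries LoD: each sequence [lod[i], lod[i + 1]) is one
      // instance; match every sub-matrix independently and write the results
      // into the i-th row of the outputs.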
      auto lod = dist_mat->lod().back();
      for (size_t i = 0; i < lod.size() - 1; ++i) {
        Tensor one_ins = dist_mat->Slice(lod[i], lod[i + 1]);
        BipartiteMatch(one_ins, indices + i * col, dist + i * col);
        if (type == "per_prediction") {
          ArgMaxMatch(one_ins, indices + i * col, dist + i * col, threshold);
        }
      }
    }
  }
};

class BipartiteMatchOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput(
        "DistMat",
        "(LoDTensor or Tensor) this input is a 2-D LoDTensor with shape "
        "[K, M]. It is the pair-wise distance matrix between the entities "
        "represented by each row and each column. For example, assume one "
        "entity is A with shape [K] and another is B with shape [M]. The "
        "DistMat[i][j] is the distance between A[i] and B[j]. The bigger "
        "the distance is, the better the pairs are matched. Please note that "
        "this tensor can contain LoD information to represent a batch of "
        "inputs. One instance of this batch can contain different numbers of "
        "entities.");
    AddAttr<std::string>(
        "match_type",
        "(string, default: bipartite) "
        "The type of matching method, should be 'bipartite' or "
        "'per_prediction', 'bipartite' by default.")
        .SetDefault("bipartite")
        .InEnum({"bipartite", "per_prediction"});
    AddAttr<float>(
        "dist_threshold",
        "(float, default: 0.5) "
        "If `match_type` is 'per_prediction', this threshold is used to "
        "determine the extra matching bboxes based on the maximum distance.")
        .SetDefault(0.5);
    AddOutput("ColToRowMatchIndices",
              "(Tensor) A 2-D Tensor with shape [N, M] in int type. "
              "N is the batch size. If ColToRowMatchIndices[i][j] is -1, it "
              "means B[j] does not match any entity in the i-th instance. "
              "Otherwise, it means B[j] is matched to row "
              "ColToRowMatchIndices[i][j] in the i-th instance, i.e. the "
              "matched row number is saved in ColToRowMatchIndices[i][j].");
    AddOutput("ColToRowMatchDist",
              "(Tensor) A 2-D Tensor with shape [N, M] in float type. "
              "N is the batch size. If ColToRowMatchIndices[i][j] is -1, "
              "ColToRowMatchDist[i][j] is 0. Otherwise, assuming "
              "ColToRowMatchIndices[i][j] = d, and denoting the row offsets "
              "of each instance by LoD, then "
              "ColToRowMatchDist[i][j] = DistMat[d + LoD[i]][j].");
    AddComment(R"DOC(
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2-D matrix, the bipartite matching algorithm
can find the matched column for each row and the matched row for each
column; this operator only calculates the matched indices from column to
row. For each instance, the number of matched indices is the number of
columns of the input distance matrix.

There are two outputs, which save the matched indices and distances.
Briefly, this algorithm matches the row entity with the largest distance
to each column entity, and the matched indices are not duplicated in each
row of ColToRowMatchIndices. If a column entity is not matched to any row
entity, -1 is set in ColToRowMatchIndices.

Please note that the input DistMat can be a LoDTensor (with LoD) or a
Tensor. If it is a LoDTensor with LoD, the height of ColToRowMatchIndices
is the batch size. If it is a Tensor, the height of ColToRowMatchIndices
is 1.

)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(
    bipartite_match, ops::BipartiteMatchOp, ops::BipartiteMatchOpMaker,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OP_CPU_KERNEL(bipartite_match, ops::BipartiteMatchKernel<float>,
                       ops::BipartiteMatchKernel<double>);