edit_distance_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
namespace paddle {
namespace operators {

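// Computes the Levenshtein (edit) distance between every hypothesis sequence
// in Input(Hyps) and its corresponding reference sequence in Input(Refs).
// Output(Out) holds one distance per pair and Output(SequenceNum) receives
// the number of pairs. If Attr(normalized) is true, each distance is divided
// by the length of its reference sequence.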
template <typename Place, typename T>
class EditDistanceKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const {
    auto* out_t = ctx.Output<framework::Tensor>("Out");

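    // Hyps and Refs are LoDTensors of int64 token ids; their level-0 LoD
    // marks where each sequence in the batch begins and ends.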
    auto* x1_t = ctx.Input<framework::LoDTensor>("Hyps");
    auto* x2_t = ctx.Input<framework::LoDTensor>("Refs");
    auto* sequence_num = ctx.Output<framework::Tensor>("SequenceNum");
    int64_t* seq_num_data = sequence_num->mutable_data<int64_t>(ctx.GetPlace());

    auto normalized = ctx.Attr<bool>("normalized");

    auto hyp_lod = x1_t->lod()[0];
    auto ref_lod = x2_t->lod()[0];
    PADDLE_ENFORCE(
        hyp_lod.size() == ref_lod.size(),
        "Input(Hyps) and Input(Refs) must have the same batch size.");
    for (size_t i = 1; i < ref_lod.size(); ++i) {
      PADDLE_ENFORCE(ref_lod[i] > ref_lod[i - 1],
                     "Reference string %d is empty.", i);
    }
    auto num_strs = hyp_lod.size() - 1;
    *seq_num_data = static_cast<int64_t>(num_strs);

    out_t->Resize({static_cast<int64_t>(num_strs), 1});
    out_t->mutable_data<T>(ctx.GetPlace());
    auto out = out_t->data<T>();

    T distance = 0.0;
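    // Process each (hypothesis, reference) pair; m and n are the lengths of
    // the current hypothesis and reference sequences.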
    for (size_t num = 0; num < num_strs; ++num) {
      auto m = static_cast<int64_t>(hyp_lod[num + 1] - hyp_lod[num]);
      auto n = static_cast<int64_t>(ref_lod[num + 1] - ref_lod[num]);

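      // If either sequence is empty, the distance is the length of the other.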
      if (m == 0) {
        distance = n;
      } else if (n == 0) {
        distance = m;
      } else {
        framework::Tensor dist_t;
        dist_t.Resize({m + 1, n + 1});
        dist_t.mutable_data<T>(ctx.GetPlace());
        auto dist = dist_t.data<T>();
        auto x1 = x1_t->data<int64_t>() + hyp_lod[num];
        auto x2 = x2_t->data<int64_t>() + ref_lod[num];
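        // dist is an (m + 1) x (n + 1) table stored row-major. Seed the first
        // column (deleting every hypothesis token) and the first row
        // (inserting every reference token).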
        for (int64_t i = 0; i < m + 1; ++i) {
          dist[i * (n + 1)] = i;
        }
        for (int64_t j = 0; j < n + 1; ++j) {
          dist[j] = j;
        }
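        // Dynamic-programming recurrence: dist[i][j] is the edit distance
        // between the first i hypothesis tokens and the first j reference
        // tokens.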
        for (int64_t i = 1; i < m + 1; ++i) {
          for (int64_t j = 1; j < n + 1; ++j) {
            int cost = x1[i - 1] == x2[j - 1] ? 0 : 1;
            int dels = dist[(i - 1) * (n + 1) + j] + 1;
            int ins = dist[i * (n + 1) + (j - 1)] + 1;
            int subs = dist[(i - 1) * (n + 1) + (j - 1)] + cost;
            dist[i * (n + 1) + j] = std::min(dels, std::min(ins, subs));
          }
        }
        distance = dist[m * (n + 1) + n];
      }

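      // Optionally normalize the distance by the reference length (e.g. to
      // report a character or word error rate).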
      if (normalized) {
        PADDLE_ENFORCE(n > 0,
                       "The reference string (#%d) cannot be empty "
                       "when Attr(normalized) is enabled.",
                       num);
        distance = distance / n;
      }
      out[num] = distance;
    }
  }
};

}  // namespace operators
}  // namespace paddle