/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <stdio.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <vector>
#include "paddle/operators/ctc_decode_op.h"

namespace paddle {
namespace operators {

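// MergeAndDelCudaKernel decodes every sequence in the batch with a single
// thread: it drops `blank` tokens and, when `merge_repeated` is set,
// collapses runs of identical tokens, writing the kept tokens to `output`
// and the new absolute offsets to `out_lod0`. The scan is sequential
// because each sequence's output offset depends on how many tokens all
// earlier sequences kept, hence the <<<1, 1>>> launch below.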
template <typename T>
__global__ void MergeAndDelCudaKernel(const int64_t num_tokens, const T* tokens,
                                      const size_t num_seq, size_t* lod0,
                                      const int blank, const int merge_repeated,
                                      size_t* out_lod0, T* output) {
  int output_idx = 0;
  out_lod0[0] = 0;

  for (int i = 0; i < num_seq; ++i) {
    T pre_token = -1;
    for (int j = lod0[i]; j < lod0[i + 1]; ++j) {
      if (tokens[j] != blank && !(merge_repeated && tokens[j] == pre_token)) {
        output[output_idx] = tokens[j];
        ++output_idx;
      }
      pre_token = tokens[j];
    }
    out_lod0[i + 1] = output_idx;
  }
}

template <typename T>
class CTCDecodeOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
                   "It must use CUDAPlace.");
    const size_t level = 0;
    auto* input = ctx.Input<LoDTensor>("Input");
    auto* output = ctx.Output<LoDTensor>("Output");
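    // ToAbsOffset converts the lod to absolute offsets, so sequence i
    // occupies tokens in the half-open range
    // [input_lod[level][i], input_lod[level][i + 1]).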
    auto input_lod = framework::ToAbsOffset(input->lod());

    const T* tokens = input->data<T>();
    const int64_t num_tokens = input->dims()[0];
    const size_t num_seq = input_lod[level].size() - 1;

    const int blank = ctx.Attr<int>("blank");
    const int merge_repeated =
        static_cast<int>(ctx.Attr<bool>("merge_repeated"));

    // prepare a device-side lod buffer to record the merged offsets
    thrust::device_vector<size_t> dev_out_lod0(input_lod[level].size());
    size_t* dev_out_lod0_ptr = thrust::raw_pointer_cast(dev_out_lod0.data());

    // merge elements and delete blanks; allocate the worst case (no token
    // removed) and shrink to the real size after the kernel runs
    T* output_data = output->mutable_data<T>({num_tokens, 1}, ctx.GetPlace());

    auto stream = ctx.cuda_device_context().stream();
    // input_lod[level].data() points at host memory, so copy the offsets
    // to the device before handing the kernel a raw pointer
    thrust::device_vector<size_t> dev_in_lod0(input_lod[level].begin(),
                                              input_lod[level].end());
    size_t* dev_in_lod0_ptr = thrust::raw_pointer_cast(dev_in_lod0.data());

    MergeAndDelCudaKernel<T><<<1, 1, 0, stream>>>(
        num_tokens, tokens, num_seq, dev_in_lod0_ptr, blank, merge_repeated,
        dev_out_lod0_ptr, output_data);

    // set output lod
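    // Constructing the host_vector issues a blocking cudaMemcpy on the
    // legacy default stream; assuming the compute stream was created as a
    // blocking stream (the CUDA default), this also waits for the kernel
    // above to finish before the offsets are read.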
    thrust::host_vector<size_t> host_out_lod0(dev_out_lod0.begin(),
                                              dev_out_lod0.end());
    framework::LoD out_lod;
    // LoD rows are std::vector-compatible, so build one explicitly from
    // the thrust host_vector
    out_lod.push_back(
        std::vector<size_t>(host_out_lod0.begin(), host_out_lod0.end()));
    output->set_lod(out_lod);

    // resize output dims to the actual number of kept tokens
    output->Resize({static_cast<int64_t>(host_out_lod0.back()), 1});
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OP_CUDA_KERNEL(ctc_decode,
                        paddle::operators::CTCDecodeOpCUDAKernel<int>);
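
// Only int token ids are registered here; assuming the op definition allows
// it, an int64_t kernel could presumably be registered alongside, e.g.
//   REGISTER_OP_CUDA_KERNEL(ctc_decode,
//                           paddle::operators::CTCDecodeOpCUDAKernel<int>,
//                           paddle::operators::CTCDecodeOpCUDAKernel<int64_t>);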