/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/sequence_padding.h"
#include "paddle/fluid/operators/math/sequence_scale.h"
#include "paddle/fluid/platform/dynload/warpctc.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;

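// ComputeCtcLossFunctor dispatches to warp-ctc's compute_ctc_loss (float) or
// compute_ctc_loss_double (double); the unspecialized template reports
// CTC_STATUS_EXECUTION_FAILED for unsupported element types.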
template <typename DeviceContext, typename T>
class ComputeCtcLossFunctor {
 public:
  ctcStatus_t operator()(const T* const activations, T* gradients,
                         const int* const flat_labels,
                         const int* const label_lengths,
                         const int* const input_lengths, int alphabet_size,
                         int minibatch, T* costs, void* workspace,
                         ctcOptions options) {
    return CTC_STATUS_EXECUTION_FAILED;
  }
};

template <typename DeviceContext>
class ComputeCtcLossFunctor<DeviceContext, float> {
 public:
  ctcStatus_t operator()(const float* const activations, float* gradients,
                         const int* const flat_labels,
                         const int* const label_lengths,
                         const int* const input_lengths, int alphabet_size,
                         int minibatch, float* costs, void* workspace,
                         ctcOptions options) {
    return platform::dynload::compute_ctc_loss(
        activations, gradients, flat_labels, label_lengths, input_lengths,
        static_cast<int>(alphabet_size), static_cast<int>(minibatch), costs,
        workspace, options);
  }
};

template <typename DeviceContext>
class ComputeCtcLossFunctor<DeviceContext, double> {
 public:
  ctcStatus_t operator()(const double* const activations, double* gradients,
                         const int* const flat_labels,
                         const int* const label_lengths,
                         const int* const input_lengths, int alphabet_size,
                         int minibatch, double* costs, void* workspace,
                         ctcOptions options) {
    return platform::dynload::compute_ctc_loss_double(
        activations, gradients, flat_labels, label_lengths, input_lengths,
        static_cast<int>(alphabet_size), static_cast<int>(minibatch), costs,
        workspace, options);
  }
};

template <typename DeviceContext, typename T>
class WarpCTCFunctor {
 public:
  /*
   * \brief Compute the connectionist temporal classification loss,
   *        and optionally compute the gradient with respect to the inputs.
   *
   * If gradient is nullptr, only the ctc loss is computed;
   * otherwise both the ctc loss and the gradient are computed.
   *
   * \param ctx               execution context of this functor
   * \param input             batch matrix of input probabilities, in
   *                          max_sequence_length x num_sequences x
   *                          sequence_width, (row-major) format
   * \param gradient          batch matrix of gradient, with the same shape as
   *                          input.
   * \param cpu_labels        labels always in CPU memory.
   * \param cpu_label_lengths length of all labels in CPU memory.
   * \param cpu_input_lengths length of all sequences in CPU memory.
   * \param sequence_width    number of possible output symbols.
   * \param num_sequences     number of sequences.
   * \param blank             blank label used in ctc loss function.
   * \param cpu_loss          cost of each sequence in CPU memory.
   */
  void operator()(const framework::ExecutionContext& ctx, const T* input,
                  T* gradient, const int* cpu_labels,
                  const int* cpu_label_lengths, const int* cpu_input_lengths,
                  const size_t sequence_width, const size_t num_sequences,
                  const size_t blank, T* cpu_loss) {
    // Init warp-ctc options
    init(ctx, blank);

    // Compute the required workspace size.
    // There are no memory allocation operations within warp-ctc.
    size_t workspace_bytes = 0;
    ctcStatus_t status = CTC_STATUS_UNKNOWN_ERROR;
    if (sizeof(T) == 4) {
      status = platform::dynload::get_workspace_size(
          cpu_label_lengths, cpu_input_lengths,
          static_cast<int>(sequence_width), static_cast<int>(num_sequences),
          options_, &workspace_bytes);
    } else {
      status = platform::dynload::get_workspace_size_double(
          cpu_label_lengths, cpu_input_lengths,
          static_cast<int>(sequence_width), static_cast<int>(num_sequences),
          options_, &workspace_bytes);
    }
    PADDLE_ENFORCE_EQ(
        CTC_STATUS_SUCCESS, status,
        platform::errors::PreconditionNotMet(
            "warp-ctc [version %d] Error in get_workspace_size: %s",
            warpctc_version_, platform::dynload::ctcGetStatusString(status)));
    PADDLE_ENFORCE_GT(
        workspace_bytes, 0UL,
        platform::errors::InvalidArgument(
            "Bytes of workspace got by warp-ctc function, "
            "get_workspace_size() should be larger than 0, but received %d",
            workspace_bytes));

    auto& dev_ctx = ctx.template device_context<DeviceContext>();
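    // Allocate the workspace as elements of T, adding one element so the
    // buffer covers at least workspace_bytes.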
    size_t workspace_elements = workspace_bytes / sizeof(T) + 1UL;
    Tensor workspace = ctx.AllocateTmpTensor<T, DeviceContext>(
        framework::make_ddim({static_cast<int64_t>(workspace_elements)}),
        dev_ctx);
    T* workspace_data = workspace.data<T>();
    math::SetConstant<DeviceContext, T>()(
        ctx.template device_context<DeviceContext>(), &workspace,
        static_cast<T>(0));

    // compute loss and gradient
    status = ComputeCtcLossFunctor<DeviceContext, T>()(
        input, gradient, cpu_labels, cpu_label_lengths, cpu_input_lengths,
        static_cast<int>(sequence_width), static_cast<int>(num_sequences),
        cpu_loss, workspace_data, options_);

    PADDLE_ENFORCE_EQ(
        CTC_STATUS_SUCCESS, status,
        platform::errors::PreconditionNotMet(
            "warp-ctc [version %d] Error in ComputeCtcLossFunctor: %s",
            warpctc_version_, platform::dynload::ctcGetStatusString(status)));
  }

 protected:
  void init(const framework::ExecutionContext& ctx, const size_t blank) {
    warpctc_version_ = platform::dynload::get_warpctc_version();

    if (platform::is_gpu_place(ctx.GetPlace())) {
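      // On GPU, run warp-ctc with CTC_GPU and bind it to the stream of the
      // current device context.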
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      options_.loc = CTC_GPU;
      options_.stream = reinterpret_cast<const platform::CUDADeviceContext&>(
                            ctx.device_context())
                            .stream();
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "[warpctc init] GPU is not enabled."));
#endif
    } else {
      options_.loc = CTC_CPU;
      options_.num_threads = 1;
    }

    options_.blank_label = blank;
  }

 private:
  int warpctc_version_;
  ctcOptions options_;
};

template <typename DeviceContext, typename T>
class WarpCTCKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* logits = ctx.Input<LoDTensor>("Logits");
    auto* label = ctx.Input<LoDTensor>("Label");
    auto* warpctc_grad = ctx.Output<Tensor>("WarpCTCGrad");
    auto* loss = ctx.Output<Tensor>("Loss");

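    // Two input layouts are supported: when LogitsLength/LabelLength are
    // provided, Logits and Label are dense padded tensors and the per-sequence
    // lengths come from those tensors; otherwise the lengths are read from the
    // LoD of the inputs.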
    size_t num_sequences, sequence_width, max_sequence_length;
    framework::Vector<size_t> logits_lod;
    framework::Vector<size_t> label_lod;

    if (ctx.HasInput("LogitsLength") && ctx.HasInput("LabelLength")) {
      num_sequences = logits->dims()[1];
      sequence_width = logits->dims()[2];
      max_sequence_length = logits->dims()[0];

      PADDLE_ENFORCE_GT(max_sequence_length, 0,
                        platform::errors::InvalidArgument(
                            "The first dimension of Input(Logits) should be "
                            "greater than zero "
                            "but received %d. ",
                            max_sequence_length));

      PADDLE_ENFORCE_GT(num_sequences, 0,
                        platform::errors::InvalidArgument(
                            "The second dimension of Input(Logits) should be "
                            "greater than zero "
                            "but received %d. ",
                            num_sequences));

      PADDLE_ENFORCE_GT(sequence_width, 0,
                        platform::errors::InvalidArgument(
                            "The third dimension of Input(Logits) should be "
                            "greater than zero "
                            "but received %d. ",
                            sequence_width));

      auto* logits_length = ctx.Input<framework::Tensor>("LogitsLength");
      auto* labels_length = ctx.Input<framework::Tensor>("LabelLength");
      framework::Tensor logits_length_cpu;
      framework::Tensor labels_length_cpu;
      framework::TensorCopy(*logits_length, platform::CPUPlace(),
                            &logits_length_cpu);
      framework::TensorCopy(*labels_length, platform::CPUPlace(),
                            &labels_length_cpu);

      logits_lod.push_back(0);
      label_lod.push_back(0);
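      // Build absolute offsets (prefix sums of the per-sequence lengths) for
      // logits and labels.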
      for (size_t i = 0; i < num_sequences; i++) {
        logits_lod.push_back(logits_lod[i] +
                             logits_length_cpu.data<int64_t>()[i]);
        label_lod.push_back(label_lod[i] +
                            labels_length_cpu.data<int64_t>()[i]);
      }
    } else {
      PADDLE_ENFORCE_GT(logits->NumLevels(), 0UL,
                        platform::errors::InvalidArgument(
                            "Input(Logits) Tensor of WarpCTC "
                            "does not contain LoD information."));
      PADDLE_ENFORCE_GT(label->NumLevels(), 0UL,
                        platform::errors::InvalidArgument(
                            "Input(Label) Tensor of WarpCTC "
                            "does not contain LoD information."));

      logits_lod = framework::ToAbsOffset(logits->lod())[0];
      auto logits_dims = logits->dims();

      PADDLE_ENFORCE_GT(logits_dims[0], 0,
                        platform::errors::InvalidArgument(
                            "The first dimension of Input(Logits) should be "
                            "greater than zero "
                            "but received %d. ",
                            logits_dims[0]));

      PADDLE_ENFORCE_EQ(
          logits_dims[0], static_cast<int64_t>(logits_lod.back()),
          platform::errors::InvalidArgument(
              "The first dimension of Input(Logits) should be equal to "
              "the sum of all sequences' lengths = %d., but received %d. ",
              static_cast<int64_t>(logits_lod.back()), logits_dims[0]));

      label_lod = framework::ToAbsOffset(label->lod())[0];
      auto label_dims = label->dims();
      PADDLE_ENFORCE_EQ(label_dims[1], 1,
                        platform::errors::InvalidArgument(
                            "The last dimension of Input(Label) should be 1, "
                            "but received %d",
                            label_dims[1]));

      num_sequences = logits_lod.size() - 1;
      PADDLE_ENFORCE_EQ(
          num_sequences, label_lod.size() - 1,
          platform::errors::InvalidArgument(
              "The number of sequences of Input(Logits) should be "
              "equal to that of Input(Label) = %d, but received %d",
              label_lod.size() - 1, num_sequences));

      sequence_width = logits->numel() / logits_dims[0];
      max_sequence_length = math::MaximumSequenceLength(logits_lod);
    }

    auto loss_dims =
        framework::make_ddim({static_cast<int64_t>(num_sequences), 1});

    // warpctc needs sequence data stored in transposed padding format
    LoDTensor warpctc_logits;
    auto warpctc_logits_dims =
        framework::make_ddim({static_cast<int64_t>(max_sequence_length),
                              static_cast<int64_t>(num_sequences),
                              static_cast<int64_t>(sequence_width)});
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    Tensor warpctc_logits_tmp =
        ctx.AllocateTmpTensor<T, DeviceContext>(warpctc_logits_dims, dev_ctx);
    warpctc_logits.ShareDataWith(warpctc_logits_tmp);
    if (ctx.HasInput("LogitsLength")) {
      TensorCopySync(*logits, ctx.GetPlace(), &warpctc_logits);
    } else {
      LoDTensor cpu_pad_value;
      T* pad_value_data =
          cpu_pad_value.mutable_data<T>({1}, platform::CPUPlace());
      *pad_value_data = static_cast<T>(0);
      LoDTensor pad_value;
      if (platform::is_cpu_place(ctx.GetPlace())) {
        pad_value = cpu_pad_value;
      } else {
        TensorCopySync(cpu_pad_value, ctx.GetPlace(), &pad_value);
      }

      math::PaddingLoDTensorFunctor<DeviceContext, T>()(
          ctx.template device_context<DeviceContext>(), *logits,
          &warpctc_logits, pad_value, -1, 0, false /* norm_by_times */, false,
          false, math::kLengthBatchWidth);
    }
    const T* warpctc_logits_data = warpctc_logits.data<T>();

    std::vector<int> warpctc_label_lengths(num_sequences);
    std::vector<int> warpctc_logits_lengths(num_sequences);

    for (size_t i = 0; i < num_sequences; ++i) {
      warpctc_label_lengths[i] = label_lod[i + 1] - label_lod[i];
      warpctc_logits_lengths[i] = logits_lod[i + 1] - logits_lod[i];
    }

    // warpctc computes loss and gradient in one call; the gradient data is
    // also stored in batch format
    T* warpctc_grad_data =
        warpctc_grad->mutable_data<T>(warpctc_logits.dims(), ctx.GetPlace());

    math::SetConstant<DeviceContext, T>()(
        ctx.template device_context<DeviceContext>(), warpctc_grad,
        static_cast<T>(0));

    // warpctc accesses labels in CPU memory
    LoDTensor warpctc_label;
    if (ctx.HasInput("LogitsLength")) {
      warpctc_label.mutable_data<int>(
          {static_cast<int64_t>(math::TotalSequenceLength(label_lod)), 1},
          platform::CPUPlace());
      std::vector<framework::Vector<size_t>> lod;
      lod.push_back(label_lod);
      warpctc_label.set_lod(lod);

      if (platform::is_cpu_place(ctx.GetPlace())) {
        math::UnpaddingLoDTensorFunctor<DeviceContext, int>()(
            ctx.template device_context<DeviceContext>(), *label,
            &warpctc_label, label->dims()[1] /*pad_seq_len*/, 0 /*lod_level*/,
            false /*norm_by_times*/, false, false, math::kBatchLengthWidth);
      } else {
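        // Unpad the labels on the GPU first, then copy the flattened result
        // to CPU memory for warp-ctc.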
        LoDTensor gpu_label;
        gpu_label.mutable_data<int>(
            {static_cast<int64_t>(math::TotalSequenceLength(label_lod)), 1},
            ctx.GetPlace());
        gpu_label.set_lod(lod);
        math::UnpaddingLoDTensorFunctor<DeviceContext, int>()(
            ctx.template device_context<DeviceContext>(), *label, &gpu_label,
            label->dims()[1] /*pad_seq_len*/, 0 /*lod_level*/,
            false /*norm_by_times*/, false, false, math::kBatchLengthWidth);
        TensorCopySync(gpu_label, platform::CPUPlace(), &warpctc_label);
      }
    } else {
      TensorCopySync(*label, platform::CPUPlace(), &warpctc_label);
    }

    const int* warpctc_label_data = warpctc_label.data<int>();
    // warpctc stores loss in CPU memory
    Tensor warpctc_loss;
    T* warpctc_loss_data =
        warpctc_loss.mutable_data<T>(loss_dims, platform::CPUPlace());

    const size_t blank = static_cast<size_t>(ctx.Attr<int>("blank"));

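    // Compute the CTC loss (and the gradient into warpctc_grad_data) with
    // warp-ctc; the per-sequence loss is written to CPU memory.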
    WarpCTCFunctor<DeviceContext, T>()(
        ctx, warpctc_logits_data, warpctc_grad_data, warpctc_label_data,
        warpctc_label_lengths.data(), warpctc_logits_lengths.data(),
        sequence_width, num_sequences, blank, warpctc_loss_data);

    // Copy the loss back
    TensorCopy(warpctc_loss, ctx.GetPlace(), ctx.device_context(), loss);
  }
};

template <typename DeviceContext, typename T>
class WarpCTCGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const Tensor* loss_grad = ctx.Input<Tensor>(framework::GradVarName("Loss"));
    auto* warpctc_grad = ctx.Input<LoDTensor>("WarpCTCGrad");
    auto* logits_grad = ctx.Output<LoDTensor>(framework::GradVarName("Logits"));

    logits_grad->mutable_data<T>(ctx.GetPlace());
    bool norm_by_times = ctx.Attr<bool>("norm_by_times");
    bool norm_by_batchsize = ctx.Attr<bool>("norm_by_batchsize");
    bool norm_by_total_logits_len = ctx.Attr<bool>("norm_by_total_logits_len");

    if ((norm_by_times && norm_by_batchsize) ||
        (norm_by_times && norm_by_total_logits_len) ||
        (norm_by_batchsize && norm_by_total_logits_len)) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "[warpctc grad] norm_by_times, norm_by_batchsize and "
          "norm_by_total_logits_len "
          "should one be true."));
    }

    if (ctx.HasInput("LogitsLength")) {
      int max_seq_length = warpctc_grad->dims()[0];  // Tmax
      int num_sequences = warpctc_grad->dims()[1];   // B
      int seq_width = warpctc_grad->dims()[2];       // D

      auto* logits_length = ctx.Input<framework::Tensor>("LogitsLength");
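      // Scale warp-ctc's (Tmax, B, D) gradient by the (B, 1) loss gradient,
      // broadcasting over time and width.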
      // B
      auto logits_len_e =
          framework::EigenTensor<int64_t, 1>::From(*logits_length);
      // (B, 1)
      auto loss_grad_e = framework::EigenTensor<T, 2>::From(*loss_grad);
      // (T, B, D)
      auto warpctc_grad_e = framework::EigenTensor<T, 3>::From(*warpctc_grad);

      auto logits_grad_e = framework::EigenTensor<T, 3>::From(*logits_grad);

      Eigen::DSizes<int, 3> grad_shape(1, num_sequences, 1);
      Eigen::DSizes<int, 3> bcast(max_seq_length, 1, seq_width);
      auto logits_g = warpctc_grad_e *
                      loss_grad_e.reshape(grad_shape).broadcast(bcast).eval();

      auto* place = ctx.template device_context<DeviceContext>().eigen_device();
      if (norm_by_total_logits_len) {
        // Compute the avg. log-probability per batch sample and frame.
        // Rank is 0
        auto inv_len = logits_len_e.sum().cast<T>().inverse().eval();
        logits_grad_e.device(*place) =
            logits_g *
            inv_len.reshape(Eigen::DSizes<int, 3>{1, 1, 1})
                .broadcast(Eigen::DSizes<int, 3>{max_seq_length, num_sequences,
                                                 seq_width});
      } else if (norm_by_batchsize) {
        // Compute the avg. log-probability per batch sample.
        T scale = 1.0 / static_cast<T>(num_sequences);
        logits_grad_e.device(*place) = logits_g * scale;
      } else if (norm_by_times) {
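        // Normalize each sequence's gradient by its own logits length.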
        auto scales = logits_len_e.cast<T>()
                          .inverse()
                          .reshape(grad_shape)
                          .broadcast(bcast)
                          .eval();
        logits_grad_e.device(*place) = logits_g * scales;
      } else {
        logits_grad_e.device(*place) = logits_g;
      }
    } else {
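      // Convert the batch-major gradient back to the LoD layout of Logits,
      // then scale each sequence's gradient by the corresponding loss
      // gradient.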
      math::UnpaddingLoDTensorFunctor<DeviceContext, T>()(
          ctx.template device_context<DeviceContext>(), *warpctc_grad,
          logits_grad, -1, 0, norm_by_times, norm_by_batchsize,
          norm_by_total_logits_len, math::kLengthBatchWidth);

      const T* loss_grad_data = loss_grad->data<T>();
      math::ScaleLoDTensorFunctor<DeviceContext, T>()(
          ctx.template device_context<DeviceContext>(), loss_grad_data,
          logits_grad);
    }
  }
};

}  // namespace operators
}  // namespace paddle