/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/platform/device_context.h"

namespace paddle {
namespace operators {
namespace math {

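// Layout of the padded tensor: kBatchLengthWidth stores it as
// [num_sequences, max_sequence_length, sequence_width], while
// kLengthBatchWidth stores it as [max_sequence_length, num_sequences,
// sequence_width].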
enum PadLayout { kBatchLengthWidth = 0, kLengthBatchWidth };

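// Direction of the element-wise copy: kSeqToPad copies data from the sequence
// tensor into the padded tensor, kPadToSeq copies it back.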
enum CopyType { kSeqToPad, kPadToSeq };

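// Returns the length of the longest sequence described by a LoD offset
// vector, e.g. offsets {0, 4, 6, 9, 10} describe sequences of lengths
// 4, 2, 3 and 1, so the result is 4.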
inline static size_t MaximumSequenceLength(
    const framework::Vector<size_t>& seq_offset) {
  size_t seq_num = seq_offset.size() - 1;
  size_t max_seq_len = 0;
  for (size_t i = 0; i < seq_num; ++i) {
    max_seq_len = std::max(max_seq_len, seq_offset[i + 1] - seq_offset[i]);
  }
  return max_seq_len;
}

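// Validates that the sequence tensor, the padded tensor and the LoD offsets
// are consistent before padding or unpadding.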
inline static void CheckDims(const framework::DDim& seq_tensor_dims,
                             const framework::DDim& pad_tensor_dims,
                             const framework::Vector<size_t>& seq_offset,
                             int64_t padded_seq_len, int64_t step_width,
                             const PadLayout& layout) {
  PADDLE_ENFORCE_EQ(static_cast<size_t>(seq_tensor_dims[0]), seq_offset.back(),
                    "Value of 1st dimension of the sequence tensor should be "
                    "equal to sum of lengths of all sequences.");

  PADDLE_ENFORCE(seq_tensor_dims.size() + 1 == pad_tensor_dims.size() ||
                     seq_tensor_dims.size() == pad_tensor_dims.size(),
                 "pad_tensor's rank should be 1 greater than seq_tensor's "
                 "rank, or be equal with it.");
}

/*
 * \brief   Padding/Unpadding LoDTensor to/from normal Tensor of the shape
 *          [max_sequence_length, num_sequences, sequence_width].
 *
 *  Padding sequence:
 *        padding[i] = seq[lod[level][i]]
 *  Unpadding sequence:
 *        seq[lod[level][i]] = padding[i]
 *
 *  All sequences will be padded to the same length and stored in a
 *  transposed shape.
 *  Example:
 *    seq     (s0, s0, s0, s0; s1, s1; s2, s2, s2; s3)
 *    padding (s0, s1, s2, s3; s0, s1, s2, 0; s0, 0, s2, 0; s0, 0, 0, 0)
 *
 * \param context       device context of this functor.
 * \param seq           LoDTensor which is stored in sequence format, the shape
 *                      is [total_sequence_length, sequence_width] where
 *                      total_sequence_length is the sum of all sequences'
 *                      length.
 * \param padding       Tensor which is padded to the same length, the shape is
 *                      [max_sequence_length, num_sequences, sequence_width].
 * \param norm_by_times whether to divide each sequence's values by its
 *                      length.
 *
 * \note  transposition is also done in this functor.
 */
template <typename DeviceContext, typename T>
class PaddingLoDTensorFunctor {
 public:
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::LoDTensor& seq_tensor,
                  framework::LoDTensor* pad_tensor,
                  const framework::LoDTensor& pad_value, int pad_seq_len = -1,
                  int lod_level = 0, bool norm_by_times = false,
                  const PadLayout layout = kBatchLengthWidth);
};
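
// Example usage (a minimal sketch; `dev_ctx`, `seq`, `pad` and `pad_value`
// are placeholder variables assumed to be prepared by the caller):
//
//   math::PaddingLoDTensorFunctor<platform::CPUDeviceContext, float> pad_func;
//   // `seq` is a LoDTensor of shape [total_sequence_length, sequence_width]
//   // with its LoD set; `pad_value` holds the value(s) used to fill the
//   // padded positions.
//   pad_func(dev_ctx, seq, &pad, pad_value, /* pad_seq_len = */ -1,
//            /* lod_level = */ 0, /* norm_by_times = */ false,
//            kLengthBatchWidth);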

template <typename DeviceContext, typename T>
class UnpaddingLoDTensorFunctor {
 public:
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::LoDTensor& pad_tensor,
                  framework::LoDTensor* seq_tensor, int pad_seq_len = -1,
                  int lod_level = 0, bool norm_by_times = false,
                  const PadLayout layout = kBatchLengthWidth);
};
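
// Example usage (a minimal sketch mirroring the padding example above;
// `dev_ctx`, `pad` and `seq_out` are placeholders, and the sketch assumes
// `seq_out` has already been allocated with the target LoD set):
//
//   math::UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, float>
//       unpad_func;
//   unpad_func(dev_ctx, pad, &seq_out, /* pad_seq_len = */ -1,
//              /* lod_level = */ 0, /* norm_by_times = */ false,
//              kLengthBatchWidth);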

}  // namespace math
}  // namespace operators
}  // namespace paddle