/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/sequence_padding.h"

namespace paddle {
namespace operators {
namespace math {

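// Direction of the copy performed by CopyValidData: from the compacted
// sequence tensor into the padded tensor, or the reverse.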
enum CopyType { kSeqToPad, kPadToSeq };

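// Copies the valid (non-padding) steps of every sequence between the
// compacted sequence tensor and the padded tensor, in the direction given
// by `type`. If `norm_by_len` is set, each copied element is additionally
// scaled by 1 / valid_seq_len.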
template <typename T>
void CopyValidData(framework::Tensor* dst_tensor,
                   const framework::Tensor* src_tensor,
                   const framework::Vector<size_t>& seq_offsets,
                   int pad_seq_len, int step_width, bool norm_by_len,
                   CopyType type, PadLayout layout) {
  int seq_num = seq_offsets.size() - 1;
  const T* src_data = src_tensor->data<T>();
  T* dst_data = dst_tensor->data<T>();

  // Stride between consecutive steps of the same sequence: the compacted
  // tensor is always contiguous, while the padded tensor's stride is
  // step_width in the batch-major layout and seq_num * step_width otherwise.
  int seq_cpy_gap = step_width;
  int pad_cpy_gap =
      layout == kBatchLengthWidth ? step_width : seq_num * step_width;
  for (int seq_idx = 0; seq_idx < seq_num; ++seq_idx) {
    int valid_seq_len = seq_offsets[seq_idx + 1] - seq_offsets[seq_idx];
    PADDLE_ENFORCE_GE(
        pad_seq_len, valid_seq_len,
        "The padded sequence length cannot be less than its original length.");
    int seq_data_offset = seq_offsets[seq_idx] * step_width;
    int pad_data_offset = layout == kBatchLengthWidth
                              ? seq_idx * pad_seq_len * step_width
                              : seq_idx * step_width;
    // Scale applied to each copied element when norm_by_len is set.
    float scale = 1.0f / static_cast<float>(valid_seq_len);

    for (int step_idx = 0; step_idx < valid_seq_len; ++step_idx) {
      const T* src =
          src_data + (type == kSeqToPad ? seq_data_offset : pad_data_offset);
      T* dst =
          dst_data + (type == kSeqToPad ? pad_data_offset : seq_data_offset);
      memcpy(dst, src, step_width * sizeof(T));
      if (norm_by_len) {
        for (int i = 0; i < step_width; ++i) {
          *(dst + i) *= scale;
        }
      }
      seq_data_offset += seq_cpy_gap;
      pad_data_offset += pad_cpy_gap;
    }
  }
}

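// Pads all sequences in `seq_tensor` to the same length, `pad_seq_len`
// (the length of the longest sequence when -1), writing the result into
// the pre-allocated `pad_tensor` and filling the trailing steps with
// `pad_value`.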
template <typename T>
class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
 public:
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::LoDTensor& seq_tensor,
                  framework::LoDTensor* pad_tensor,
                  std::vector<T> pad_value = {0}, int pad_seq_len = -1,
                  int lod_level = 0, bool norm_by_times = false,
                  const PadLayout layout = kBatchLengthWidth) {
    auto seq_offsets = framework::ToAbsOffset(seq_tensor.lod())[lod_level];
    auto seq_tensor_dims = seq_tensor.dims();
    auto pad_tensor_dims = pad_tensor->dims();
    if (pad_seq_len == -1) {
      pad_seq_len = MaximumSequenceLength(seq_offsets);
    }
    int step_width = seq_tensor.numel() / seq_tensor_dims[0];

    CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len,
              step_width, layout);
    PADDLE_ENFORCE(pad_value.size() == 1 ||
                       static_cast<int>(pad_value.size()) == step_width,
                   "The size of 'pad_value' can only be 1 or equal to "
                   "'step_width'.");

    if (pad_value.size() == 1) {
      pad_value = std::vector<T>(step_width, pad_value[0]);
    }

    // fill padding value
    T* pad_data = pad_tensor->data<T>();
    for (int i = 0; i < pad_tensor->numel() / step_width; ++i) {
      memcpy(pad_data + i * step_width, pad_value.data(),
             step_width * sizeof(T));
    }

    CopyValidData<T>(pad_tensor, &seq_tensor, seq_offsets, pad_seq_len,
                     step_width, norm_by_times, kSeqToPad, layout);
  }
};
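
// A minimal usage sketch (illustrative only; assumes a prepared
// CPUDeviceContext `ctx`, a one-level LoDTensor `seq`, and a `padded`
// tensor pre-allocated as [batch_size, max_seq_len, step_width]):
//
//   PaddingLoDTensorFunctor<platform::CPUDeviceContext, float> pad_functor;
//   pad_functor(ctx, seq, &padded, /*pad_value=*/{0.0f}, /*pad_seq_len=*/-1,
//               /*lod_level=*/0, /*norm_by_times=*/false, kBatchLengthWidth);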

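// The inverse of PaddingLoDTensorFunctor: copies the valid steps of each
// sequence out of `pad_tensor` back into `seq_tensor`, whose LoD defines
// the sequence boundaries.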
template <typename T>
class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
 public:
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::LoDTensor& pad_tensor,
                  framework::LoDTensor* seq_tensor, int pad_seq_len = -1,
                  int lod_level = 0, bool norm_by_times = false,
                  const PadLayout& layout = kBatchLengthWidth) {
    auto seq_offsets = framework::ToAbsOffset(seq_tensor->lod())[lod_level];
    auto seq_tensor_dims = seq_tensor->dims();
    auto pad_tensor_dims = pad_tensor.dims();
    if (pad_seq_len == -1) {
      pad_seq_len = MaximumSequenceLength(seq_offsets);
    }
    int step_width = seq_tensor->numel() / seq_tensor_dims[0];

    CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len,
              step_width, layout);

    CopyValidData<T>(seq_tensor, &pad_tensor, seq_offsets, pad_seq_len,
                     step_width, norm_by_times, kPadToSeq, layout);
  }
};

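// Explicit instantiations for the element types these functors are used
// with on CPU.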
template class PaddingLoDTensorFunctor<platform::CPUDeviceContext, int>;
template class PaddingLoDTensorFunctor<platform::CPUDeviceContext, int64_t>;
template class PaddingLoDTensorFunctor<platform::CPUDeviceContext, float>;
template class PaddingLoDTensorFunctor<platform::CPUDeviceContext, double>;

template class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, int>;
template class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, int64_t>;
template class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, float>;
template class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, double>;

}  // namespace math
}  // namespace operators
}  // namespace paddle