/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/sequence_padding.h"

namespace paddle {
namespace framework {
class LoDTensor;
class Tensor;
}  // namespace framework
namespace platform {
class CPUDeviceContext;
}  // namespace platform
}  // namespace paddle

namespace paddle {
namespace operators {
namespace math {

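// Copies the valid (unpadded) steps of every sequence between a LoD-indexed
// sequence tensor and its padded counterpart. `type` selects the copy
// direction (kSeqToPad or kPadToSeq) and `layout` selects how the padded
// tensor is laid out. When one of the normalization flags is set, the copied
// values are additionally scaled (used by the warpctc gradient).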
template <typename T>
void CopyValidData(framework::Tensor* dst_tensor,
                   const framework::Tensor* src_tensor,
                   const framework::Vector<size_t>& seq_offsets,
                   int pad_seq_len, int step_width, bool norm_by_len,
                   bool norm_by_batchsize, bool norm_by_total_logits_len,
                   int total_logits_len, CopyType type, PadLayout layout) {
  int seq_num = seq_offsets.size() - 1;
  const T* src_data = src_tensor->data<T>();
  T* dst_data = dst_tensor->data<T>();

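  // Stride between consecutive time steps of one sequence: contiguous in the
  // unpadded tensor, while in the padded tensor it depends on whether
  // sequences are stored one after another (kBatchLengthWidth) or interleaved
  // across the batch (time-major layout).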
  int seq_cpy_gap = step_width;
  int pad_cpy_gap =
      layout == kBatchLengthWidth ? step_width : seq_num * step_width;
  for (int seq_idx = 0; seq_idx < seq_num; ++seq_idx) {
    int valid_seq_len = seq_offsets[seq_idx + 1] - seq_offsets[seq_idx];
    PADDLE_ENFORCE_GE(
        pad_seq_len, valid_seq_len,
        platform::errors::InvalidArgument(
            "The padded sequence length can not "
            "be less than its original length. Expected %ld >= %ld, but got "
            "%ld < %ld. Please check input value.",
            pad_seq_len, valid_seq_len, pad_seq_len, valid_seq_len));
    int seq_data_offset = seq_offsets[seq_idx] * step_width;
    int pad_data_offset = layout == kBatchLengthWidth
                              ? seq_idx * pad_seq_len * step_width
                              : seq_idx * step_width;

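    // Pick the normalization scale; the three modes are mutually exclusive
    // and are checked in priority order.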
    float scale = 1.0f;
    if (norm_by_total_logits_len) {
      scale = 1.0f / static_cast<float>(total_logits_len);
      VLOG(3) << "[warpctc grad][norm_by_total_logits_len]: scale " << scale
              << " total_logits_len " << total_logits_len;
    } else if (norm_by_batchsize) {
      scale = 1.0f / static_cast<float>(seq_num);
      VLOG(3) << "[warpctc grad][norm_by_batchsize]: scale " << scale << " B "
              << seq_num;
    } else if (norm_by_len) {
      scale = 1.0f / static_cast<float>(valid_seq_len);
      VLOG(3) << "[warpctc grad][norm_by_len]: scale " << scale << " T "
              << valid_seq_len;
    }

    for (int step_idx = 0; step_idx < valid_seq_len; ++step_idx) {
      const T* src =
          src_data + (type == kSeqToPad ? seq_data_offset : pad_data_offset);
      T* dst =
          dst_data + (type == kSeqToPad ? pad_data_offset : seq_data_offset);
      memcpy(dst, src, step_width * sizeof(T));
      // Scale the copied values when any normalization mode is enabled.
      if (norm_by_len || norm_by_batchsize || norm_by_total_logits_len) {
        for (int i = 0; i < step_width; ++i) {
          *(dst + i) *= scale;
        }
      }
      seq_data_offset += seq_cpy_gap;
      pad_data_offset += pad_cpy_gap;
    }
  }
}

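// Fills `dest`, which holds `dest_size` elements of `num_bytes` bytes each,
// with repeated copies of the single value at `src`. Rather than writing one
// element at a time, the already-initialized prefix is doubled with memcpy on
// every iteration.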
template <typename T>
static void fast_mem_init(void* dest, size_t dest_size, const T* src,
                          size_t num_bytes) {
  if (dest == nullptr || dest_size == 0 || src == nullptr) return;

  memcpy(dest, src, num_bytes);

  dest_size *= num_bytes;
  while (dest_size > num_bytes) {
    size_t remaining = dest_size - num_bytes;
    size_t count = (remaining > num_bytes) ? num_bytes : remaining;
    memcpy((unsigned char*)dest + num_bytes, dest, count);
    num_bytes += count;
  }
}

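// CPU specialization: packs a batch of variable-length sequences (LoDTensor)
// into a dense padded tensor, filling the trailing steps of each sequence
// with `pad_value` (either a single scalar or one value per step_width
// position).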
template <typename T>
class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
 public:
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::LoDTensor& seq_tensor,
                  framework::LoDTensor* pad_tensor,
                  const framework::LoDTensor& pad_value, int pad_seq_len = -1,
                  int lod_level = 0, bool norm_by_times = false,
                  bool norm_by_batchsize = false,
                  bool norm_by_total_logits_len = false,
                  const PadLayout layout = kBatchLengthWidth) {
    auto seq_lod = seq_tensor.lod();
    const auto seq_offsets = framework::ToAbsOffset(seq_lod)[lod_level];
    const auto& seq_tensor_dims = seq_tensor.dims();
    const auto& pad_tensor_dims = pad_tensor->dims();
    if (pad_seq_len == -1) {
      pad_seq_len = MaximumSequenceLength(seq_offsets);
    }
    int step_width = seq_tensor.numel() / seq_tensor_dims[0];

    CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len,
              step_width, layout);

    PADDLE_ENFORCE_EQ(
        pad_value.numel() == 1 || pad_value.numel() == step_width, true,
        platform::errors::InvalidArgument(
            "The numel of 'pad_value' can only be 1 or be equal to the "
            "'step_width', but got %ld != 1 and %ld. Please check the input "
            "value.",
            pad_value.numel(), step_width));

    // fill padding value
    T* pad_data = pad_tensor->data<T>();
    const T* pad_value_data = pad_value.data<T>();
    if (pad_value.numel() == 1) {
      fast_mem_init<T>(pad_data, pad_tensor->numel(), pad_value_data,
                       sizeof(T));
    } else {
      for (int i = 0; i < pad_tensor->numel(); i += step_width) {
        memcpy(pad_data + i, pad_value_data, step_width * sizeof(T));
      }
    }

    CopyValidData<T>(pad_tensor, &seq_tensor, seq_offsets, pad_seq_len,
                     step_width, norm_by_times, false, false, 0, kSeqToPad,
                     layout);
  }
};

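// CPU specialization: the inverse operation. Copies the valid steps of every
// padded sequence back into the LoDTensor, optionally normalizing the copied
// values (used by the warpctc gradient).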
template <typename T>
class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
 public:
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::LoDTensor& pad_tensor,
                  framework::LoDTensor* seq_tensor, int pad_seq_len = -1,
                  int lod_level = 0, bool norm_by_times = false,
                  bool norm_by_batchsize = false,
                  bool norm_by_total_logits_len = false,
                  const PadLayout layout = kBatchLengthWidth) {
    auto seq_offsets = framework::ToAbsOffset(seq_tensor->lod())[lod_level];
    const auto& seq_tensor_dims = seq_tensor->dims();
    const auto& pad_tensor_dims = pad_tensor.dims();
    if (pad_seq_len == -1) {
      pad_seq_len = MaximumSequenceLength(seq_offsets);
    }
    int total_logits_len = TotalSequenceLength(seq_offsets);
    int step_width = seq_tensor->numel() / seq_tensor_dims[0];

    CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len,
              step_width, layout);

    CopyValidData<T>(seq_tensor, &pad_tensor, seq_offsets, pad_seq_len,
                     step_width, norm_by_times, norm_by_batchsize,
                     norm_by_total_logits_len, total_logits_len, kPadToSeq,
                     layout);
  }
};

template class PaddingLoDTensorFunctor<platform::CPUDeviceContext, int>;
template class PaddingLoDTensorFunctor<platform::CPUDeviceContext, int64_t>;
template class PaddingLoDTensorFunctor<platform::CPUDeviceContext, float>;
template class PaddingLoDTensorFunctor<platform::CPUDeviceContext, double>;

template class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, int>;
template class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, int64_t>;
template class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, float>;
template class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, double>;

}  // namespace math
}  // namespace operators
}  // namespace paddle