/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <vector>

#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h"

namespace paddle {
namespace operators {
namespace math {

template <typename Place, typename T>
class CopyMatrixRowsFunctor {
 public:
  // If is_src_index is true,
  // copy the indexed rows of input src to the output dst.
  // If is_src_index is false,
  // copy the input src to the indexed rows of output dst.
  // The indexed rows are based on the input index.
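  // For example, with index = {2, 0, 1} and is_src_index = true, the rows
  // of dst become {src[2], src[0], src[1]}; with is_src_index = false the
  // same index scatters the rows of src back: dst[2] = src[0],
  // dst[0] = src[1], dst[1] = src[2].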
  void operator()(const platform::DeviceContext& context,
                  const framework::Tensor& src, const size_t* index,
                  framework::Tensor& dst, bool is_src_index);
};

template <typename Place, typename T>
class LoDTensor2BatchFunctor {
  // Calculate the length of each sequence and
  // sort the sequences by length in descending order.
  // example:  sequences = {s0, s1, s2}
  //           s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
  //           seq_info[3] = {(4, 5, 1), (0, 4, 0), (9, 3, 2)}
  //
  struct SeqInfo {
    SeqInfo(int start, int length, int seq_idx)
        : start(start), length(length), seq_idx(seq_idx) {}
    int start;
    int length;
    int seq_idx;
  };

 public:
  void operator()(const platform::DeviceContext& context,
                  const framework::LoDTensor& lod_tensor,
                  framework::LoDTensor& batch, bool is_cal_batch_lod,
                  bool is_reverse = false) const {
    if (!is_cal_batch_lod) {
      auto lods = batch.lod();
      PADDLE_ENFORCE_GE(lods.size(), 2UL,
                        "The LoD of the batch LoDTensor should have at "
                        "least 2 levels.");
      PADDLE_ENFORCE_EQ(lods[1].size(),
                        static_cast<size_t>(lod_tensor.dims()[0]));
      CopyMatrixRowsFunctor<Place, T> to_batch;
      to_batch(context, lod_tensor, lods[1].data(), batch, true);
      return;
    }

    auto lods = lod_tensor.lod();
    auto lod = lods[0];
    PADDLE_ENFORCE_EQ(lods.size(), 1UL, "Only support one level sequence now.");
    PADDLE_ENFORCE_EQ(lod_tensor.dims()[0], static_cast<int64_t>(lod.back()));

    std::vector<SeqInfo> seq_info;
    for (size_t seq_id = 0; seq_id < lod.size() - 1; ++seq_id) {
      int length = lod[seq_id + 1] - lod[seq_id];
      seq_info.emplace_back(lod[seq_id], length, seq_id);
    }

    std::sort(seq_info.begin(), seq_info.end(),
              [](SeqInfo a, SeqInfo b) { return a.length > b.length; });

    // Calculate the start position of each batch.
    // example:  sequences = {s0, s1, s2}
    //           s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
    //           num_batch = 5,
    //           batchIndex = {b0, b1, b2, b3, b4}
    //           b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1
    //           batch_start_positions[6] = {0, 3, 6, 9, 11, 12}
    //              batch_start_positions[0] = 0
    //              batch_start_positions[1] = len(b0)
    //              batch_start_positions[2] = len(b0) + len(b1)
    //              ...
    //           seq2batch_idx[12] = {4, 0, 9,
    //                                5, 1, 10,
    //                                6, 2, 11,
    //                                7, 3,
    //                                8}
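    //           i.e. seq2batch_idx[i] is the row of the original LoDTensor
    //           that becomes row i of the batch tensor.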
    //           seq_order = {1, 0, 2}, the sort order,
    //               where 1 is the second sequence,
    //                     0 is the first sequence,
    //                     2 is the third sequence.
    // num_batch is the number of time-step batches after rearranging the
    // input LoDTensor; it equals the length of the longest input sequence.

    paddle::framework::LoD batch_lods;
    batch_lods.emplace_back(std::vector<size_t>{0});
    batch_lods.emplace_back(std::vector<size_t>{0});
    batch_lods.emplace_back(std::vector<size_t>{0});

    // batch_lods[0] is the start position of each batch in the batch
    // LoDTensor.
    int num_batch = seq_info[0].length;
    batch_lods[0].resize(static_cast<size_t>(num_batch + 1));
    // batch_lods[1] is the raw index in the input LoDTensor
    batch_lods[1].resize(static_cast<size_t>(lod_tensor.dims()[0]));
    // batch_lods[2] is the sort order for the input LoDTensor.
    batch_lods[2].resize(seq_info.size());

    size_t* batch_starts = batch_lods[0].data();
    size_t* seq2batch_idx = batch_lods[1].data();
    batch_starts[0] = 0;
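    // For each time step n, gather the n-th element of every sequence that
    // still has more than n elements. Since seq_info is sorted by length in
    // descending order, those sequences form a prefix of seq_info, so each
    // batch occupies the contiguous range [batch_starts[n],
    // batch_starts[n + 1]) in seq2batch_idx.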
    for (int n = 0; n < num_batch; n++) {
      auto batch_id = static_cast<int>(batch_starts[n]);
      for (size_t i = 0; i < seq_info.size(); ++i) {
        int seq_len = seq_info[i].length;
        int start = seq_info[i].start;
        if (n < seq_len) {
          seq2batch_idx[batch_id] =
              is_reverse ? start + seq_len - 1 - n : start + n;
          batch_id++;
        } else {
          break;
        }
      }
      batch_starts[n + 1] = static_cast<size_t>(batch_id);
    }
    size_t* seq_order = batch_lods[2].data();
    for (size_t i = 0; i < seq_info.size(); ++i) {
      seq_order[i] = seq_info[i].seq_idx;
    }
    batch.set_lod(batch_lods);

    CopyMatrixRowsFunctor<Place, T> to_batch;
    to_batch(context, lod_tensor, seq2batch_idx, batch, true);
  }
};
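
// A minimal usage sketch (illustrative only): `dev_ctx`, `seq`, and `batch`
// are hypothetical local variables inside an operator's Compute method and
// are not part of this header.
//
//   math::LoDTensor2BatchFunctor<platform::CPUPlace, float> to_batch;
//   framework::LoDTensor batch;
//   batch.mutable_data<float>(seq.dims(), platform::CPUPlace());
//   // Compute the batch LoD and reorder `seq` into time-major batches.
//   to_batch(dev_ctx, seq, batch, true /*is_cal_batch_lod*/);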

template <typename Place, typename T>
class Batch2LoDTensorFunctor {
 public:
  void operator()(const platform::DeviceContext& context,
                  const framework::LoDTensor& batch,
                  framework::LoDTensor& lod_tensor) const {
    auto in_lod = batch.lod();
    PADDLE_ENFORCE_GE(in_lod.size(), 2UL,
                      "The LoD of the input `batch` should have at least "
                      "2 levels.");
    PADDLE_ENFORCE_EQ(in_lod[1].size(),
                      static_cast<size_t>(lod_tensor.dims()[0]));
    CopyMatrixRowsFunctor<Place, T> to_seq;
    size_t* index = in_lod[1].data();
    to_seq(context, batch, index, lod_tensor, false);
  }
};
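
// Illustrative counterpart for restoring sequence order (same hypothetical
// variables as above, plus a hypothetical output tensor `seq_out`):
//
//   math::Batch2LoDTensorFunctor<platform::CPUPlace, float> to_seq;
//   // `batch` must carry the LoD produced by LoDTensor2BatchFunctor, since
//   // lod[1] stores the batch-row-to-sequence-row mapping.
//   to_seq(dev_ctx, batch, seq_out);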

}  // namespace math
}  // namespace operators
}  // namespace paddle