/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h"

namespace paddle {
namespace operators {
namespace math {

template <typename Place, typename T>
class CopyMatrixRowsFunctor {
 public:
  // If is_src_index is true,
  // copy the indexed rows of input src to the output dst.
  // If is_src_index is false,
  // copy the input src to the indexed rows of output dst.
  // The indexed rows are based on the input index.
  void operator()(const platform::DeviceContext& context,
                  const framework::LoDTensor& src, const size_t* index,
                  framework::LoDTensor& dst, bool is_src_index);
};
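
// Usage sketch (illustrative only, not part of the original file; `dev_ctx`,
// `src`, `index`, and `dst` are hypothetical caller-side objects that the
// caller has already allocated and sized):
//
//   CopyMatrixRowsFunctor<platform::CPUPlace, float> copy_rows;
//   // Gather: row i of dst is copied from row index[i] of src.
//   copy_rows(dev_ctx, src, index, dst, /*is_src_index=*/true);
//   // Scatter: row index[i] of dst is copied from row i of src.
//   copy_rows(dev_ctx, src, index, dst, /*is_src_index=*/false);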

template <typename Place, typename T>
class LoDTensor2BatchFunctor {
  // Calculate the length of each sequence and
  // sort the sequence indices by length in descending order.
  // example:  sequences = {s0, s1, s2}
  //           s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
  //           seq_info[3] = {(4, 5, 1), (0, 4, 0), (9, 3, 2)}
  //
  struct SeqInfo {
    SeqInfo(int start, int length, int seq_idx)
        : start(start), length(length), seq_idx(seq_idx) {}
    int start;
    int length;
    int seq_idx;
  };

 public:
  void operator()(const platform::DeviceContext& context,
                  const framework::LoDTensor& lod_tensor,
                  framework::LoDTensor& batch, bool is_cal_batch_lod,
                  bool is_reverse = false) const {
    if (!is_cal_batch_lod) {
      auto lods = batch.lod();
      PADDLE_ENFORCE_EQ(lods.size(), 2UL);
      PADDLE_ENFORCE_EQ(lods[1].size(),
                        static_cast<size_t>(lod_tensor.dims()[0]));
      CopyMatrixRowsFunctor<Place, T> to_batch;
      to_batch(context, lod_tensor, lods[1].data(), batch, true);
      return;
    }

    auto lods = lod_tensor.lod();
    PADDLE_ENFORCE_EQ(lods.size(), 1UL,
                      "Only one-level sequence is supported now.");
    auto lod = lods[0];

    std::vector<SeqInfo> seq_info;
    for (size_t seq_id = 0; seq_id < lod.size() - 1; ++seq_id) {
      int length = lod[seq_id + 1] - lod[seq_id];
      seq_info.emplace_back(lod[seq_id], length, seq_id);
    }

    std::sort(seq_info.begin(), seq_info.end(),
              [](SeqInfo a, SeqInfo b) { return a.length > b.length; });

    // Calculate the start position of each batch.
    // (num_batch equals the max length of the sequences.)
    // example:  sequences = {s0, s1, s2}
    //           s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
    //           num_batch = 5,
    //           batchIndex = {b0, b1, b2, b3, b4}
    //           b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1
    //           batch_start_positions[6] = {0, 3, 6, 9, 11, 12}
    //              batch_start_positions[0] = 0
    //              batch_start_positions[1] = len(b0)
    //              batch_start_positions[2] = len(b0) + len(b1)
    //              batch_start_positions[3] = len(b0) + len(b1) + len(b2)
    //              ...
    //           seq2batch_idx[12] = {4, 0, 9,
    //                                5, 1, 10,
    //                                6, 2, 11,
    //                                7, 3,
    //                                8}
    // The batch number (num_batch) is the number of batches after
    // rearranging the input LoDTensor; it equals the maximum length of
    // the input sequences.

    paddle::framework::LoD batch_lods;
    batch_lods.emplace_back(std::vector<size_t>{0});
    batch_lods.emplace_back(std::vector<size_t>{0});

    // batch_lods[0] stores the start positions for the batch LoDTensor.
    int num_batch = seq_info[0].length;
    batch_lods[0].resize(static_cast<size_t>(num_batch + 1));
    // batch_lods[1] stores the raw row indices in the input LoDTensor.
    auto dims = lod_tensor.dims();
    batch_lods[1].resize(static_cast<size_t>(dims[0]));

    size_t* batch_starts = batch_lods[0].data();
    size_t* seq2batch_idx = batch_lods[1].data();
    batch_starts[0] = 0;
    for (int n = 0; n < num_batch; n++) {
      auto batch_id = static_cast<int>(batch_starts[n]);
      for (size_t i = 0; i < seq_info.size(); ++i) {
        int seq_len = seq_info[i].length;
        int start = seq_info[i].start;
        if (n < seq_len) {
          seq2batch_idx[batch_id] =
              is_reverse ? start + seq_len - 1 - n : start + n;
          batch_id++;
        } else {
          break;
        }
      }
      batch_starts[n + 1] = static_cast<size_t>(batch_id);
    }
    batch.set_lod(batch_lods);

    CopyMatrixRowsFunctor<Place, T> to_batch;
    to_batch(context, lod_tensor, seq2batch_idx, batch, true);
  }
};
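
// Usage sketch (illustrative only, not part of the original file; `dev_ctx`,
// `seq`, and `batch` are hypothetical caller-side LoDTensors, with `batch`
// already resized to the same number of rows as `seq`):
//
//   LoDTensor2BatchFunctor<platform::CPUPlace, float> to_batch;
//   // Compute the two-level batch LoD from `seq` and gather its rows
//   // into time-step-major order in `batch`.
//   to_batch(dev_ctx, seq, batch, /*is_cal_batch_lod=*/true);
//   // Or reuse a batch LoD that was already set on `batch` (e.g. in a
//   // backward pass) and only copy the rows.
//   to_batch(dev_ctx, seq, batch, /*is_cal_batch_lod=*/false);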

template <typename Place, typename T>
class Batch2LoDTensorFunctor {
 public:
  void operator()(const platform::DeviceContext& context,
                  const framework::LoDTensor& batch,
                  framework::LoDTensor& lod_tensor) const {
    auto in_lod = batch.lod();
    PADDLE_ENFORCE_EQ(in_lod.size(), 2UL,
                      "The LoD size of input `batch` should be 2.");
    PADDLE_ENFORCE_EQ(in_lod[1].size(),
                      static_cast<size_t>(lod_tensor.dims()[0]));
    CopyMatrixRowsFunctor<Place, T> to_seq;
    size_t* index = in_lod[1].data();
    to_seq(context, batch, index, lod_tensor, false);
  }
};
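
// Usage sketch (illustrative only, not part of the original file; `dev_ctx`,
// `batch`, and `seq` are hypothetical caller-side LoDTensors, with `batch`
// carrying the two-level LoD produced by LoDTensor2BatchFunctor and `seq`
// already resized to match):
//
//   Batch2LoDTensorFunctor<platform::CPUPlace, float> to_seq;
//   // Scatter the batch rows back into the original sequence order.
//   to_seq(dev_ctx, batch, seq);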

}  // namespace math
}  // namespace operators
}  // namespace paddle