/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/framework/eigen.h"
#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/tensor.h"
#include "paddle/operators/math/im2col.h"

namespace paddle {
namespace operators {
namespace math {

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
/*
 * \brief Context projection concatenates features in adjacent time steps in
 * a sequence. The i-th row of the output is the concatenation of
 * context_length rows of the input. The context_length rows are the
 * consecutive rows from the i+shift_start row.
 *
 * \param in            Input data.
 * \param Shape         The shape of Input data,
 *                      [minibatch, input_hidden_size].
 *
 * \param padding_data  Padding data.
 * \param Shape         The shape of Padding data,
 *                      [up_pad + down_pad, input_hidden_size].
 *
 * \param col           Col data.
 * \param Shape         The shape of Col data,
 *                      [minibatch, context_length * input_hidden_size].
 *
 * For a mini-batch of 2 variable-length sentences, containing 3 and 1
 * time-steps respectively:
 *
 * Assume the input (X) is a [4, M, N] float LoDTensor, and X->lod()[0] =
 * [0, 3, 4].
 * For the sake of simplicity, we further assume M=1 and N=2.
 *
 * X = [[a1, a2;
 *       b1, b2;
 *       c1, c2]
 *      [d1, d2]]
 *
 * This is to say that input (X) has 4 words and the dimension of each word
 * representation is 2.
 *
 * - Case1:
 * If context_start is -1 and padding_trainable is false, we pad with zeros
 * instead of learned weights,
 * and with context_length = 3 the output (Out) is:
 *
 * Out =[[0,  0,  a1, a2, b1, b2;
 *        a1, a2, b1, b2, c1, c2;
 *        b1, b2, c1, c2, 0,  0 ]
 *       [0,  0,  d1, d2, 0,  0 ]]
 *
 * - Case2:
 * If context_start is -1 and padding_trainable is true, we pad with learned
 * weights,
 * and with context_length = 3 the output (Out) is:
 *
 * Out = [[w1, w2, a1, a2, b1, b2;
 *         a1, a2, b1, b2, c1, c2;
 *         b1, b2, c1, c2, w3, w4]
 *        [w1, w2, d1, d2, w3, w4]]
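 *
 * How the pad sizes relate to the parameters above (a sketch of the
 * contract this functor assumes; a caller such as the sequence_conv
 * operator is expected to derive them as):
 *
 *   up_pad   = max(0, -context_start)
 *   down_pad = max(0, context_start + context_length - 1)
 *
 * For both cases above (context_start = -1, context_length = 3) this gives
 * up_pad = 1 and down_pad = 1, i.e. one padded row at each end of every
 * sequence.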
 *
 */

template <typename Place, typename T>
class ContextProjectFunctor {
 public:
  void operator()(const platform::DeviceContext& context,
                  framework::LoDTensor& in, framework::Tensor& padding_data,
                  framework::Tensor& col, bool padding_trainable,
                  int context_start, int context_length, int context_stride,
                  int up_pad, int down_pad, bool gradient, bool input_grad,
                  bool pad_grad) {
    auto lod_level_0 = in.lod()[0];

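    // Context projection is expressed through im2col / col2im in kOCF
    // layout, i.e. the col tensor is laid out as [output_height,
    // output_width, input_channels, filter_height, filter_width] (see the
    // Resize calls below): each sequence is a one-channel "image" of shape
    // [sequence_height, sequence_width], and each context window is a
    // filter patch of height context_length covering the full width.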
    paddle::operators::math::Im2ColFunctor<
        paddle::operators::math::ColFormat::kOCF, Place, T>
        im2col_ocf;
    paddle::operators::math::Col2ImFunctor<
        paddle::operators::math::ColFormat::kOCF, Place, T>
        col2im_ocf;

    int input_row_begin, input_row_end;
    int sequence_height, sequence_width;
    sequence_width = in.dims()[1];
    input_grad = gradient && input_grad;
    pad_grad = gradient && pad_grad;

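    // Flag semantics: when `gradient` is false, both branches below run the
    // forward pass; when it is true, `input_grad` and `pad_grad` select
    // which gradients are accumulated (col2im back into `in`, and col
    // slices summed into `padding_data`, respectively).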
    if (!gradient || input_grad) {
      for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
        input_row_begin = (context_start > 0)
                              ? static_cast<int>(lod_level_0[i]) + context_start
                              : static_cast<int>(lod_level_0[i]);
        input_row_end = static_cast<int>(lod_level_0[i + 1]);

        framework::Tensor out_t =
            col.Slice(static_cast<int>(lod_level_0[i]),
                      static_cast<int>(lod_level_0[i + 1]));

        sequence_height = static_cast<int>(out_t.dims()[0]);

        if (input_row_begin < input_row_end) {
          framework::Tensor in_t = in.Slice(input_row_begin, input_row_end);

          std::vector<int64_t> output_shape(
              {sequence_height, 1, 1, context_length,
               sequence_width});  // output_height, output_width,
                                  // input_channels, filter_height, filter_width

          out_t.Resize(framework::make_ddim(output_shape));

          std::vector<int64_t> input_shape(
              {1, input_row_end - input_row_begin,
               sequence_width});  // input_channels, input_height, input_width
          in_t.Resize(framework::make_ddim(input_shape));

          if (gradient) {
            col2im_ocf(context, in_t, out_t,
                       /*stride_height*/ context_stride, /*stride_width*/ 1,
                       up_pad, down_pad, 0, 0);
          } else {
            im2col_ocf(context, in_t, out_t,
                       /*stride_height*/ context_stride, /*stride_width*/ 1,
                       up_pad, down_pad, 0, 0);
          }
          out_t.Resize({sequence_height, context_length * sequence_width});
        }
      }
    }
    if (!gradient || pad_grad) {
      if (padding_trainable) {
        for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
          framework::Tensor out_t =
              col.Slice(static_cast<int>(lod_level_0[i]),
                        static_cast<int>(lod_level_0[i + 1]));

          sequence_height = static_cast<int>(out_t.dims()[0]);

          // handle the trainable padding rows: copy them in the forward
          // pass, accumulate their gradients in the backward pass
          out_t.Resize({sequence_height * context_length, sequence_width});

          if (up_pad > 0) {  // add up pad
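            // The first rows of each sequence have context windows that
            // start before the sequence; those positions are taken from the
            // top rows of padding_data (forward) or contribute gradients to
            // them (backward).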
            int padding_rows = std::min(
                up_pad, static_cast<int>(lod_level_0[i + 1] - lod_level_0[i]));

            for (int k = 0; k < padding_rows; ++k) {
              int padding_size =
                  k + context_length < up_pad ? context_length : up_pad - k;
              framework::Tensor out_t_sub = out_t.Slice(
                  k * context_length, k * context_length + padding_size);
              framework::Tensor w_sub = padding_data.Slice(k, k + padding_size);
              // in this block, using EigenVector<T>::Flatten is ok too.
              auto out_t_sub_e = EigenMatrix<T>::From(out_t_sub);
              auto w_sub_e = EigenMatrix<T>::From(w_sub);
              if (gradient) {
                w_sub_e.device(*context.GetEigenDevice<Place>()) =
                    w_sub_e + out_t_sub_e;
              } else {
                out_t_sub_e.device(*context.GetEigenDevice<Place>()) = w_sub_e;
              }
            }
          }
          if (down_pad > 0) {  // add down pad
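            // Symmetrically, the last rows have context windows that run
            // past the end of the sequence; those positions map to the
            // down-padding rows, stored in padding_data at offset up_pad.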
            int down_pad_begin_row =
                std::max(
                    0, (sequence_height - context_start - context_length) + 1) +
                1;
            int padding_begin = std::max(0, context_start - sequence_height);
            int padding_size =
                sequence_height - context_start >= context_length
                    ? 1
                    : context_length - (sequence_height - context_start);
            if (context_start >= sequence_height) padding_size = context_length;
            int padding_idx = padding_begin;
            for (int t = 0; t + down_pad_begin_row <= sequence_height;
                 ++t, ++padding_size) {
              if (context_start >= sequence_height)
                padding_size = context_length;
              if (padding_size > context_length) {
                padding_size = context_length;
                padding_idx++;
              }
              if (padding_begin > 0 || sequence_height == context_start)
                padding_idx = padding_begin + t;
              framework::Tensor out_t_sub = out_t.Slice(
                  (down_pad_begin_row + t) * context_length - padding_size,
                  (down_pad_begin_row + t) * context_length);
              framework::Tensor w_sub = padding_data.Slice(
                  up_pad + padding_idx, up_pad + padding_idx + padding_size);
              auto out_t_sub_e = EigenMatrix<T>::From(out_t_sub);
              auto w_sub_e = EigenMatrix<T>::From(w_sub);
              if (gradient) {
                w_sub_e.device(*context.GetEigenDevice<Place>()) =
                    w_sub_e + out_t_sub_e;
              } else {
                out_t_sub_e.device(*context.GetEigenDevice<Place>()) = w_sub_e;
              }
            }
          }
          out_t.Resize({sequence_height, context_length * sequence_width});
        }
      }
    }
  }
};
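
// A minimal usage sketch (illustrative only): the tensor setup below is
// hypothetical and elided, and the shapes follow the Case1 example in the
// comment above (context_start = -1, context_length = 3, hence up_pad = 1
// and down_pad = 1).
//
//   platform::CPUDeviceContext ctx;
//   framework::LoDTensor in;         // shape [4, 2], in.lod()[0] = [0, 3, 4]
//   framework::Tensor padding_data;  // shape [up_pad + down_pad, 2]
//   framework::Tensor col;           // shape [4, 3 * 2]
//   ContextProjectFunctor<platform::CPUPlace, float> project;
//   // Forward pass: gradient is false, so input_grad / pad_grad are ignored.
//   project(ctx, in, padding_data, col,
//           /* padding_trainable= */ false, /* context_start= */ -1,
//           /* context_length= */ 3, /* context_stride= */ 1,
//           /* up_pad= */ 1, /* down_pad= */ 1, /* gradient= */ false,
//           /* input_grad= */ false, /* pad_grad= */ false);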

}  // namespace math
}  // namespace operators
}  // namespace paddle