/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/operators/math/im2col.h"
#include "paddle/fluid/operators/math/math_function.h"

namespace paddle {
namespace operators {
namespace math {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;

/*
 * \brief Context projection concatenates features in adjacent time-steps in
 * a sequence. The i-th row of the output is the concatenation of
 * context_length consecutive rows of the input, starting at row
 * i + context_start.
 * ContextProjectGradFunctor is the inverse process of ContextProjectFunctor.
 *
 * \param in            Input data.
 * \param Shape         The shape of Input data:
 *                        [mini-batch, input_hidden_size].
 *
 * \param padding_data  Padding data.
 * \param Shape         The shape of Padding data:
 *                        [up_pad + down_pad, input_hidden_size].
 *
 * \param col           Col data.
 * \param Shape         The shape of Col data:
 *                        [mini-batch, context_length * input_hidden_size].
 *
 * For a mini-batch of 2 variable-length sentences, containing 3 and 1
 * time-steps respectively:
 *
 * Assume the input (X) is a [4, M, N] float LoDTensor with
 * X->lod()[0] = [0, 3, 4].
 * For simplicity, we further assume M=1 and N=2.
 *
 * X = [[a1, a2;
 *       b1, b2;
 *       c1, c2]
 *      [d1, d2]]
 *
 * That is, input (X) contains 4 words, and each word is represented by a
 * vector of dimension 2.
 *
 * - Case1:
 *   If context_start is -1 and padding_trainable is false, we pad with
 *   zeros rather than learned weights. With context_length = 3, the
 *   output (Out) is:
 *
 *   Out = [[0,  0,  a1, a2, b1, b2;
 *           a1, a2, b1, b2, c1, c2;
 *           b1, b2, c1, c2, 0,  0 ]
 *          [0,  0,  d1, d2, 0,  0 ]]
 *
 * - Case2:
 *   If context_start is -1 and padding_trainable is true, we pad with the
 *   learned weights. With context_length = 3, the output (Out) is:
 *
 *   Out = [[w1, w2, a1, a2, b1, b2;
 *           a1, a2, b1, b2, c1, c2;
 *           b1, b2, c1, c2, w3, w4]
 *          [w1, w2, d1, d2, w3, w4]]
 *
 */
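
/*
 * A note on the padding sizes (an assumption about the caller, matching how
 * the sequence_conv operator derives them):
 *
 *   up_pad   = std::max(0, -context_start);
 *   down_pad = std::max(0, context_start + context_length - 1);
 *
 * For the two cases above (context_start = -1, context_length = 3) this
 * gives up_pad = 1 and down_pad = 1: one padded row at the top and one at
 * the bottom of each sequence's context windows.
 */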

template <typename DeviceContext, typename T>
class ContextProjectFunctor {
 public:
  void operator()(const DeviceContext& context, const LoDTensor& in,
                  const Tensor& padding_data, bool padding_trainable,
                  const int context_start, const int context_length,
                  const int context_stride, const int up_pad,
                  const int down_pad, Tensor* col) {
    auto lod_level_0 = in.lod()[0];

    math::Im2ColFunctor<math::ColFormat::kOCF, DeviceContext, T> im2col_ocf;

    std::vector<int> dilation({1, 1});
    std::vector<int> padding({up_pad, 0, down_pad, 0});
    std::vector<int> stride({context_stride, 1});
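
    // im2col views each sequence as a single-channel image of shape
    // [sequence_length, sequence_width]: only the time (height) axis is
    // padded (up_pad rows above, down_pad rows below) and strided by
    // context_stride.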

    int input_row_begin, input_row_end;
    int sequence_height, sequence_width;
    sequence_width = in.dims()[1];

    for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
      input_row_begin = (context_start > 0)
                            ? static_cast<int>(lod_level_0[i]) + context_start
                            : static_cast<int>(lod_level_0[i]);
      input_row_end = static_cast<int>(lod_level_0[i + 1]);

      Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
                                static_cast<int>(lod_level_0[i + 1]));

      sequence_height = static_cast<int>(out_t.dims()[0]);

      if (input_row_begin < input_row_end) {
        Tensor in_t = in.Slice(input_row_begin, input_row_end);

        std::vector<int64_t> output_shape(
            {sequence_height, 1, 1, context_length,
             sequence_width});  // output_height, output_width,
        // input_channels, filter_height, filter_width
        out_t.Resize(framework::make_ddim(output_shape));

        std::vector<int64_t> input_shape(
            {1, input_row_end - input_row_begin,
             sequence_width});  // input_channels, input_height, input_width
        in_t.Resize(framework::make_ddim(input_shape));
        im2col_ocf(context, in_t, dilation, stride, padding, &out_t);
        out_t.Resize({sequence_height, context_length * sequence_width});
      }
    }
    if (padding_trainable) {
      for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
        Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
                                  static_cast<int>(lod_level_0[i + 1]));

        sequence_height = static_cast<int>(out_t.dims()[0]);

        // Copy the trainable padding weights into out_t; reshape first so
        // that each context position within a window becomes its own row.
        out_t.Resize({sequence_height * context_length, sequence_width});

        if (up_pad > 0) {  // add up pad
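          // Each of the first `padding_rows` output rows is missing rows at
          // the front of its context window; fill those positions from the
          // trainable up-padding (rows k .. k + padding_size - 1 of
          // padding_data for output row k).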
          int padding_rows = std::min(
              up_pad, static_cast<int>(lod_level_0[i + 1] - lod_level_0[i]));

          for (int k = 0; k < padding_rows; ++k) {
            int padding_size =
                k + context_length < up_pad ? context_length : up_pad - k;
            Tensor out_t_sub = out_t.Slice(k * context_length,
                                           k * context_length + padding_size);
            Tensor w_sub = padding_data.Slice(k, k + padding_size);
            framework::TensorCopy(w_sub, context.GetPlace(), context,
                                  &out_t_sub);
          }
        }
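        // Symmetrically, the last output rows have context windows that run
        // past the end of the sequence; fill the tail positions from the
        // trainable down-padding, which is stored after the up_pad rows of
        // padding_data (hence the `up_pad + padding_idx` offset below).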
        if (down_pad > 0) {  // add down pad
          int down_pad_begin_row =
              std::max(0,
                       (sequence_height - context_start - context_length) + 1) +
              1;
          int padding_begin = std::max(0, context_start - sequence_height);
          int padding_size =
              sequence_height - context_start >= context_length
                  ? 1
                  : context_length - (sequence_height - context_start);
          if (context_start >= sequence_height) padding_size = context_length;
          int padding_idx = padding_begin;
          for (int t = 0; t + down_pad_begin_row <= sequence_height;
               ++t, ++padding_size) {
            if (context_start >= sequence_height) padding_size = context_length;
            if (padding_size > context_length) {
              padding_size = context_length;
              padding_idx++;
            }
            if (padding_begin > 0 || sequence_height == context_start)
              padding_idx = padding_begin + t;

            Tensor out_t_sub = out_t.Slice(
                (down_pad_begin_row + t) * context_length - padding_size,
                (down_pad_begin_row + t) * context_length);
            Tensor w_sub = padding_data.Slice(
                up_pad + padding_idx, up_pad + padding_idx + padding_size);
            framework::TensorCopy(w_sub, context.GetPlace(), context,
                                  &out_t_sub);
          }
        }
        out_t.Resize({sequence_height, context_length * sequence_width});
      }
    }
  }
};
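
/*
 * A minimal forward usage sketch, illustrative only: `dev_ctx`, `x`,
 * `padding_w`, and `col` are hypothetical names, and the caller (e.g. the
 * sequence_conv operator) is assumed to have allocated `col` with shape
 * [x.dims()[0], context_length * x.dims()[1]].
 *
 *   math::ContextProjectFunctor<platform::CPUDeviceContext, float> project;
 *   // context_start = -1, context_length = 3, context_stride = 1,
 *   // up_pad = 1, down_pad = 1 (the Case2 configuration above)
 *   project(dev_ctx, x, padding_w, true, -1, 3, 1, 1, 1, &col);
 */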

template <typename DeviceContext, typename T>
class ContextProjectGradFunctor {
 public:
  void operator()(const DeviceContext& context, const LoDTensor& in,
                  bool padding_trainable, const int context_start,
                  const int context_length, const int context_stride,
                  const int up_pad, const int down_pad, bool pad_grad,
                  bool input_grad, Tensor* padding_data, Tensor* col) {
    auto lod_level_0 = in.lod()[0];

    math::Col2ImFunctor<math::ColFormat::kOCF, DeviceContext, T> col2im_ocf;

    std::vector<int> dilation({1, 1});
    std::vector<int> padding({up_pad, 0, down_pad, 0});
    std::vector<int> stride({context_stride, 1});

    int input_row_begin, input_row_end;
    int sequence_height, sequence_width;
    sequence_width = in.dims()[1];

    if (input_grad) {
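      // col2im is the adjoint of im2col: it scatter-adds every gradient
      // column in `col` back into the input-gradient rows it was gathered
      // from, so `in` is assumed to be zero-initialized by the caller.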
      for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
        input_row_begin = (context_start > 0)
                              ? static_cast<int>(lod_level_0[i]) + context_start
                              : static_cast<int>(lod_level_0[i]);
        input_row_end = static_cast<int>(lod_level_0[i + 1]);

        Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
                                  static_cast<int>(lod_level_0[i + 1]));

        sequence_height = static_cast<int>(out_t.dims()[0]);

        if (input_row_begin < input_row_end) {
          Tensor in_t = in.Slice(input_row_begin, input_row_end);

          std::vector<int64_t> output_shape(
              {sequence_height, 1, 1, context_length,
               sequence_width});  // output_height, output_width,
          // input_channels, filter_height, filter_width
          out_t.Resize(framework::make_ddim(output_shape));

          std::vector<int64_t> input_shape(
              {1, input_row_end - input_row_begin,
               sequence_width});  // input_channels, input_height, input_width
          in_t.Resize(framework::make_ddim(input_shape));

          col2im_ocf(context, out_t, dilation, stride, padding, &in_t);
          out_t.Resize({sequence_height, context_length * sequence_width});
        }
      }
    }
    if (pad_grad) {
      if (padding_trainable) {
        for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
          Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
                                    static_cast<int>(lod_level_0[i + 1]));

          sequence_height = static_cast<int>(out_t.dims()[0]);
          out_t.Resize({sequence_height * context_length, sequence_width});

          if (up_pad > 0) {
            int padding_rows = std::min(
                up_pad, static_cast<int>(lod_level_0[i + 1] - lod_level_0[i]));

            for (int k = 0; k < padding_rows; ++k) {
              int padding_size =
                  k + context_length < up_pad ? context_length : up_pad - k;
              Tensor out_t_sub = out_t.Slice(k * context_length,
                                             k * context_length + padding_size);
              Tensor w_sub = padding_data->Slice(k, k + padding_size);
              axpy<DeviceContext, T>(context, w_sub.numel(), static_cast<T>(1),
                                     out_t_sub.data<T>(), w_sub.data<T>());
            }
          }
          if (down_pad > 0) {
            int down_pad_begin_row =
                std::max(
                    0, (sequence_height - context_start - context_length) + 1) +
                1;
            int padding_begin = std::max(0, context_start - sequence_height);
            int padding_size =
                sequence_height - context_start >= context_length
                    ? 1
                    : context_length - (sequence_height - context_start);
            if (context_start >= sequence_height) padding_size = context_length;
            int padding_idx = padding_begin;
            for (int t = 0; t + down_pad_begin_row <= sequence_height;
                 ++t, ++padding_size) {
              if (context_start >= sequence_height)
                padding_size = context_length;
              if (padding_size > context_length) {
                padding_size = context_length;
                padding_idx++;
              }
              if (padding_begin > 0 || sequence_height == context_start)
                padding_idx = padding_begin + t;

              Tensor out_t_sub = out_t.Slice(
                  (down_pad_begin_row + t) * context_length - padding_size,
                  (down_pad_begin_row + t) * context_length);
              Tensor w_sub = padding_data->Slice(
C
chengduoZH 已提交
294
                  up_pad + padding_idx, up_pad + padding_idx + padding_size);
              axpy<DeviceContext, T>(context, w_sub.numel(), static_cast<T>(1),
                                     out_t_sub.data<T>(), w_sub.data<T>());
            }
          }
          out_t.Resize({sequence_height, context_length * sequence_width});
        }
      }
    }
  }
};
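
/*
 * A minimal backward usage sketch, illustrative only: `dev_ctx`, `d_x`,
 * `d_padding_w`, and `d_col` are hypothetical names, with `d_x` and
 * `d_padding_w` assumed to be allocated and zero-initialized by the caller.
 *
 *   math::ContextProjectGradFunctor<platform::CPUDeviceContext, float> grad;
 *   // pad_grad = false, input_grad = true: scatter-add d_col into d_x
 *   grad(dev_ctx, d_x, true, -1, 3, 1, 1, 1, false, true, nullptr, &d_col);
 *   // pad_grad = true, input_grad = false: accumulate d_col into the
 *   // padding-weight gradient d_padding_w via axpy
 *   grad(dev_ctx, d_x, true, -1, 3, 1, 1, 1, true, false, &d_padding_w,
 *        &d_col);
 */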

}  // namespace math
}  // namespace operators
}  // namespace paddle