/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "ContextProjectionOp.h"
#include "paddle/math/Matrix.h"
#include "paddle/math/Vector.h"

namespace paddle {

template <>
void ContextProjectionForward<DEVICE_TYPE_CPU>(CpuMatrix& out_mat,
                                               const CpuMatrix& input_mat,
                                               const CpuMatrix& weight_mat,
                                               const CpuIVector& seq_vec,
                                               size_t context_length,
                                               int context_start,
                                               size_t begin_pad) {
  const int* starts = seq_vec.getData();
  const size_t num_sequences = seq_vec.getSize() - 1;
  for (size_t i = 0; i < num_sequences; ++i) {
    for (size_t j = 0; j < context_length; ++j) {
      int begin = starts[i] + context_start + j;
      int end = starts[i + 1] + context_start + j;
      int dst_begin = starts[i];
      int dst_end = starts[i + 1];
      if (begin < starts[i]) {
        int64_t pad_size =
            std::min(starts[i] - begin, starts[i + 1] - starts[i]);
        MatrixPtr mat = out_mat.subMatrix(starts[i], pad_size);
        if (weight_mat) {
          MatrixPtr sub =
              const_cast<CpuMatrix&>(weight_mat).subMatrix(j, pad_size);
          mat->addAtOffset(*sub, j * input_mat.getWidth());
        }
        dst_begin = starts[i] + pad_size;
        begin = starts[i];
      }
      if (end > starts[i + 1]) {
        int64_t pad_size =
            std::min(end - starts[i + 1], starts[i + 1] - starts[i]);
        MatrixPtr mat = out_mat.subMatrix(starts[i + 1] - pad_size, pad_size);
        if (weight_mat) {
          MatrixPtr sub =
              const_cast<CpuMatrix&>(weight_mat)
                  .subMatrix(begin_pad + context_start + j - pad_size,
                             pad_size);
          mat->addAtOffset(*sub, j * input_mat.getWidth());
        }
        dst_end = starts[i + 1] - pad_size;
        end = starts[i + 1];
      }
      if (end <= begin) continue;
      MatrixPtr src =
          const_cast<CpuMatrix&>(input_mat).subMatrix(begin, end - begin);
      MatrixPtr dst = out_mat.subMatrix(dst_begin, dst_end - dst_begin);
      dst->addAtOffset(*src, j * input_mat.getWidth());
    }
  }
}
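
/*
 * Illustrative sketch (not part of the build): with context_start = -1 and
 * context_length = 3, the output row for position t concatenates input rows
 * t-1, t, and t+1 of the same sequence. For a sequence {r0, r1, r2} with
 * input width D, the output rows (width 3 * D) are:
 *
 *   out[0] = [ pad , r0 , r1 ]
 *   out[1] = [ r0  , r1 , r2 ]
 *   out[2] = [ r1  , r2 , pad ]
 *
 * where "pad" is either left untouched (no trainable padding) or filled from
 * the corresponding rows of weight_mat by the addAtOffset() calls above.
 */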

/**
 * \param inputs[0] input value.
 * \param inputs[1] input weight.
 * \param inputs[2] input sequence.
 * \param outputs[0] output value.
 */
template <DeviceType Device>
class ContextProjectionForwardFunc : public FunctionBase {
public:
  void init(const FuncConfig& config) override {
    context_length_ = config.get<size_t>("context_length");
    context_start_ = config.get<int>("context_start");
    begin_pad_ = config.get<size_t>("begin_pad");
  }

  void calc(const BufferArgs& inputs,
            const BufferArgs& outputs,
            const BufferArgs& inouts) override {
    CHECK_EQ(3, inputs.size());
    CHECK_EQ(1, outputs.size());
    CHECK_EQ(0, inouts.size());

    CHECK(outputs[0].data() && inputs[0].data() && inputs[2].data());
    CHECK_EQ(outputs[0].shape().ndims(), 2);
    CHECK_EQ(inputs[0].shape().ndims(), 2);
    CHECK_EQ(inputs[1].shape().ndims(), 2);
    CHECK_EQ(inputs[2].shape().ndims(), 1);
    /// dim of output = dim of input * context_length
    CHECK_EQ(outputs[0].shape()[1], inputs[0].shape()[1] * context_length_);
    /// dim of input == dim of weight
    CHECK_EQ(inputs[0].shape()[1], inputs[1].shape()[1]);
  /// input and output have the same batch_size
    CHECK_EQ(inputs[0].shape()[0], outputs[0].shape()[0]);

    auto out_mat = outputs[0].matrix<Device>();
    auto in_mat = inputs[0].matrix<Device>();
    auto w_mat = !inputs[1].data()
                     ? typename Tensor<real, Device>::Matrix(nullptr, 0, 0)
                     : inputs[1].matrix<Device>();
    auto seq_vec = inputs[2].vector<int, Device>();
    ContextProjectionForward<Device>(out_mat,
                                     in_mat,
                                     w_mat,
                                     seq_vec,
                                     context_length_,
                                     context_start_,
                                     begin_pad_);
  }

private:
  size_t context_length_;
  int context_start_;
  size_t begin_pad_;
};
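
/*
 * Minimal usage sketch (illustrative only; assumes the FuncConfig setters and
 * the "<FuncName>-<Device>" registry key produced by REGISTER_TYPED_FUNC at
 * the bottom of this file):
 *
 *   size_t context_length = 3;
 *   int context_start = -1;
 *   FunctionBase* forward = FunctionBase::funcRegistrar_.createByType(
 *       "ContextProjectionForward-CPU");
 *   forward->init(FuncConfig()
 *                     .set("context_length", context_length)
 *                     .set("context_start", context_start)
 *                     .set("begin_pad",
 *                          (size_t)std::max(0, -context_start)));
 *   forward->calc(inputs, outputs, inouts);  // BufferArgs built by the caller
 */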

template <>
void ContextProjectionBackward<DEVICE_TYPE_CPU>(CpuMatrix& out_grad_mat,
                                                CpuMatrix& in_grad_mat,
                                                CpuMatrix& w_grad_mat,
                                                const CpuIVector& seq_vec,
                                                size_t context_length,
                                                int context_start,
                                                size_t begin_pad,
                                                bool is_padding,
                                                size_t total_pad) {
  size_t input_dim = in_grad_mat ? in_grad_mat.getWidth()
                                 : w_grad_mat ? w_grad_mat.getWidth() : 0;
  const int* starts = seq_vec.getData();
  size_t num_sequences = seq_vec.getSize() - 1;
  for (size_t i = 0; i < num_sequences; ++i) {
    for (size_t j = 0; j < context_length; ++j) {
      int begin = starts[i] + context_start + j;
      int end = starts[i + 1] + context_start + j;
      int dst_begin = starts[i];
      int dst_end = starts[i + 1];
      if (begin < starts[i]) {
        int64_t pad_size =
            std::min(starts[i] - begin, starts[i + 1] - starts[i]);
        if (is_padding && w_grad_mat) {
          MatrixPtr mat = out_grad_mat.subMatrix(starts[i], pad_size);
          MatrixPtr sub = w_grad_mat.subMatrix(j, pad_size);
          sub->addAtOffset(*mat, j * input_dim);
        }
        dst_begin = starts[i] + pad_size;
        begin = starts[i];
      }
      if (end > starts[i + 1]) {
        int64_t pad_size =
            std::min(end - starts[i + 1], starts[i + 1] - starts[i]);
        if (is_padding && w_grad_mat) {
          MatrixPtr mat =
              out_grad_mat.subMatrix(starts[i + 1] - pad_size, pad_size);
          MatrixPtr sub = w_grad_mat.subMatrix(
              begin_pad + context_start + j - pad_size, pad_size);
          sub->addAtOffset(*mat, j * input_dim);
        }
        dst_end = starts[i + 1] - pad_size;
        end = starts[i + 1];
      }
      if (end <= begin) continue;
      if (!in_grad_mat) continue;
      MatrixPtr src = in_grad_mat.subMatrix(begin, end - begin);
      MatrixPtr dst = out_grad_mat.subMatrix(dst_begin, dst_end - dst_begin);
      src->addAtOffset(*dst, j * input_dim);
    }
  }
}
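
/*
 * Illustrative sketch (not part of the build): the backward pass mirrors the
 * forward addAtOffset() calls with source and destination swapped. Continuing
 * the forward example (context_start = -1, context_length = 3), input row r1
 * was read by three output blocks, so its gradient accumulates all of them:
 *
 *   in_grad[1] += out_grad[0](block 2) + out_grad[1](block 1)
 *               + out_grad[2](block 0)
 *
 * and, when is_padding is set, the gradients of the padding blocks are added
 * into w_grad_mat at the same row offsets the forward pass read from.
 */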

/**
 * \param inputs[0] input grad.
 * \param inputs[1] weight grad.
 * \param inputs[2] input sequence.
 * \param outputs[0] output grad.
 */
template <DeviceType Device>
class ContextProjectionBackwardFunc : public FunctionBase {
public:
  void init(const FuncConfig& config) override {
    context_length_ = config.get<size_t>("context_length");
    context_start_ = config.get<int>("context_start");
    begin_pad_ = config.get<size_t>("begin_pad");
    is_padding_ = config.get<bool>("is_padding");
    total_pad_ = config.get<size_t>("total_pad");
  }

  void calc(const BufferArgs& inputs,
            const BufferArgs& outputs,
            const BufferArgs& inouts) override {
    CHECK_EQ(3, inputs.size());
    CHECK_EQ(1, outputs.size());
    CHECK_EQ(0, inouts.size());

    CHECK(outputs[0].data() && inputs[2].data());
    CHECK_EQ(outputs[0].shape().ndims(), 2);
    CHECK_EQ(inputs[0].shape().ndims(), 2);
    CHECK_EQ(inputs[1].shape().ndims(), 2);
    CHECK_EQ(inputs[2].shape().ndims(), 1);

    /// dim of input == dim of weight
    CHECK_EQ(inputs[0].shape()[1], inputs[1].shape()[1]);
    /// input and output have the same batch_size
    CHECK_EQ(inputs[0].shape()[0], outputs[0].shape()[0]);
    /// dim of output = dim of input * context_length
    CHECK_EQ(outputs[0].shape()[1], inputs[0].shape()[1] * context_length_);

    auto out_grad_mat = outputs[0].matrix<Device>();
    auto in_grad_mat =
        !inputs[0].data() ? typename Tensor<real, Device>::Matrix(nullptr, 0, 0)
                          : inputs[0].matrix<Device>();
    auto w_grad_mat = !inputs[1].data()
                          ? typename Tensor<real, Device>::Matrix(nullptr, 0, 0)
                          : inputs[1].matrix<Device>();
    auto seq_vec = inputs[2].vector<int, Device>();
    ContextProjectionBackward<Device>(out_grad_mat,
                                      in_grad_mat,
                                      w_grad_mat,
                                      seq_vec,
                                      context_length_,
                                      context_start_,
                                      begin_pad_,
                                      is_padding_,
                                      total_pad_);
  }

private:
  size_t context_length_;
  int context_start_;
  size_t begin_pad_;
  bool is_padding_;
  size_t total_pad_;
};
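
/*
 * Illustrative config sketch for the backward function (same assumptions as
 * the forward sketch above); init() additionally reads the two padding keys:
 *
 *   FunctionBase* backward = FunctionBase::funcRegistrar_.createByType(
 *       "ContextProjectionBackward-CPU");
 *   backward->init(FuncConfig()
 *                      .set("context_length", context_length)
 *                      .set("context_start", context_start)
 *                      .set("begin_pad", begin_pad)
 *                      .set("is_padding", true)
 *                      .set("total_pad", total_pad));
 */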

#if 0
/**
 * \param inputs[0] input grad.
 * \param inputs[1] input sequence.
 * \param outputs[0] output grad.
 */
template <DeviceType Device>
class ContextProjectionBackwardDataFunc : public FunctionBase {
public:
  void init(const FuncConfig& config) override {
    context_length_ = config.get<size_t>("context_length");
    context_start_ = config.get<int>("context_start");
  }

  void calc(const Arguments& inputs,
            const Arguments& outputs,
            const Arguments& inouts) override {
    CHECK_EQ(2, inputs.size());
    CHECK_EQ(1, outputs.size());
    CHECK_EQ(0, inouts.size());

    CHECK(inputs[0].getData() && outputs[0].getData() && inputs[1].getData());
    CHECK_EQ(outputs[0].dims_.size(), 2);
    CHECK_EQ(inputs[0].dims_.size(), 2);
    CHECK_EQ(inputs[1].dims_.size(), 1);
    CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_);
    /// input and output have the same batch_size
    CHECK_EQ(inputs[0].dims_[0], outputs[0].dims_[0]);

    auto out_grad_mat = std::make_shared<typename MatrixT<Device>::type>(
        outputs[0].getData(), outputs[0].dims_[0], outputs[0].dims_[1]);
    const auto in_grad_mat = std::make_shared<typename MatrixT<Device>::type>(
        inputs[0].getData(), inputs[0].dims_[0], inputs[0].dims_[1]);
    typename SequenceT<Device>::type seq_vec(
        inputs[1].dims_[0], reinterpret_cast<int*>(inputs[1].getData()));

    ContextProjectionBackwardData<Device>(out_grad_mat.get(),
                                          in_grad_mat.get(),
                                          seq_vec,
                                          context_length_,
                                          context_start_);
  }

private:
  size_t context_length_;
  int context_start_;
};

/**
 * \param inputs[0] weight grad.
 * \param inputs[1] input sequence.
 * \param outputs[0] output grad.
 */
template <DeviceType Device>
class ContextProjectionBackwardWeightFunc : public FunctionBase {
public:
  void init(const FuncConfig& config) override {
    context_length_ = config.get<size_t>("context_length");
    context_start_ = config.get<int>("context_start");
    begin_pad_ = config.get<size_t>("begin_pad");
    total_pad_ = config.get<size_t>("total_pad");
  }

  void calc(const Arguments& inputs,
            const Arguments& outputs,
            const Arguments& inouts) override {
    CHECK_EQ(2, inputs.size());
    CHECK_EQ(1, outputs.size());
    CHECK_EQ(0, inouts.size());

    CHECK(inputs[0].getData() && outputs[0].getData() && inputs[1].getData());
    CHECK_EQ(outputs[0].dims_.size(), 2);
    CHECK_EQ(inputs[0].dims_.size(), 2);
    CHECK_EQ(inputs[1].dims_.size(), 1);
    CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_);

    auto out_grad_mat = std::make_shared<typename MatrixT<Device>::type>(
        outputs[0].getData(), outputs[0].dims_[0], outputs[0].dims_[1]);
    auto w_grad_mat = std::make_shared<typename MatrixT<Device>::type>(
        inputs[0].getData(), inputs[0].dims_[0], inputs[0].dims_[1]);
    typename SequenceT<Device>::type seq_vec(
        inputs[1].dims_[0], reinterpret_cast<int*>(inputs[1].getData()));

    ContextProjectionBackwardWeight<Device>(out_grad_mat.get(),
                                            w_grad_mat.get(),
                                            seq_vec,
                                            context_length_,
                                            context_start_,
                                            total_pad_,
                                            begin_pad_);
  }

private:
  size_t context_length_;
  int context_start_;
  size_t begin_pad_;
  size_t total_pad_;
};
#endif

REGISTER_TYPED_FUNC(ContextProjectionForward,
                    CPU,
                    ContextProjectionForwardFunc);
REGISTER_TYPED_FUNC(ContextProjectionBackward,
                    CPU,
                    ContextProjectionBackwardFunc);
#ifndef PADDLE_ONLY_CPU
REGISTER_TYPED_FUNC(ContextProjectionForward,
                    GPU,
                    ContextProjectionForwardFunc);
REGISTER_TYPED_FUNC(ContextProjectionBackward,
                    GPU,
                    ContextProjectionBackwardFunc);
#if 0
REGISTER_TYPED_FUNC(ContextProjectionBackwardData,
                    GPU,
                    ContextProjectionBackwardDataFunc);
REGISTER_TYPED_FUNC(ContextProjectionBackwardWeight,
                    GPU,
                    ContextProjectionBackwardWeightFunc);
#endif
#endif
}  // namespace paddle