/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/math_function.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

// Pooling strategies accepted by the sequence_pool operator's
// integer "strategy" attribute.
enum SeqPoolType {
  AVERAGE = 0,  // mean over the time steps of a sequence
  SUM = 1,      // sum over the time steps
  SQRT = 2,     // sum scaled by 1/sqrt(sequence length), a.k.a. square_root_n
  MAX = 3,      // element-wise maximum over the time steps
  LAST = 4,     // take the last time step
  FIRST = 5     // take the first time step
};

template <typename Place, typename T>
Y
Yu Yang 已提交
42
class SequencePoolKernel : public framework::OpKernel<T> {
43 44 45 46
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* in = context.Input<LoDTensor>("X");
    auto* out = context.Output<LoDTensor>("Out");
47
    int strategy = context.Attr<int>("strategy");
48 49

    auto dims = in->dims();
Q
Qiao Longfei 已提交
50
    auto lod = in->lod();
51 52
    int64_t w = in->numel() / dims[0];

Q
Qiao Longfei 已提交
53 54 55 56 57 58 59 60 61 62 63
    // InferShape by lod
    PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now.");
    PADDLE_ENFORCE_GE(
        dims[0],
        /*batch size = */ static_cast<int64_t>(lod[0].size() - 1),
        "The first dimension of Input(X) must be large than batch size.");
    dims[0] = lod[0].size() - 1;
    out->Resize({dims});

    auto lod_level_0 = lod[0];

64 65
    out->mutable_data<T>(context.GetPlace());
    auto place = context.GetEigenDevice<Place>();
Q
Qiao Longfei 已提交
66 67 68
    for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
      Tensor in_t = in->Slice<T>(static_cast<int>(lod_level_0[i]),
                                 static_cast<int>(lod_level_0[i + 1]));
69
      Tensor out_t = out->Slice<T>(i, i + 1);
Q
Qiao Longfei 已提交
70
      int64_t h = static_cast<int64_t>(lod_level_0[i + 1] - lod_level_0[i]);
71 72
      auto in_e = EigenMatrix<T>::From(in_t, framework::make_ddim({h, w}));
      auto out_e = EigenVector<T>::Flatten(out_t);
73 74 75 76 77 78 79 80

      switch (strategy) {
        case AVERAGE:
          out_e.device(place) = in_e.mean(Eigen::array<int, 1>({{0}}));
          break;
        case SUM:
          out_e.device(place) = in_e.sum(Eigen::array<int, 1>({{0}}));
          break;
L
Luo Tao 已提交
81 82 83 84
        case SQRT:
          out_e.device(place) = in_e.sum(Eigen::array<int, 1>({{0}})) /
                                std::sqrt(static_cast<T>(h));
          break;
L
Luo Tao 已提交
85 86 87
        case MAX:
          out_e.device(place) = in_e.maximum(Eigen::array<int, 1>({{0}}));
          break;
L
Luo Tao 已提交
88 89 90 91 92 93
        case LAST:
          out_e.device(place) = in_e.chip(h - 1, 0);
          break;
        case FIRST:
          out_e.device(place) = in_e.chip(0, 0);
          break;
94
        default:
L
Luo Tao 已提交
95
          PADDLE_THROW("unsupported pooling strategy");
96
      }
97 98 99 100 101
    }
  }
};

template <typename Place, typename T>
Y
Yu Yang 已提交
102
class SequencePoolGradKernel : public framework::OpKernel<T> {
103 104
 public:
  void Compute(const framework::ExecutionContext& context) const override {
105
    auto* in = context.Input<LoDTensor>("X");
L
Luo Tao 已提交
106
    auto* out = context.Input<LoDTensor>("Out");
107
    auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
L
Luo Tao 已提交
108
    auto* out_g = context.Input<LoDTensor>(framework::GradVarName("Out"));
109
    int strategy = context.Attr<int>("strategy");
110 111

    auto dims = in->dims();
112
    auto lod = in->lod()[0];
113 114 115
    int64_t w = in->numel() / dims[0];

    in_g->mutable_data<T>(context.GetPlace());
L
Luo Tao 已提交
116 117
    if (strategy == LAST || strategy == FIRST) {
      // set X@Grad be zero at first when strategy is LAST/FIRST
Q
qijun 已提交
118 119
      math::SetConstant<Place, T> functor;
      functor(context.device_context(), in_g, 0);
L
Luo Tao 已提交
120
    }
121
    auto place = context.GetEigenDevice<Place>();
122 123 124
    for (int i = 0; i < static_cast<int>(lod.size()) - 1; ++i) {
      auto in_g_t = in_g->Slice<T>(static_cast<int>(lod[i]),
                                   static_cast<int>(lod[i + 1]));
125
      auto out_g_t = out_g->Slice<T>(i, i + 1);
126
      int64_t h = static_cast<int64_t>(lod[i + 1] - lod[i]);
127 128
      auto in_g_e = EigenMatrix<T>::From(in_g_t, {h, w});
      auto out_g_e = EigenMatrix<T>::From(out_g_t, {1, w});
129
      Eigen::DSizes<int, 2> bcast(h, 1);
130 131 132 133 134 135 136 137

      switch (strategy) {
        case AVERAGE:
          in_g_e.device(place) = (out_g_e / static_cast<T>(h)).broadcast(bcast);
          break;
        case SUM:
          in_g_e.device(place) = (out_g_e).broadcast(bcast);
          break;
L
Luo Tao 已提交
138 139 140 141
        case SQRT:
          in_g_e.device(place) =
              (out_g_e / std::sqrt(static_cast<T>(h))).broadcast(bcast);
          break;
L
Luo Tao 已提交
142 143 144 145 146 147 148 149 150 151 152 153 154
        case MAX: {
          auto in_t = in->Slice<T>(static_cast<int>(lod[i]),
                                   static_cast<int>(lod[i + 1]));
          auto out_t = out->Slice<T>(i, i + 1);
          auto in_e = EigenMatrix<T>::From(in_t, {h, w});
          auto out_e = EigenMatrix<T>::From(out_t, {1, w});
          auto equals = in_e == out_e.broadcast(bcast);
          auto ones = in_g_e.constant(1);
          auto zeros = in_g_e.constant(0);
          in_g_e.device(place) =
              out_g_e.broadcast(bcast) * equals.select(ones, zeros);
          break;
        }
L
Luo Tao 已提交
155 156 157 158 159 160
        case LAST:
          in_g_e.chip(h - 1, 0).device(place) = out_g_e;
          break;
        case FIRST:
          in_g_e.chip(0, 0).device(place) = out_g_e;
          break;
161
        default:
L
Luo Tao 已提交
162
          PADDLE_THROW("unsupported pooling strategy");
163
      }
164 165 166 167 168 169
    }
  }
};

}  // namespace operators
}  // namespace paddle