/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <vector>

#include <boost/preprocessor/arithmetic/div.hpp>
#include <boost/preprocessor/arithmetic/mod.hpp>
#include <boost/preprocessor/comparison/greater.hpp>
#include <boost/preprocessor/comparison/greater_equal.hpp>
#include <boost/preprocessor/control/if.hpp>
#include <boost/preprocessor/repetition/repeat.hpp>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"

#define MAX_RANK_SUPPORTED 6

#define EXPAND_TEMPLATE(z, n, data) \
  case n + 1: {                     \
    Expand<n + 1>(context);         \
    break;                          \
  }
#define REP_EXPAND_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_TEMPLATE, ~)
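// For illustration only (not part of the build): REP_EXPAND_TEMPLATE(3)
// expands to
//   case 1: { Expand<1>(context); break; }
//   case 2: { Expand<2>(context); break; }
//   case 3: { Expand<3>(context); break; }
// which is how ExpandKernel::Compute dispatches on the runtime rank.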
// COND(n) is n >= (n % MAX_RANK_SUPPORTED), which is true for every n in
// [0, MAX_RANK_SUPPORTED), so the BOOST_PP_IF guard below keeps all cases.
#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
// n + 1 makes the generated cases run from 1 to MAX_RANK_SUPPORTED,
// matching the rank range enforced in ExpandGradKernel::Compute.
#define EXPAND_GRAD_CASE(n)                                            \
  case n + 1: {                                                        \
    ExpandBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
    break;                                                             \
  }
#define EXPAND_GRAD_TEMPLATE(z, n, data) \
  BOOST_PP_IF(COND(n), EXPAND_GRAD_CASE(n), )
#define REP_EXPAND_GRAD_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_GRAD_TEMPLATE, ~)
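// For illustration only: since COND(n) holds for every n below
// MAX_RANK_SUPPORTED, REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED) emits
//   case 1: { ExpandBackward<1>(context, reshape_dims_vec, reduce_dims_vec); break; }
// through
//   case 6: { ExpandBackward<6>(context, reshape_dims_vec, reduce_dims_vec); break; }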

namespace paddle {
namespace operators {
// Collects the runtime expand factors, in priority order: the "ExpandTimes"
// tensor input, then the "expand_times_tensor" tensor list, then the
// "expand_times" attribute.
inline std::vector<int> get_expand_times(
    const framework::ExecutionContext& ctx) {
  if (ctx.HasInput("ExpandTimes")) {
    auto* expand_tensor = ctx.Input<framework::LoDTensor>("ExpandTimes");
    auto* expand_data = expand_tensor->data<int>();
    framework::Tensor cpu_expand_tensor;
    if (platform::is_gpu_place(expand_tensor->place())) {
      TensorCopySync(*expand_tensor, platform::CPUPlace(), &cpu_expand_tensor);
      expand_data = cpu_expand_tensor.data<int>();
    }
    auto vec_expand_times =
        std::vector<int>(expand_data, expand_data + expand_tensor->numel());
    return vec_expand_times;
  }

  auto list_expand_times_tensor =
      ctx.MultiInput<framework::Tensor>("expand_times_tensor");
  if (list_expand_times_tensor.size() > 0) {
    // Each tensor in the list holds a single expand factor; copy GPU
    // tensors to CPU before dereferencing their data.
    std::vector<int> vec_expand_times;
    for (size_t i = 0; i < list_expand_times_tensor.size(); ++i) {
      auto tensor = list_expand_times_tensor[i];
      if (platform::is_gpu_place(tensor->place())) {
        framework::Tensor temp;
        TensorCopySync(*tensor, platform::CPUPlace(), &temp);
        vec_expand_times.push_back(*temp.data<int32_t>());
      } else {
        vec_expand_times.push_back(*tensor->data<int32_t>());
      }
    }

    return vec_expand_times;
  } else {
    return ctx.Attr<std::vector<int>>("expand_times");
  }
}
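// Example (illustrative) for get_expand_times above: with the attribute
// expand_times = {2, 3} and neither tensor input provided, it returns
// {2, 3}, so an input of shape [4, 5] expands to shape [8, 15].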

using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using framework::To32BitIndex;

template <typename DeviceContext, typename T>
class ExpandKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto rank = context.Input<Tensor>("X")->dims().size();
    PADDLE_ENFORCE_GE(
        rank, 1,
        platform::errors::InvalidArgument(
            "The number of dimensions of the input 'x' for Op(expand) "
            "must be greater than or equal to 1, but the value received is %d.",
            rank));
    PADDLE_ENFORCE_LE(
        rank, MAX_RANK_SUPPORTED,
        platform::errors::InvalidArgument(
            "The number of dimensions of the input 'x' for Op(expand) "
            "must be less than or equal to %d, but the value received is %d.",
            MAX_RANK_SUPPORTED, rank));
    switch (rank) { REP_EXPAND_TEMPLATE(MAX_RANK_SUPPORTED) }
  }

 protected:
  template <int Rank>
  void Expand(const framework::ExecutionContext& context) const {
    auto* in0 = context.Input<Tensor>("X");

    auto in_dims = in0->dims();
    auto expand_times = get_expand_times(context);
    PADDLE_ENFORCE_EQ(
        static_cast<size_t>(in_dims.size()), expand_times.size(),
        platform::errors::InvalidArgument(
            "The number of elements (%d) of 'expand_times' for "
            "Op(expand) must be equal to the number "
            "of dimensions (%d) of the input.",
            expand_times.size(), static_cast<size_t>(in_dims.size())));
    auto* out0 = context.Output<Tensor>("Out");
    Eigen::DSizes<int, Rank> bcast_dims;
    for (size_t i = 0; i < expand_times.size(); ++i) {
      bcast_dims[i] = expand_times[i];
    }

    framework::DDim out_dims(in_dims);
    for (size_t i = 0; i < expand_times.size(); ++i) {
      out_dims[i] *= expand_times[i];
    }

    out0->Resize(out_dims);
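    // e.g. (illustrative) with in_dims = [2, 3] and expand_times = [2, 1]:
    // bcast_dims = {2, 1} and out_dims = [4, 3]; the broadcast below tiles
    // the input twice along dimension 0.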
    auto x = EigenTensor<T, Rank>::From(*in0);
    out0->mutable_data<T>(context.GetPlace());
    auto y = EigenTensor<T, Rank>::From(*out0);
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();
    // Use 32-bit indexing when the element count fits in int32; Eigen
    // broadcasts run faster with 32-bit indices.
    bool use_32bit_index = y.size() < Eigen::NumTraits<int>::highest();
    if (use_32bit_index) {
      To32BitIndex(y).device(place) = To32BitIndex(x).broadcast(bcast_dims);
    } else {
      y.device(place) = x.broadcast(bcast_dims);
    }
  }
};

template <typename DeviceContext, typename T>
class ExpandGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* in0 = context.Input<Tensor>("X");
    auto expand_times = get_expand_times(context);
    auto x_dims = in0->dims();
    // 1. reshape_dims_vec is the broadcast parameter.
    // 2. reduce_dims_vec is the dimension parameter to compute gradients. For
    //    each dimension expanded, the gradients should be summed to original
    //    size.
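    //
    // e.g. (illustrative) with x_dims = [2, 3] and expand_times = [2, 1]:
    //   reshape_dims_vec = [2, 2, 1, 3], reduce_dims_vec = [0, 2];
    //   Out@GRAD of shape [4, 3] is viewed as [2, 2, 1, 3] and summed over
    //   dimensions {0, 2}, recovering X@GRAD of shape [2, 3].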
    std::vector<int> reshape_dims_vec;
    std::vector<int> reduce_dims_vec;
    for (size_t i = 0; i < expand_times.size(); ++i) {
      reduce_dims_vec.push_back(reshape_dims_vec.size());
      reshape_dims_vec.push_back(expand_times[i]);
      reshape_dims_vec.push_back(x_dims[i]);
    }

    int dims = reduce_dims_vec.size();

    bool just_copy = true;
    for (size_t i = 0; i < expand_times.size(); i++) {
      if (expand_times[i] != 1) {
        just_copy = false;
        break;
      }
    }
    // No reduction is needed when every expand factor is 1; just copy.
    if (just_copy) {
      auto* in0 = context.Input<Tensor>(framework::GradVarName("Out"));
      auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
      out0->mutable_data<T>(context.GetPlace());
      framework::TensorCopy(*in0, context.GetPlace(), context.device_context(),
                            out0);
    } else {
      PADDLE_ENFORCE_GE(dims, 1, platform::errors::InvalidArgument(
                                     "The number of dimensions of the input "
                                     "'Out@GRAD' for Op(expand_grad)"
                                     " must be greater than or equal to 1, but "
                                     "the value received is %d.",
                                     dims));
      PADDLE_ENFORCE_LE(dims, MAX_RANK_SUPPORTED,
                        platform::errors::InvalidArgument(
                            "The number of dimensions of the input 'Out@GRAD' "
                            "for Op(expand_grad) must be less than or equal "
                            "to %d, but the value received is %d.",
                            MAX_RANK_SUPPORTED, dims));
      switch (dims) { REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED) }
    }
  }

 protected:
  template <int Dims>
  void ExpandBackward(const framework::ExecutionContext& context,
                      const std::vector<int>& reshape_dims_vec,
                      const std::vector<int>& reduce_dims_vec) const {
    size_t reshape_size = reshape_dims_vec.size();
    size_t reduce_size = reduce_dims_vec.size();
    // The vectors must fill the fixed-size Eigen::DSizes declared below:
    // 2 * Dims reshape entries and Dims reduce entries.
    PADDLE_ENFORCE_EQ(reshape_size, static_cast<size_t>(Dims) * 2,
                      platform::errors::InvalidArgument(
                          "Inconsistent size between template Dims (%d) and "
                          "reshape dimensions (%d).",
                          Dims, reshape_size));
    PADDLE_ENFORCE_EQ(reduce_size, static_cast<size_t>(Dims),
                      platform::errors::InvalidArgument(
                          "Inconsistent size between template Dims (%d) and "
                          "reduce dimensions (%d).",
                          Dims, reduce_size));
    auto* in0 = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
    out0->mutable_data<T>(context.GetPlace());
    auto x_grad = EigenVector<T>::Flatten(*out0);
    Eigen::DSizes<int, Dims * 2> reshape_dims;
    for (size_t i = 0; i < reshape_size; ++i) {
      reshape_dims[i] = reshape_dims_vec[i];
    }
    Eigen::DSizes<int, Dims> reduce_dims;
    for (size_t i = 0; i < reduce_size; ++i) {
      reduce_dims[i] = reduce_dims_vec[i];
    }
    auto out_grad = EigenVector<T>::Flatten(*in0);
    x_grad.device(
        *context.template device_context<DeviceContext>().eigen_device()) =
        out_grad.reshape(reshape_dims)
            .sum(reduce_dims)
            .reshape(x_grad.dimensions());
  }
};

}  // namespace operators
}  // namespace paddle
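
// Kernel registration lives in expand_op.cc / expand_op.cu. As a sketch
// (the registered type list here is illustrative, not exhaustive):
//   REGISTER_OP_CPU_KERNEL(
//       expand,
//       ops::ExpandKernel<paddle::platform::CPUDeviceContext, float>,
//       ops::ExpandKernel<paddle::platform::CPUDeviceContext, double>);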