// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cstring>
#include <memory>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace operators {

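// Scatters one element of the stacked gradient dy back into the gradient of
// the input it came from. Entries of dx may be nullptr for inputs that do
// not require a gradient.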
template <typename VecDxType, typename T>
struct StackGradFunctor {
  HOSTDEVICE StackGradFunctor(const VecDxType &dx, const T *dy, int n, int post)
      : dx_(dx), dy_(dy), n_(n), post_(post) {}

  HOSTDEVICE void operator()(int idx) {
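    // dy is viewed as a [pre, n_, post_] tensor; decompose the flat index
    // into the outer slice i, the source tensor which_x, and the offset
    // x_index inside that tensor's flattened [pre, post_] layout.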
    int i = idx / (n_ * post_);
    int which_x = idx / post_ - i * n_;
    int x_index = i * post_ + idx % post_;
    if (dx_[which_x] != nullptr) dx_[which_x][x_index] = dy_[idx];
  }

 private:
  VecDxType dx_;
  const T *dy_;
  int n_;
  int post_;
};

template <typename DeviceContext, typename VecDxType, typename T>
static inline void StackGradFunctorForRange(const DeviceContext &ctx,
                                            const VecDxType &dx, const T *dy,
                                            int total_num, int n, int post) {
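  // ForRange invokes the functor once per index in [0, total_num) on the
  // device associated with ctx (a plain loop on CPU, a kernel launch on CUDA).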
  platform::ForRange<DeviceContext> for_range(ctx, total_num);
  for_range(StackGradFunctor<VecDxType, T>(dx, dy, n, post));
}

template <typename DeviceContext, typename T>
class StackKernel : public framework::OpKernel<T> {
  using Tensor = framework::LoDTensor;

 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto x = ctx.MultiInput<Tensor>("X");
    auto *y = ctx.Output<Tensor>("Y");

    int axis = ctx.Attr<int>("axis");
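    // A negative axis counts from the end; the stacked output has one more
    // dimension than each input, hence the +1.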
    if (axis < 0) axis += (x[0]->dims().size() + 1);

    int n = static_cast<int>(x.size());
    auto *y_data = y->mutable_data<T>(ctx.GetPlace());
    std::vector<const T *> x_datas(n);
    for (int i = 0; i < n; i++) x_datas[i] = x[i]->data<T>();

    int pre = 1, post = 1;
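    // Flatten each input to a [pre, post] block: pre is the product of the
    // dims before `axis`, post the product of the dims from `axis` onward.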
    auto &dim = x[0]->dims();
    for (auto i = 0; i < axis; ++i) pre *= dim[i];
    for (auto i = axis; i < dim.size(); ++i) post *= dim[i];

    auto x_data_arr = x_datas.data();

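    // Interleave the inputs along `axis`: for each of the `pre` outer
    // slices, copy one contiguous `post`-sized chunk from every input into y.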
    size_t x_offset = 0;
    size_t y_offset = 0;
    for (int i = 0; i < pre; i++) {
      for (int j = 0; j < n; j++) {
        std::memcpy(y_data + y_offset, x_data_arr[j] + x_offset,
                    post * sizeof(T));
        y_offset += post;
      }
      x_offset += post;
    }
  }
};

template <typename DeviceContext, typename T>
class StackGradKernel : public framework::OpKernel<T> {
  using Tensor = framework::LoDTensor;

 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto *dy = ctx.Input<Tensor>(framework::GradVarName("Y"));
    auto dx = ctx.MultiOutput<Tensor>(framework::GradVarName("X"));
    int axis = ctx.Attr<int>("axis");
    if (axis < 0) axis += dy->dims().size();
    int n = dy->dims()[axis];
    std::vector<T *> dx_datas(n);  // NOLINT

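    // An output slot may be absent when the corresponding input needs no
    // gradient; record nullptr so the functor can skip it.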
    for (int i = 0; i < n; i++) {
      if (dx[i] == nullptr) {
        dx_datas[i] = nullptr;
      } else {
        dx_datas[i] = dx[i]->mutable_data<T>(ctx.GetPlace());
      }
    }
    auto dy_data = dy->data<T>();
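    // View dy as [pre, n, post] and scatter every element back into the
    // matching position of the corresponding input gradient.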
    int pre = 1;
    for (int i = 0; i < axis; ++i) pre *= dy->dims()[i];
    int total_num = dy->numel();
    int post = total_num / (n * pre);
    auto &dev_ctx = ctx.template device_context<DeviceContext>();
    auto dx_data_arr = dx_datas.data();
    StackGradFunctorForRange(dev_ctx, dx_data_arr, dy_data, total_num, n, post);
  }
};
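
// A minimal sketch of how the accompanying stack_op.cc is expected to
// register these kernels (the exact type list is assumed, not authoritative):
//
//   REGISTER_OP_CPU_KERNEL(
//       stack, ops::StackKernel<paddle::platform::CPUDeviceContext, float>,
//       ops::StackKernel<paddle::platform::CPUDeviceContext, double>);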

}  // namespace operators
}  // namespace paddle