/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#pragma once

#include "paddle/framework/op_registry.h"
#include "paddle/memory/memcpy.h"
#include "unsupported/Eigen/CXX11/Tensor"

namespace paddle {
namespace operators {

using LoDTensor = framework::LoDTensor;

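// SeqExpandKernel expands every sequence of the input LoDTensor "X" along the
// batch dimension. The per-sequence expansion factor is either the integer
// attribute "repeat" (all sequences are tiled the same number of times) or,
// when "repeat" is 0, the length ratio between the reference LoDTensor "Y"
// and "X". Roughly, with X = [a, b] holding two length-1 sequences and
// repeat = 2, Out becomes [a, a, b, b].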
template <typename Place, typename T>
class SeqExpandKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<LoDTensor>("X");
    auto* out = context.Output<LoDTensor>("Out");
    const T* x_data = x->data<T>();
    auto x_dims = x->dims();
    auto x_lod = x->lod();

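    // Append an identity offset level (0, 1, ..., num-1) to x's LoD; when X
    // carries no LoD of its own, this treats each row as a length-1 sequence.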
    framework::Vector<size_t> level;
    size_t num = (x_lod.size() == 0) ? (x->dims()[0] + 1) : x_lod[0].size();
    for (size_t i = 0; i < num; ++i) {
      level.push_back(i);
    }
    x_lod.push_back(level);

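    // Derive one expansion factor per sequence: either the constant "repeat"
    // attribute, or (when repeat == 0) the ratio of Y's sequence lengths to
    // X's sequence lengths.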
    size_t repeat = static_cast<size_t>(context.Attr<int>("repeat"));
    framework::Vector<size_t> scales;
    auto x_abs_lod = x_lod.ToAbsOffset();
    if (repeat != 0) {
      for (size_t i = 0; i < x_lod[0].size() - 1; ++i) {
        scales.push_back(repeat);
      }
      std::vector<int64_t> dims = framework::vectorize(x->dims());
      dims[0] = dims[0] * repeat;
      auto out_dims = framework::make_ddim(dims);
      out->Resize(out_dims);
    } else {
      auto* y = context.Input<LoDTensor>("Y");
      auto y_lod = y->lod();
      auto y_abs_lod = y_lod.ToAbsOffset();
      for (size_t i = 0; i < y_abs_lod[0].size() - 1; ++i) {
        scales.push_back((y_abs_lod[0][i + 1] - y_abs_lod[0][i]) /
                         (x_abs_lod[0][i + 1] - x_abs_lod[0][i]));
      }
      out->Resize(y->dims());
    }

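    // Expand the LoD level by level: "indexes" starts as a copy of the
    // level-0 offsets and is re-mapped through each preceding level before it
    // is handed to framework::expand_lod together with the per-sequence
    // scales.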
    framework::Vector<size_t> indexes;
    for (size_t i = 0; i < x_lod[0].size(); ++i) {
      indexes.push_back(x_lod[0][i]);
    }
    framework::LoD out_lod;
    auto level0 = framework::expand_lod(indexes, x_lod[0], scales, false);
    out_lod.push_back(level0);
    for (size_t i = 1; i < x_lod.size(); ++i) {
      for (size_t j = 0; j < indexes.size(); ++j) {
        indexes[j] = x_lod[i - 1][indexes[j]];
      }
      out_lod.push_back(framework::expand_lod(x_lod[i], indexes, scales, true));
    }

    size_t element_len = framework::product(x_dims) / x_dims[0];
    T* out_data = out->mutable_data<T>(context.GetPlace());

    // copy data
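    // Each sequence i of X is copied scales[i] times, back to back, into Out.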
    auto place = context.GetPlace();
    size_t count = 0;
    if (platform::is_cpu_place(place)) {
      auto& cpu_place = boost::get<platform::CPUPlace>(place);
      for (size_t i = 0; i < scales.size(); ++i) {
        count = element_len * (x_abs_lod[0][i + 1] - x_abs_lod[0][i]);
        for (size_t j = 0; j < scales[i]; ++j) {
          memory::Copy(cpu_place, out_data, cpu_place, x_data,
                       sizeof(T) * count);
          out_data += count;
        }
        x_data += count;
      }
    } else {
#ifdef PADDLE_WITH_CUDA
      auto& gpu_place = boost::get<platform::GPUPlace>(place);
      auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
                        context.device_context())
                        .stream();
      for (size_t i = 0; i < scales.size(); ++i) {
        count = element_len * (x_abs_lod[0][i + 1] - x_abs_lod[0][i]);
        for (size_t j = 0; j < scales[i]; ++j) {
          memory::Copy(gpu_place, out_data, gpu_place, x_data,
                       sizeof(T) * count, stream);
          out_data += count;
        }
        x_data += count;
      }
#else
      PADDLE_THROW("Paddle is not compiled with GPU");
#endif
    }

    out->set_lod(out_lod);
    // Debug output: log the expanded LoD.
    for (size_t i = 0; i < out_lod.size(); i++) {
      for (size_t j = 0; j < out_lod[i].size(); j++) {
        LOG(INFO) << "lod[" << i << "][" << j << "] = " << out_lod[i][j];
      }
    }
  }
};

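// SeqExpandGradKernel computes the gradient of SeqExpand: every element of X
// was copied into several positions of Out, so the gradient w.r.t. X is the
// sum over the corresponding repeated slices of the gradient w.r.t. Out.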
template <typename Place, typename T>
class SeqExpandGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* d_out = context.Input<LoDTensor>(framework::GradVarName("Out"));
    auto* x = context.Input<LoDTensor>("X");
    auto* out = context.Input<LoDTensor>("Out");
    auto* d_x = context.Output<LoDTensor>(framework::GradVarName("X"));
    auto out_lod = out->lod();
    auto out_abs_lod = out_lod.ToAbsOffset();
    d_x->set_lod(x->lod());
    const T* d_out_data = d_out->data<T>();
    auto d_out_dims = d_out->dims();
    T* d_x_data = d_x->mutable_data<T>(context.GetPlace());
    size_t element_len = framework::product(d_out_dims) / d_out_dims[0];
    for (size_t i = 0; i < out->NumElements(); ++i) {
      size_t ele_count = out_abs_lod[0][i + 1] - out_abs_lod[0][i];
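      // View this sequence's output gradient as a (repeat x chunk) matrix and
      // sum over the repeat dimension to recover the input gradient chunk.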
      size_t repeat = out->NumElements(0, i);
      Eigen::TensorMap<Eigen::Tensor<const T, 2>> d_out_t(
          d_out_data, static_cast<int>(repeat),
          static_cast<int>((ele_count * element_len) / repeat));
      Eigen::TensorMap<Eigen::Tensor<T, 1>> d_x_t(
          d_x_data, static_cast<int>((ele_count * element_len) / repeat));
      auto place = context.GetEigenDevice<Place>();
      d_x_t.device(place) = d_out_t.sum(Eigen::array<int, 1>({{0}}));
      d_out_data += (ele_count * element_len);
      d_x_data += ((ele_count * element_len) / repeat);
    }
  }
};

}  // namespace operators
}  // namespace paddle