/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <memory.h>
#include <cstring>

#include "paddle/framework/ddim.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/place.h"

using paddle::framework::Tensor;
using paddle::framework::DDim;

namespace paddle {
namespace operators {

/* Implementation of CPU copy */
/**
 * Gather rows ("slices") of `params` into `output` on the CPU.
 *
 * @param params     source buffer laid out as rows of `slice_size` elements
 * @param indices    row indices to gather; `indices[i]` selects the source row
 *                   copied into output row i (caller must ensure each index is
 *                   a valid row of `params`)
 * @param slice_size number of T elements per row
 * @param index_size number of rows to gather (length of `indices`)
 * @param output     destination buffer with room for
 *                   `index_size * slice_size` elements
 */
template <typename T>
void CPUGather(const T* params, const int* indices, const int slice_size,
               const int index_size, T* output) {
  // Bytes in one row; each gathered row is one contiguous memcpy.
  const size_t slice_bytes = slice_size * sizeof(T);

  // Use a signed counter to match `index_size` (the original `size_t`
  // counter triggered a sign-compare and would loop forever on a
  // negative index_size).
  for (int i = 0; i < index_size; ++i) {
    int index_ = indices[i];
    // copy src[index_] to output[i]
    memcpy(output + i * slice_size, params + index_ * slice_size, slice_bytes);
  }
}

/*
 * GPU counterpart of CPUGather: declaration only. The definition is expected
 * to live in a separate (CUDA) translation unit — TODO confirm; it is not
 * present in this header. The original author's note suggests the device
 * context (gpu id / stream) would be threaded through here eventually.
 */
template <typename T>
void GPUGather(const T* src, const int* index, const int slice_size,
               const int index_size, T* output);
/**
 * Return a new tensor from source tensor, gathered according to index
 * input[src]: type-T source Tensor
 * input[index]: type-int index Tensor (1-D)
 * return: output tensor
 */
Z
Zhuoyuan 已提交
56
template <typename T>
Z
zchen0211 已提交
57
void Gather(const platform::Place& place, const paddle::framework::Tensor* src,
Z
zchen0211 已提交
58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73
            const paddle::framework::Tensor* index,
            paddle::framework::Tensor* output) {
  // check index of shape 1-D
  PADDLE_ENFORCE(index->dims().size() == 1);
  int index_size = index->dims()[0];

  auto src_dims = src->dims();
  DDim output_dims(src_dims);
  output_dims[0] = index_size;

  // slice size
  int slice_size = 1;
  for (size_t i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];

  // Gathering
  if (platform::is_cpu_place(place)) {
Z
zchen0211 已提交
74
    CPUGather<T>(src->data<T>(), index->data<int>(), slice_size, index_size,
Z
zchen0211 已提交
75 76 77 78 79 80 81 82
                 output->data<T>());
  } else {
    // init for GPU
    // output_arr = output->mutable_data<T>(output_dims, platform::GPUPlace());
    // how to specialize device??
    // GPUGather(
    //    d, src->data(), index->data(), slice_size,
    //    new_tensor->mutable_data());
Z
Zhuoyuan 已提交
83
  }
Z
Zhuoyuan 已提交
84
}

}  // namespace operators
}  // namespace paddle