/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <memory.h>
#include <cstring>
#include "paddle/framework/ddim.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/place.h"

using paddle::framework::Tensor;
using paddle::framework::DDim;
namespace paddle {
namespace operators {

/* Implementation of CPU copy */
/**
 * Gather rows from a flat source buffer into a flat output buffer.
 *
 * params:      source buffer laid out as consecutive rows of `slice_size`
 *              elements of type T.
 * indices:     array of `index_size` row indices into `params`.
 * slice_size:  number of T elements in one row.
 * index_size:  number of rows to gather.
 * output:      destination buffer; must hold index_size * slice_size Ts.
 *              Row i of `output` receives row indices[i] of `params`.
 */
template <typename T>
void CPUGather(const T* params,
               const int* indices,
               const int slice_size,
               const int index_size,
               T* output) {
  // Bytes in one row; each gathered row is a single memcpy.
  const size_t slice_bytes = slice_size * sizeof(T);

  // Signed counter: index_size is int, so a size_t counter would mix
  // signedness in the comparison (and loop "forever" on negative input).
  for (int i = 0; i < index_size; ++i) {
    int index_ = indices[i];
    // copy src[index_] to output[i]
    memcpy(output + i * slice_size, params + index_ * slice_size, slice_bytes);
  }
}

/* GPU copy routine — declaration only, defined in the CUDA source.
   Presumably executed on a device/stream handle along the lines of
   d = cuda_stream(gpu_id_, stream_id_); — the device plumbing is still
   an open question (see the TODO in Gather below). */
template <typename T>
void GPUGather(const T* src, const int* index, const int slice_size,
               const int index_size, T* output);

/**
 * Return a new tensor from source tensor, gathered according to index
 * input[src]: type-T source Tensor
 * input[index]: type-int index Tensor (1-D)
 * return: output tensor
 */
Z
Zhuoyuan 已提交
62
template <typename T>
Z
zchen0211 已提交
63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92
void Gather(const platform::Place& place,
            const paddle::framework::Tensor* src,
            const paddle::framework::Tensor* index,
            paddle::framework::Tensor* output) {
  // check index of shape 1-D
  PADDLE_ENFORCE(index->dims().size() == 1);
  int index_size = index->dims()[0];

  auto src_dims = src->dims();
  DDim output_dims(src_dims);
  output_dims[0] = index_size;

  // slice size
  int slice_size = 1;
  for (size_t i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];

  // Gathering
  if (platform::is_cpu_place(place)) {
    CPUGather<T>(src->data<T>(),
                 index->data<int>(),
                 slice_size,
                 index_size,
                 output->data<T>());
  } else {
    // init for GPU
    // output_arr = output->mutable_data<T>(output_dims, platform::GPUPlace());
    // how to specialize device??
    // GPUGather(
    //    d, src->data(), index->data(), slice_size,
    //    new_tensor->mutable_data());
Z
Zhuoyuan 已提交
93
  }
Z
Zhuoyuan 已提交
94
}

}  // namespace operators
}  // namespace paddle