/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <memory.h>
#include <cstring>

#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace operators {

using framework::Tensor;

/**
 * A thin wrapper for gathering on CPU tensors.
 * Copies the slices of src selected by index into output (gather along dim 0).
 * input[src]: type-T source Tensor
 * input[index]: type-IndexT index Tensor, 1-D (or 2-D with shape [N, 1])
 * return: output tensor
 */
template <typename T, typename IndexT = int>
void CPUGather(const platform::DeviceContext& ctx, const Tensor& src,
               const Tensor& index, Tensor* output) {
  PADDLE_ENFORCE_EQ(
      platform::is_cpu_place(ctx.GetPlace()), true,
      platform::errors::PreconditionNotMet("It should be running on the CPU."));
  // check that index is 1-D, or 2-D with shape [N, 1]
  if (index.dims().size() == 2) {
    PADDLE_ENFORCE_EQ(
        index.dims()[1], 1,
        platform::errors::InvalidArgument(
            "index.dims()[1] should be 1 when index.dims().size() == 2 "
            "in gather_op, but received value is [%d].",
            index.dims()[1]));
  } else {
    PADDLE_ENFORCE_EQ(index.dims().size(), 1,
                      platform::errors::InvalidArgument(
                          "index.dims().size() should be 1 or 2 in gather_op, "
                          "but received shape's size is [%d].",
                          index.dims().size()));
  }
  int64_t index_size = index.dims()[0];

  auto src_dims = src.dims();

  const T* p_src = src.data<T>();
  const IndexT* p_index = index.data<IndexT>();
  T* p_output = output->data<T>();

  // slice size: number of elements copied per index entry
  // (the product of all dimensions of src except dim 0)
  int slice_size = 1;
  for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];

  const size_t slice_bytes = slice_size * sizeof(T);

  for (int64_t i = 0; i < index_size; ++i) {
    IndexT index_ = p_index[i];
    memcpy(p_output + i * slice_size, p_src + index_ * slice_size, slice_bytes);
  }
}
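
// Example (illustrative only): a minimal sketch of how CPUGather might be
// called. It assumes a CPU device context and CPU-resident tensors; the
// variable names and shapes below are hypothetical, not part of this header.
//
//   paddle::platform::CPUPlace cpu_place;
//   paddle::platform::CPUDeviceContext ctx(cpu_place);
//   paddle::framework::Tensor src, index, output;
//   src.mutable_data<float>(paddle::framework::make_ddim({4, 8}), cpu_place);
//   auto* idx = index.mutable_data<int>(paddle::framework::make_ddim({2}),
//                                       cpu_place);
//   idx[0] = 3;
//   idx[1] = 0;
//   output.mutable_data<float>(paddle::framework::make_ddim({2, 8}), cpu_place);
//   paddle::operators::CPUGather<float, int>(ctx, src, index, &output);
//   // output now holds rows 3 and 0 of src, in that order.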

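/**
 * A thin wrapper for gather_nd on CPU tensors.
 * Each row of index (i.e. its last dimension of length end_size) is treated
 * as a coordinate into the first end_size dimensions of input; the slice it
 * addresses is copied into the corresponding row of output.
 * input[input]: type-T source Tensor
 * input[index]: type-IndexT index Tensor with at least one dimension
 * return: output tensor
 */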
template <typename T, typename IndexT = int>
void CPUGatherNd(const platform::DeviceContext& ctx, const Tensor& input,
                 const Tensor& index, Tensor* output) {
  PADDLE_ENFORCE_EQ(
      platform::is_cpu_place(ctx.GetPlace()), true,
      platform::errors::PreconditionNotMet("It should be running on the CPU."));

  auto index_dims = index.dims();
  auto index_dims_size = index_dims.size();
  auto input_dims = input.dims();
  auto input_dims_size = input_dims.size();

  const T* p_input = input.data<T>();
  const IndexT* p_index = index.data<IndexT>();
  T* p_output = output->data<T>();

  // final dim: the last dimension of index, i.e. the number of coordinates
  // addressing one slice of input
  int64_t end_size = index_dims[index_dims_size - 1];
  // remain dim: the leading dimensions of index; their product is the number
  // of slices to gather
  auto remain_ddim = framework::slice_ddim(index_dims, 0, index_dims_size - 1);
  int64_t remain_numel = framework::product(remain_ddim);
  // slice size
  int64_t slice_size = 1;
  for (int64_t i = end_size; i < input_dims_size; ++i) {
    slice_size *= input_dims[i];
  }
  const size_t slice_bytes = slice_size * sizeof(T);

  for (int64_t i = 0; i < remain_numel; ++i) {
    int64_t index_ = 0;
    int64_t temp = 1;
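    // fold this row of index (end_size coordinates) into a single row-major
    // offset into input, measured in units of slice_size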
    for (int64_t j = end_size - 1; j >= 0; --j) {
      IndexT index_value = p_index[i * end_size + j];
      PADDLE_ENFORCE_LT(
          index_value, input_dims[j],
          platform::errors::InvalidArgument(
              "Input(index[-1]) has wrong value, it is [%d].", index_value));
      PADDLE_ENFORCE_GE(
          index_value, 0,
          platform::errors::InvalidArgument(
              "The value of Input(index) must be no less than 0, "
              "but received [%d].",
              index_value));

      index_ += (index_value * temp);
      temp *= input_dims[j];
    }
    memcpy(p_output + i * slice_size, p_input + index_ * slice_size,
           slice_bytes);
  }
}
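
// Example (illustrative only): a minimal sketch of CPUGatherNd usage under the
// same assumptions as the CPUGather example above; the shapes and names are
// hypothetical.
//
//   // input: shape [3, 4]; index: shape [2, 2]; output: shape [2]
//   // each row of index is a (row, col) coordinate into input
//   paddle::framework::Tensor input, index, output;
//   input.mutable_data<float>(paddle::framework::make_ddim({3, 4}), cpu_place);
//   auto* idx = index.mutable_data<int>(paddle::framework::make_ddim({2, 2}),
//                                       cpu_place);
//   idx[0] = 0; idx[1] = 1;  // selects input[0][1]
//   idx[2] = 2; idx[3] = 3;  // selects input[2][3]
//   output.mutable_data<float>(paddle::framework::make_ddim({2}), cpu_place);
//   paddle::operators::CPUGatherNd<float, int>(ctx, input, index, &output);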

}  // namespace operators
}  // namespace paddle