/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <memory.h>

#include <cstring>
#include <vector>

#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace operators {

using framework::Tensor;

Z
zchen0211 已提交
31
/**
Z
1 api  
zchen0211 已提交
32
 * A thin wrapper for gathering on cpu tensor
Z
zchen0211 已提交
33 34
 * Return a new tensor from source tensor, gathered according to index
 * input[src]: type-T source Tensor
35
 * input[index]: type-IndexT index Tensor (1-D)
Z
zchen0211 已提交
36 37
 * return: output tensor
 */
38
template <typename T, typename IndexT = int>
39 40
void CPUGather(const platform::DeviceContext& ctx, const Tensor& src,
               const Tensor& index, Tensor* output) {
41 42 43
  PADDLE_ENFORCE_EQ(
      platform::is_cpu_place(ctx.GetPlace()), true,
      platform::errors::PreconditionNotMet("It should be running on the CPU."));
Z
zchen0211 已提交
44
  // check index of shape 1-D
45
  if (index.dims().size() == 2) {
46 47 48 49 50 51
    PADDLE_ENFORCE_EQ(
        index.dims()[1], 1,
        platform::errors::InvalidArgument(
            "index.dims()[1] should be 1 when index.dims().size() = 2"
            "in gather_op, but received value is [%d].",
            index.dims()[1]));
52 53
  } else {
    PADDLE_ENFORCE_EQ(index.dims().size(), 1,
54 55 56 57
                      platform::errors::InvalidArgument(
                          "index.dims().size() should be 1 or 2 in gather_op,"
                          "but received shape's size is [%d].",
                          index.dims().size()));
58
  }
59
  int64_t index_size = index.dims()[0];
Z
zchen0211 已提交
60

61
  auto src_dims = src.dims();
Z
zchen0211 已提交
62

63
  const T* p_src = src.data<T>();
64
  const IndexT* p_index = index.data<IndexT>();
Z
1 api  
zchen0211 已提交
65 66
  T* p_output = output->data<T>();

Z
zchen0211 已提交
67 68
  // slice size
  int slice_size = 1;
Z
zchen0211 已提交
69
  for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
70 71
  // input size
  int input_size = src_dims[0] * slice_size;
Z
zchen0211 已提交
72

Z
1 api  
zchen0211 已提交
73 74
  const size_t slice_bytes = slice_size * sizeof(T);

75
  for (int64_t i = 0; i < index_size; ++i) {
76
    IndexT index_ = p_index[i];
77 78 79 80 81 82 83 84 85 86 87 88
    PADDLE_ENFORCE_LT(p_index[i], input_size,
                      platform::errors::OutOfRange(
                          "The element of Index must be less than the size of "
                          "input dim size of axis which is %d, but received "
                          "index element which is %d in the %d index.",
                          input_size, p_index[i], i));
    PADDLE_ENFORCE_GE(p_index[i], 0UL,
                      platform::errors::OutOfRange(
                          "The element of Index must be greater than or equal "
                          "to 0, but received index element which is %d in the "
                          "%d index.",
                          p_index[i], i));
Z
1 api  
zchen0211 已提交
89 90
    memcpy(p_output + i * slice_size, p_src + index_ * slice_size, slice_bytes);
  }
Z
Zhuoyuan 已提交
91
}
Z
zchen0211 已提交
92

93 94 95
/**
 * Gather slices of `input` according to `index` (the CPU GatherNd kernel).
 * The last dim of `index` holds an index tuple addressing a prefix of
 * input's dims; each tuple selects one contiguous slice to copy.
 * input[input]: type-T source Tensor
 * input[index]: type-IndexT index Tensor
 * return: output tensor (pre-sized; written in place)
 */
template <typename T, typename IndexT = int>
void CPUGatherNd(const platform::DeviceContext& ctx, const Tensor& input,
                 const Tensor& index, Tensor* output) {
  PADDLE_ENFORCE_EQ(
      platform::is_cpu_place(ctx.GetPlace()), true,
      platform::errors::PreconditionNotMet("It should be running on the CPU."));

  auto index_dims = index.dims();
  auto index_dims_size = index_dims.size();
  auto input_dims = input.dims();
  auto input_dims_size = input_dims.size();

  const T* p_input = input.data<T>();
  const IndexT* p_index = index.data<IndexT>();
  T* p_output = output->data<T>();

  // final dim: how many leading input dims one index tuple addresses
  int64_t end_size = index_dims[index_dims_size - 1];
  // remain dim: number of index tuples to gather
  auto remain_ddim = framework::slice_ddim(index_dims, 0, index_dims_size - 1);
  int64_t remain_numel = framework::product(remain_ddim);
  // slice size: elements copied per gathered slice (trailing input dims)
  int64_t slice_size = 1;
  for (int64_t i = end_size; i < input_dims_size; ++i) {
    slice_size *= input_dims[i];
  }
  const size_t slice_bytes = slice_size * sizeof(T);

  for (int64_t i = 0; i < remain_numel; ++i) {
    // decode the i-th index tuple into a flat slice offset (row-major)
    int64_t index_ = 0;
    int64_t temp = 1;
    for (int64_t j = end_size - 1; j >= 0; --j) {
      IndexT index_value = p_index[i * end_size + j];
      // BUGFIX: the old message read "Input(index[-1)]" (misplaced bracket)
      // and did not report the violated bound.
      PADDLE_ENFORCE_LT(
          index_value, input_dims[j],
          platform::errors::InvalidArgument(
              "Input(index)[-1] has wrong value, it is [%d] but should be "
              "less than [%d].",
              index_value, input_dims[j]));
      // signed 0 (not 0UL) so negative indices are caught, not promoted
      PADDLE_ENFORCE_GE(
          index_value, 0,
          platform::errors::InvalidArgument(
              "The value of Input(index) must be no less than 0, "
              "but received [%d].",
              index_value));

      index_ += (index_value * temp);
      temp *= input_dims[j];
    }
    memcpy(p_output + i * slice_size, p_input + index_ * slice_size,
           slice_bytes);
  }
}

143 144 145
/**
 * Gather entries of `input` along `axis` according to 1-D `index`.
 * Resizes `out` to input's shape with the axis dim replaced by the index
 * count, allocates it on `place`, and fills it.
 * T: element type; U: index element type. `axis` is assumed non-negative
 * (already normalized by the caller).
 */
template <typename T, typename U>
void GatherV2Function(const Tensor* input, const Tensor* index, int axis,
                      Tensor* out, const paddle::platform::Place& place) {
  auto* index_data = index->data<U>();
  // int64_t: numel() and flat offsets can exceed INT_MAX on large tensors
  int64_t index_size = index->numel();
  int64_t input_size = input->numel();
  auto input_dim = input->dims();
  auto* input_data = input->data<T>();

  if (input->numel() == 0) return;
  int axis_index = axis;

  int64_t input_index_dim_size = input_dim[axis_index];
  // validate every index up front, before touching the output
  for (int64_t i = 0; i < index_size; i++) {
    PADDLE_ENFORCE_LT(index_data[i], input_index_dim_size,
                      platform::errors::OutOfRange(
                          "The element of Index must be less than the size of "
                          "input dim size of axis which is %d, but received "
                          "index element which is %d in the %d index.",
                          input_index_dim_size, index_data[i], i));
    // signed 0 (not 0UL) so negative indices fail the check instead of
    // being promoted to unsigned
    PADDLE_ENFORCE_GE(index_data[i], 0,
                      platform::errors::OutOfRange(
                          "The element of Index must be greater than or equal "
                          "to 0, but received index element which is %d in the "
                          "%d index.",
                          index_data[i], i));
  }

  // collapse dims before / after the axis into two flat extents
  int64_t inner_dim_size = 1;
  int64_t outer_dim_size = 1;
  std::vector<int64_t> out_dim_vec;

  for (int i = 0; i < axis_index; i++) {
    inner_dim_size *= input_dim[i];
    out_dim_vec.push_back(input_dim[i]);
  }
  out_dim_vec.push_back(index_size);
  for (int i = axis_index + 1; i < input_dim.size(); i++) {
    outer_dim_size *= input_dim[i];
    out_dim_vec.push_back(input_dim[i]);
  }
  auto out_dim = framework::make_ddim(out_dim_vec);

  out->Resize(out_dim);
  auto* out_data = out->mutable_data<T>(place);

  int64_t out_index = 0;
  for (int64_t i = 0; i < inner_dim_size; i++) {
    for (int64_t j = 0; j < index_size; j++) {
      for (int64_t k = 0; k < outer_dim_size; k++) {
        // flat source offset: block i of input, row index_data[j], col k
        int64_t index = k + index_data[j] * outer_dim_size +
                        (i * input_size / inner_dim_size);
        out_data[out_index] = input_data[index];
        out_index++;
      }
    }
  }
}

202
template <typename T, typename U>
203
void GatherV2GradFunction(const Tensor* input, const Tensor* index,
204
                          const int axis, Tensor* out,
205 206 207 208 209 210 211
                          const paddle::platform::Place& place) {
  auto* index_data = index->data<U>();

  auto input_dim = input->dims();
  auto* input_data = input->data<T>();

  if (input->numel() == 0) return;
212
  int axis_index = axis;
213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
  int input_index_dim_size = input_dim[axis_index];

  int inner_dim_size = 1;
  int outer_dim_size = 1;

  for (int i = 0; i < axis_index; i++) {
    inner_dim_size *= input_dim[i];
  }
  for (int i = axis_index + 1; i < input_dim.size(); i++) {
    outer_dim_size *= input_dim[i];
  }

  auto* out_data = out->mutable_data<T>(place);
  auto* dev_ctx = platform::DeviceContextPool::Instance().Get(place);
  auto out_dim = out->dims();
  int out_index_dim_size = out_dim[axis_index];
  operators::math::set_constant(*dev_ctx, out, 0.0);

  for (int i = 0; i < inner_dim_size; i++) {
    for (int j = 0; j < input_index_dim_size; j++) {
      for (int k = 0; k < outer_dim_size; k++) {
        int index = k + index_data[j] * outer_dim_size +
                    i * outer_dim_size * out_index_dim_size;
        out_data[index] += input_data[j * outer_dim_size + k];
      }
    }
  }
}

}  // namespace operators
}  // namespace paddle