/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <set>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"

namespace paddle {
namespace operators {
namespace math {
template <typename T>
struct SelectedRowsAdd<platform::CPUDeviceContext, T> {
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::SelectedRows& input1,
                  const framework::SelectedRows& input2,
                  framework::SelectedRows* output) {
    auto in1_height = input1.height();
    PADDLE_ENFORCE_EQ(in1_height, input2.height());
    output->set_height(in1_height);

    auto& in1_rows = input1.rows();
    auto& in2_rows = input2.rows();
    std::vector<int64_t> out_rows;
    out_rows.reserve(in1_rows.size() + in2_rows.size());

    // concat rows
    out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
    out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
    output->set_rows(out_rows);

    auto* out_value = output->mutable_value();
    auto& in1_value = input1.value();
    auto& in2_value = input2.value();

    auto in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(in1_row_numel, in2_value.numel() / in2_rows.size());
    PADDLE_ENFORCE_EQ(in1_row_numel, out_value->numel() / out_rows.size());

    auto in1_place = input1.place();
    PADDLE_ENFORCE(platform::is_cpu_place(in1_place));
    auto in2_place = input2.place();
    PADDLE_ENFORCE(platform::is_cpu_place(in2_place));
    auto out_place = context.GetPlace();
    PADDLE_ENFORCE(platform::is_cpu_place(out_place));

    auto* out_data = out_value->data<T>();
    auto* in1_data = in1_value.data<T>();
    memory::Copy(boost::get<platform::CPUPlace>(out_place), out_data,
                 boost::get<platform::CPUPlace>(in1_place), in1_data,
                 in1_value.numel() * sizeof(T));

    auto* in2_data = in2_value.data<T>();
    memory::Copy(boost::get<platform::CPUPlace>(out_place),
                 out_data + in1_value.numel(),
                 boost::get<platform::CPUPlace>(in2_place), in2_data,
                 in2_value.numel() * sizeof(T));
  }
};

template struct SelectedRowsAdd<platform::CPUDeviceContext, float>;
template struct SelectedRowsAdd<platform::CPUDeviceContext, double>;
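
// Note: SelectedRowsAdd concatenates the two inputs rather than merging
// rows with the same index: if input1 has rows {0, 3} and input2 has rows
// {3, 5}, the output has rows {0, 3, 3, 5} and its value tensor holds
// input1's value followed by input2's value. Duplicate indices can be
// combined afterwards by scatter::MergeAdd below. A minimal usage sketch,
// with illustrative names and the output value pre-allocated to hold both
// inputs:
//
//   SelectedRowsAdd<platform::CPUDeviceContext, float> add_functor;
//   add_functor(cpu_ctx, grad_a, grad_b, &concatenated_grad);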

template <typename T>
struct SelectedRowsAddTensor<platform::CPUDeviceContext, T> {
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::SelectedRows& input1,
                  const framework::Tensor& input2, framework::Tensor* output) {
    auto in1_height = input1.height();
    auto in2_dims = input2.dims();
    auto out_dims = output->dims();
    PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);
    PADDLE_ENFORCE_EQ(in1_height, out_dims[0]);

    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();

    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height);
    PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height);

    SetConstant<platform::CPUDeviceContext, T> functor;
    functor(context, output, 0.0);

    auto* in1_data = in1_value.data<T>();
    auto* out_data = output->data<T>();

    for (size_t i = 0; i < in1_rows.size(); i++) {
      for (int64_t j = 0; j < in1_row_numel; j++) {
        out_data[in1_rows[i] * in1_row_numel + j] +=
            in1_data[i * in1_row_numel + j];
      }
    }

    auto out_eigen = framework::EigenVector<T>::Flatten(*output);
    auto in2_eigen = framework::EigenVector<T>::Flatten(input2);
    out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
  }
};

template struct SelectedRowsAddTensor<platform::CPUDeviceContext, float>;
template struct SelectedRowsAddTensor<platform::CPUDeviceContext, double>;
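
// Worked example of the scatter indexing above: with in1_rows = {1, 3} and
// in1_row_numel = 2, sparse value row 0 is added into out_data[2..3]
// (dense row 1) and sparse value row 1 into out_data[6..7] (dense row 3);
// the dense input2 is then added element-wise via Eigen. A minimal usage
// sketch with illustrative names:
//
//   SelectedRowsAddTensor<platform::CPUDeviceContext, float> add_tensor;
//   add_tensor(cpu_ctx, sparse_grad, dense_in, &dense_out);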

template <typename T>
struct SelectedRowsAddTo<platform::CPUDeviceContext, T> {
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::SelectedRows& input1,
                  const int64_t input2_offset,
                  framework::SelectedRows* input2) {
    auto in1_height = input1.height();
    PADDLE_ENFORCE_EQ(in1_height, input2->height());

    auto& in1_rows = input1.rows();
    auto& in2_rows = *(input2->mutable_rows());

    auto& in1_value = input1.value();
    auto* in2_value = input2->mutable_value();

    // concat rows
    in2_rows.Extend(in1_rows.begin(), in1_rows.end());

    auto in1_place = input1.place();
    PADDLE_ENFORCE(platform::is_cpu_place(in1_place));
    auto in2_place = input2->place();
    PADDLE_ENFORCE(platform::is_cpu_place(in2_place));

    auto* in1_data = in1_value.data<T>();
    auto* in2_data = in2_value->data<T>();
    memory::Copy(boost::get<platform::CPUPlace>(in2_place),
                 in2_data + input2_offset,
                 boost::get<platform::CPUPlace>(in1_place), in1_data,
                 in1_value.numel() * sizeof(T));
  }
};

template struct SelectedRowsAddTo<platform::CPUDeviceContext, float>;
template struct SelectedRowsAddTo<platform::CPUDeviceContext, double>;
template struct SelectedRowsAddTo<platform::CPUDeviceContext, int>;
template struct SelectedRowsAddTo<platform::CPUDeviceContext, int64_t>;
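
// SelectedRowsAddTo appends input1's rows to input2 and copies input1's
// value into input2's value buffer starting at element offset
// input2_offset, so input2's value must already be allocated large enough
// to hold the appended data. A minimal sketch with illustrative names:
//
//   SelectedRowsAddTo<platform::CPUDeviceContext, float> add_to;
//   add_to(cpu_ctx, src, /*input2_offset=*/dst_numel_so_far, &dst);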

template <typename T>
struct SelectedRowsSumTo<platform::CPUDeviceContext, T> {
  void operator()(const platform::CPUDeviceContext& context,
                  const std::vector<framework::SelectedRows*>& input1,
                  const std::vector<int64_t>& input2_offsets,
                  framework::SelectedRows* input2) {
    // Count the total number of rows and check that every input has the
    // same height as the output.
    size_t size = 0u;
    for (auto iter = input1.begin(); iter != input1.end(); ++iter) {
      auto& in_rows = (*iter)->rows();
      size += in_rows.end() - in_rows.begin();
      auto in1_height = (*iter)->height();
      PADDLE_ENFORCE_EQ(in1_height, input2->height());
    }
    // concat rows
    std::vector<int64_t> in2_rows;
    in2_rows.reserve(in2_rows.size() + size);
    for (auto iter = input1.begin(); iter != input1.end(); ++iter) {
      const framework::Vector<int64_t>& in_rows = (*iter)->rows();
      in2_rows.insert(in2_rows.end(), in_rows.begin(), in_rows.end());
    }
    input2->set_rows(in2_rows);

    auto* in2_value = input2->mutable_value();
    auto* in2_data = in2_value->data<T>();
    auto blas = math::GetBlas<platform::CPUDeviceContext, T>(context);
    size_t offset = 0u;
    for (size_t i = 0u; i != input1.size(); ++i) {
      auto& in_value = input1[i]->value();
      const auto* in_data = in_value.data<T>();
      offset += input2_offsets[i];
      blas.VCOPY(in_value.numel(), in_data, in2_data + offset);
    }
  }
};

template struct SelectedRowsSumTo<platform::CPUDeviceContext, float>;
template struct SelectedRowsSumTo<platform::CPUDeviceContext, double>;
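
// Note on input2_offsets: the offset is accumulated before each copy
// (offset += input2_offsets[i]), so input2_offsets[i] is the gap to skip
// before writing input i. To pack two inputs with 6 and 8 value elements
// back-to-back, pass {0, 6}. A minimal sketch with illustrative names:
//
//   SelectedRowsSumTo<platform::CPUDeviceContext, float> sum_to;
//   sum_to(cpu_ctx, {&grad_a, &grad_b}, {0, grad_a.value().numel()}, &dst);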

template <typename T>
struct SelectedRowsAddToTensor<platform::CPUDeviceContext, T> {
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::SelectedRows& input1,
                  framework::Tensor* input2) {
    auto in1_height = input1.height();
    auto in2_dims = input2->dims();
    PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);

    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();

    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height);

    auto* in1_data = in1_value.data<T>();
    auto* input2_data = input2->data<T>();

    for (size_t i = 0; i < in1_rows.size(); i++) {
      for (int64_t j = 0; j < in1_row_numel; j++) {
        input2_data[in1_rows[i] * in1_row_numel + j] +=
            in1_data[i * in1_row_numel + j];
      }
    }
  }
};

template struct SelectedRowsAddToTensor<platform::CPUDeviceContext, float>;
template struct SelectedRowsAddToTensor<platform::CPUDeviceContext, double>;
template struct SelectedRowsAddToTensor<platform::CPUDeviceContext, int>;
template struct SelectedRowsAddToTensor<platform::CPUDeviceContext, int64_t>;
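
// SelectedRowsAddToTensor is the in-place counterpart of
// SelectedRowsAddTensor: sparse value row i is added into dense row
// in1_rows[i] of input2, which must already be allocated with in1_height
// rows. A minimal sketch with illustrative names:
//
//   SelectedRowsAddToTensor<platform::CPUDeviceContext, float> add_to;
//   add_to(cpu_ctx, sparse_grad, &dense_param);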

// This is a separate namespace for manipulating SelectedRows-typed
// data, e.g. merging duplicated rows or adding two SelectedRows.
//
// Another group of functors is called "scatter updates", which means
// using a SelectedRows to update a dense tensor with different ops,
// such as add or mul.
namespace scatter {

template <typename DeviceContext, typename T>
typename std::enable_if<
    std::is_floating_point<T>::value &&
    std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type
elementwise_add_to(const DeviceContext& ctx, BlasT<DeviceContext, T>* blas,
                   size_t data_len, const T* in, T* out) {
  blas->AXPY(data_len, 1., in, out);
}

template <typename DeviceContext, typename T>
typename std::enable_if<
    !std::is_floating_point<T>::value &&
    std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type
elementwise_add_to(const DeviceContext& ctx, BlasT<DeviceContext, T>* blas,
                   size_t data_len, const T* in, T* out) {
  for (size_t i = 0; i < data_len; i++) {
    out[i] += in[i];
  }
}

template <typename T>
struct MergeAdd<platform::CPUDeviceContext, T> {
  framework::SelectedRows operator()(const platform::CPUDeviceContext& context,
                                     const framework::SelectedRows& input,
                                     const bool sorted_result = false) {
    framework::SelectedRows out;
    (*this)(context, input, &out, sorted_result);
    return out;
  }

  void operator()(const platform::CPUDeviceContext& context,
                  const framework::SelectedRows& input,
                  framework::SelectedRows* output,
                  const bool sorted_result = false) {
    std::vector<const framework::SelectedRows*> inputs;
    inputs.push_back(&input);
    (*this)(context, inputs, output, sorted_result);
  }

  void operator()(const platform::CPUDeviceContext& context,
                  const std::vector<const framework::SelectedRows*>& inputs,
                  framework::SelectedRows* output,
                  const bool sorted_result = false) {
    if (inputs.size() == 0) {
      VLOG(3) << "no input! return";
      return;
    }
    const framework::SelectedRows* has_value_input = nullptr;
    for (auto* in : inputs) {
      if (in->rows().size() > 0) {
        has_value_input = in;
        break;
      }
    }
    if (has_value_input == nullptr) {
      VLOG(3) << "no input has value! just return" << std::endl;
      return;
    }
    auto input_width = has_value_input->value().dims()[1];
    auto input_height = has_value_input->height();
    framework::SelectedRows& out = *output;
    std::set<int64_t> merged_row_set;
    for (auto* input : inputs) {
      if (input->rows().size() == 0) {
        continue;
      }
      PADDLE_ENFORCE_EQ(input_width, input->value().dims()[1],
                        "all inputs should have the same "
                        "dimension except for the first one");
      PADDLE_ENFORCE_EQ(input_height, input->height(),
                        "all inputs should have the same height");
      merged_row_set.insert(input->rows().begin(), input->rows().end());
    }
    std::vector<int64_t> merge_rows(merged_row_set.begin(),
                                    merged_row_set.end());
    if (sorted_result) {
      std::sort(merge_rows.begin(), merge_rows.end());
    }
    std::unordered_map<int64_t, size_t> rows_to_id;
    for (size_t i = 0; i < merge_rows.size(); ++i) {
      rows_to_id[merge_rows[i]] = i;
    }
    out.set_rows(merge_rows);
    out.set_height(input_height);
    out.mutable_value()->mutable_data<T>(
        framework::make_ddim(
            {static_cast<int64_t>(merge_rows.size()), input_width}),
        context.GetPlace());

    math::SetConstant<platform::CPUDeviceContext, T> constant_functor;
    constant_functor(context, out.mutable_value(), 0.0);

    auto* out_data = out.mutable_value()->data<T>();

    auto blas = math::GetBlas<platform::CPUDeviceContext, T>(context);
    for (auto* input : inputs) {
      if (input->rows().size() == 0) {
        continue;
      }
      auto* input_data = input->value().data<T>();
      auto& input_rows = input->rows();

      for (size_t i = 0; i < input_rows.size(); i++) {
        size_t out_i = rows_to_id[input_rows[i]];
        elementwise_add_to<platform::CPUDeviceContext, T>(
            context, &blas, static_cast<size_t>(input_width),
            &input_data[i * input_width], &out_data[out_i * input_width]);
      }
    }
  }
};

template struct MergeAdd<platform::CPUDeviceContext, int>;
template struct MergeAdd<platform::CPUDeviceContext, int64_t>;
template struct MergeAdd<platform::CPUDeviceContext, float>;
template struct MergeAdd<platform::CPUDeviceContext, double>;
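
// Worked example of MergeAdd: an input with rows {4, 1, 4} and value rows
// {v0, v1, v2} merges into rows {1, 4} (duplicates removed) with value
// rows {v1, v0 + v2}; multiple inputs are accumulated the same way. A
// minimal sketch with illustrative names:
//
//   MergeAdd<platform::CPUDeviceContext, float> merge_func;
//   framework::SelectedRows merged = merge_func(cpu_ctx, duplicated_grad);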

template <typename T>
struct UpdateToTensor<platform::CPUDeviceContext, T> {
  void operator()(const platform::CPUDeviceContext& context,
                  const ScatterOps& op, const framework::SelectedRows& input1,
                  framework::Tensor* input2) {
    auto in1_height = input1.height();
    auto in2_dims = input2->dims();
    PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);

    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();

    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height);

    auto* in1_data = in1_value.data<T>();
    auto* input2_data = input2->data<T>();

    // FIXME(typhoonzero): use a macro to simplify the repetitive code below.
    switch (op) {
      case ScatterOps::ASSIGN:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] =
            in1_data[i * in1_row_numel + j];
        break;
      case ScatterOps::ADD:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] +=
            in1_data[i * in1_row_numel + j];
        break;
      case ScatterOps::SUB:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] -=
            in1_data[i * in1_row_numel + j];
        break;
      case ScatterOps::SUBBY:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] =
            in1_data[i * in1_row_numel + j] -
            input2_data[in1_rows[i] * in1_row_numel + j];
        break;
      case ScatterOps::MUL:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] *=
            in1_data[i * in1_row_numel + j];
        break;
      case ScatterOps::DIV:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] /=
            in1_data[i * in1_row_numel + j];
        break;
      case ScatterOps::DIVBY:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] =
            in1_data[i * in1_row_numel + j] /
            input2_data[in1_rows[i] * in1_row_numel + j];
        break;
    }
  }
};
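
// Per-element semantics of UpdateToTensor, with x = in1_data[...] and
// y = input2_data[...] for each selected row: ASSIGN y = x; ADD y += x;
// SUB y -= x; SUBBY y = x - y; MUL y *= x; DIV y /= x; DIVBY y = x / y.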

}  // namespace scatter
}  // namespace math
}  // namespace operators
}  // namespace paddle