selected_rows_functor.cc 36.0 KB
Newer Older
1
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
2 3 4 5 6 7 8 9 10 11 12 13 14

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

15
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
16

17 18 19 20 21 22
#include <algorithm>
#include <map>
#include <set>
#include <vector>

#include "paddle/phi/core/ddim.h"
H
Huang Jiyi 已提交
23
#include "paddle/phi/core/mixed_vector.h"
24

25 26 27 28
#ifdef PADDLE_WITH_XPU
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#endif

L
lidanqing 已提交
29
#ifdef PADDLE_WITH_MKLDNN
30
#include "paddle/phi/backends/onednn/axpy_handler.h"
L
lidanqing 已提交
31 32
#endif

33 34
namespace phi {
namespace funcs {
35
template <typename T>
L
Leo Chen 已提交
36 37
struct SelectedRowsAdd<phi::CPUContext, T> {
  void operator()(const phi::CPUContext& context,
38
                  const phi::SelectedRows& input1,
39 40
                  const phi::SelectedRows& input2,
                  phi::SelectedRows* output) {
41
    auto in1_height = input1.height();
42
    PADDLE_ENFORCE_EQ(
43 44
        in1_height,
        input2.height(),
45 46 47 48 49
        phi::errors::InvalidArgument("The two inputs height must be equal."
                                     "But received first input height  = "
                                     "[%d], second input height = [%d]",
                                     in1_height,
                                     input2.height()));
50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66
    output->set_height(in1_height);

    auto& in1_rows = input1.rows();
    auto& in2_rows = input2.rows();
    std::vector<int64_t> out_rows;
    out_rows.reserve(in1_rows.size() + in2_rows.size());

    // concat rows
    out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
    out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
    output->set_rows(out_rows);

    auto* out_value = output->mutable_value();
    auto& in1_value = input1.value();
    auto& in2_value = input2.value();

    auto in1_row_numel = in1_value.numel() / in1_rows.size();
67
    PADDLE_ENFORCE_EQ(
68 69
        in1_row_numel,
        in2_value.numel() / in2_rows.size(),
70
        phi::errors::InvalidArgument(
71
            "The two inputs width must be equal."
72
            "But received first input width = [%d], second input width = [%d]",
73 74
            in1_row_numel,
            in2_value.numel() / in2_rows.size()));
75
    PADDLE_ENFORCE_EQ(
76 77
        in1_row_numel,
        out_value->numel() / out_rows.size(),
78
        phi::errors::InvalidArgument(
79
            "The input and oupput width must be equal."
80
            "But received input width = [%d], output width = [%d]",
81 82
            in1_row_numel,
            out_value->numel() / out_rows.size()));
83 84

    auto in1_place = input1.place();
85
    PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::CPU,
86
                      true,
87
                      phi::errors::InvalidArgument(
88
                          "The running environment is not on the CPU place."));
89
    auto in2_place = input2.place();
90
    PADDLE_ENFORCE_EQ(in2_place.GetType() == phi::AllocationType::CPU,
91
                      true,
92
                      phi::errors::InvalidArgument(
93
                          "The running environment is not on the CPU place."));
94
    auto out_place = context.GetPlace();
95
    PADDLE_ENFORCE_EQ(out_place.GetType() == phi::AllocationType::CPU,
96
                      true,
97
                      phi::errors::InvalidArgument(
98
                          "The running environment is not on the CPU place."));
99 100 101

    auto* out_data = out_value->data<T>();
    auto* in1_data = in1_value.data<T>();
102 103 104 105 106
    memory_utils::Copy(out_place,
                       out_data,
                       in1_place,
                       in1_data,
                       in1_value.numel() * sizeof(T));
107 108

    auto* in2_data = in2_value.data<T>();
109 110 111 112 113
    memory_utils::Copy(out_place,
                       out_data + in1_value.numel(),
                       in2_place,
                       in2_data,
                       in2_value.numel() * sizeof(T));
114 115 116
  }
};

L
Leo Chen 已提交
117 118
template struct SelectedRowsAdd<phi::CPUContext, float>;
template struct SelectedRowsAdd<phi::CPUContext, double>;
119 120

template <typename T>
L
Leo Chen 已提交
121 122
struct SelectedRowsAddTensor<phi::CPUContext, T> {
  void operator()(const phi::CPUContext& context,
123
                  const phi::SelectedRows& input1,
124 125
                  const phi::DenseTensor& input2,
                  phi::DenseTensor* output) {
126
    auto in1_height = input1.height();
127 128
    const auto& in2_dims = input2.dims();
    const auto& out_dims = output->dims();
129
    PADDLE_ENFORCE_EQ(
130 131
        in1_height,
        in2_dims[0],
132 133 134 135 136
        phi::errors::InvalidArgument("The two inputs height must be equal."
                                     "But received first input height = "
                                     "[%d], second input height = [%d]",
                                     in1_height,
                                     in2_dims[0]));
137
    PADDLE_ENFORCE_EQ(
138 139
        in1_height,
        out_dims[0],
140
        phi::errors::InvalidArgument(
141
            "The input and output height must be equal."
142
            "But received input height = [%d], output height = [%d]",
143 144
            in1_height,
            out_dims[0]));
145 146 147 148 149

    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();

    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
150
    PADDLE_ENFORCE_EQ(
151 152
        in1_row_numel,
        input2.numel() / in1_height,
153
        phi::errors::InvalidArgument(
154
            "The two inputs width must be equal."
155
            "But received first input width = [%d], second input width = [%d]",
156 157
            in1_row_numel,
            input2.numel() / in1_height));
158
    PADDLE_ENFORCE_EQ(
159 160
        in1_row_numel,
        output->numel() / in1_height,
161
        phi::errors::InvalidArgument(
162
            "The input and output width must be equal."
163
            "But received input width = [%d], output width = [%d]",
164 165
            in1_row_numel,
            output->numel() / in1_height));
166

L
Leo Chen 已提交
167
    phi::funcs::SetConstant<phi::CPUContext, T> functor;
168 169 170 171 172 173 174 175 176 177 178 179
    functor(context, output, 0.0);

    auto* in1_data = in1_value.data<T>();
    auto* out_data = output->data<T>();

    for (size_t i = 0; i < in1_rows.size(); i++) {
      for (int64_t j = 0; j < in1_row_numel; j++) {
        out_data[in1_rows[i] * in1_row_numel + j] +=
            in1_data[i * in1_row_numel + j];
      }
    }

180 181
    auto out_eigen = EigenVector<T>::Flatten(*output);
    auto in2_eigen = EigenVector<T>::Flatten(input2);
Q
QI JUN 已提交
182
    out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
183 184 185
  }
};

L
Leo Chen 已提交
186 187
template struct SelectedRowsAddTensor<phi::CPUContext, float>;
template struct SelectedRowsAddTensor<phi::CPUContext, double>;
Q
QI JUN 已提交
188 189

template <typename T>
L
Leo Chen 已提交
190 191
struct SelectedRowsAddTo<phi::CPUContext, T> {
  void operator()(const phi::CPUContext& context,
192 193
                  const phi::SelectedRows& input1,
                  const int64_t input2_offset,
194
                  phi::SelectedRows* input2) {
Q
QI JUN 已提交
195
    auto in1_height = input1.height();
196
    PADDLE_ENFORCE_EQ(
197 198
        in1_height,
        input2->height(),
199 200 201 202 203
        phi::errors::InvalidArgument("The two inputs height must be equal."
                                     "But received first input height = "
                                     "[%d], second input height = [%d]",
                                     in1_height,
                                     input2->height()));
Q
QI JUN 已提交
204 205 206 207 208 209 210 211

    auto& in1_rows = input1.rows();
    auto& in2_rows = *(input2->mutable_rows());

    auto& in1_value = input1.value();
    auto* in2_value = input2->mutable_value();

    // concat rows
H
Huang Jiyi 已提交
212
    phi::MixVector<int64_t> mixv_in2_rows(&in2_rows);
213
    mixv_in2_rows.Extend(in1_rows.begin(), in1_rows.end());
Q
QI JUN 已提交
214 215

    auto in1_place = input1.place();
216
    PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::CPU,
217
                      true,
218
                      phi::errors::InvalidArgument(
219
                          "The running environment is not on the CPU place."));
Q
QI JUN 已提交
220
    auto in2_place = input2->place();
221
    PADDLE_ENFORCE_EQ(in2_place.GetType() == phi::AllocationType::CPU,
222
                      true,
223
                      phi::errors::InvalidArgument(
224
                          "The running environment is not on the CPU place."));
Q
QI JUN 已提交
225 226 227

    auto* in1_data = in1_value.data<T>();
    auto* in2_data = in2_value->data<T>();
228 229 230 231 232
    memory_utils::Copy(in2_place,
                       in2_data + input2_offset,
                       in1_place,
                       in1_data,
                       in1_value.numel() * sizeof(T));
Q
QI JUN 已提交
233 234 235
  }
};

L
Leo Chen 已提交
236 237 238 239
template struct SelectedRowsAddTo<phi::CPUContext, float>;
template struct SelectedRowsAddTo<phi::CPUContext, double>;
template struct SelectedRowsAddTo<phi::CPUContext, int>;
template struct SelectedRowsAddTo<phi::CPUContext, int64_t>;
Q
QI JUN 已提交
240

M
minqiyang 已提交
241
template <typename T>
L
Leo Chen 已提交
242 243
struct SelectedRowsSumTo<phi::CPUContext, T> {
  void operator()(const phi::CPUContext& context,
244
                  const std::vector<phi::SelectedRows*>& input1,
M
minqiyang 已提交
245
                  const std::vector<int64_t>& input2_offsets,
246
                  phi::SelectedRows* input2) {
M
minqiyang 已提交
247 248 249 250 251 252
    // Ensure all selected rows have the same height
    size_t size = 0u;
    for (auto iter = input1.begin(); iter != input1.end(); ++iter) {
      auto& in_rows = (*iter)->rows();
      size += in_rows.end() - in_rows.begin();
      auto in1_height = (*iter)->height();
253 254
      PADDLE_ENFORCE_EQ(in1_height,
                        input2->height(),
255
                        phi::errors::InvalidArgument(
256
                            "The two inputs height must be equal."
257
                            "But received first input height = [%d], second "
258
                            "input height = [%d]",
259 260
                            in1_height,
                            input2->height()));
M
minqiyang 已提交
261 262 263 264 265
    }
    // concat rows
    std::vector<int64_t> in2_rows;
    in2_rows.reserve(in2_rows.size() + size);
    for (auto iter = input1.begin(); iter != input1.end(); ++iter) {
H
Huang Jiyi 已提交
266
      const phi::Vector<int64_t>& in_rows = (*iter)->rows();
M
minqiyang 已提交
267 268 269 270 271 272
      in2_rows.insert(in2_rows.end(), in_rows.begin(), in_rows.end());
    }
    input2->set_rows(in2_rows);

    auto* in2_value = input2->mutable_value();
    auto* in2_data = in2_value->data<T>();
L
Leo Chen 已提交
273
    auto blas = phi::funcs::GetBlas<phi::CPUContext, T>(context);
M
minqiyang 已提交
274 275 276 277 278 279 280 281 282 283
    size_t offset = 0u;
    for (size_t i = 0u; i != input1.size(); ++i) {
      auto& in_value = input1[i]->value();
      const auto* in_data = in_value.data<T>();
      offset += input2_offsets[i];
      blas.VCOPY(in_value.numel(), in_data, in2_data + offset);
    }
  }
};

L
Leo Chen 已提交
284 285
template struct SelectedRowsSumTo<phi::CPUContext, float>;
template struct SelectedRowsSumTo<phi::CPUContext, double>;
M
minqiyang 已提交
286

H
hong 已提交
287 288 289
// Scatter-adds a SelectedRows into a dense tensor on CPU:
// input2[row_id, :] += input1.value()[i, :] for every selected row i.
template <typename T>
struct SelectedRowsAddToTensor<phi::CPUContext, T> {
  void operator()(const phi::CPUContext& context,
                  const phi::SelectedRows& input1,
                  phi::DenseTensor* input2) {
    // Nothing to accumulate for an empty SelectedRows.
    if (UNLIKELY(input1.rows().size() == 0)) {
      LOG(WARNING) << "input selected rows is empty!";
      return;
    }
    auto in1_height = input1.height();
    const auto& in2_dims = input2->dims();
    PADDLE_ENFORCE_EQ(
        in1_height,
        in2_dims[0],
        phi::errors::InvalidArgument("The two inputs height must be equal."
                                     "But received first input height = "
                                     "[%d], second input height = [%d]",
                                     in1_height,
                                     in2_dims[0]));

    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();

    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(
        in1_row_numel,
        input2->numel() / in1_height,
        phi::errors::InvalidArgument(
            "The two inputs width must be equal."
            "But received first input width = [%d], second input width = [%d]",
            in1_row_numel,
            input2->numel() / in1_height));

    auto* in1_data = in1_value.data<T>();
    auto* input2_data = input2->data<T>();

    // Accumulate each sparse row into the dense row it indexes.
    for (size_t i = 0; i < in1_rows.size(); i++) {
      for (int64_t j = 0; j < in1_row_numel; j++) {
        input2_data[in1_rows[i] * in1_row_numel + j] +=
            in1_data[i * in1_row_numel + j];
      }
    }
  }
};

332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391
#ifdef PADDLE_WITH_XPU
// Scatter-adds a SelectedRows into a dense tensor on XPU, mirroring the CPU
// specialization above but delegating the accumulation to xpu::scatter.
template <typename T>
struct SelectedRowsAddToTensor<phi::XPUContext, T> {
  void operator()(const phi::XPUContext& context,
                  const phi::SelectedRows& input1,
                  phi::DenseTensor* input2) {
    // Nothing to accumulate for an empty SelectedRows.
    if (UNLIKELY(input1.rows().size() == 0)) {
      LOG(WARNING) << "input selected rows is empty!";
      return;
    }
    using XPUType = typename XPUTypeTrait<T>::Type;
    auto in1_height = input1.height();
    const auto& in2_dims = input2->dims();
    PADDLE_ENFORCE_EQ(
        in1_height,
        in2_dims[0],
        phi::errors::InvalidArgument("The two inputs height must be equal."
                                     "But received first input height = "
                                     "[%d], second input height = [%d]",
                                     in1_height,
                                     in2_dims[0]));

    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();
    // Row ids are handed to XDNN via VectorParam with a host pointer and a
    // null device pointer; presumably XDNN uploads them as needed — confirm
    // against the XDNN VectorParam contract.
    int64_t* in1_rows_data = nullptr;
    xpu::VectorParam<int64_t> in1_rows_vec{
        in1_rows.data(), static_cast<int>(in1_rows.size()), in1_rows_data};

    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(
        in1_row_numel,
        input2->numel() / in1_height,
        phi::errors::InvalidArgument(
            "The two inputs width must be equal."
            "But received first input width = [%d], second input width = [%d]",
            in1_row_numel,
            input2->numel() / in1_height));

    auto* in1_data = in1_value.data<T>();
    auto* out_data = input2->data<T>();

    // Shape of the scattered source: one row per selected id.
    int h = in1_rows.size();
    int w = in1_row_numel;
    const std::vector<int> xshape{h, w};

    // NOTE(review): the final `false` argument looks like an
    // overwrite/accumulate switch (accumulate here) — verify against the
    // xpu::scatter API documentation.
    int r = xpu::scatter<XPUType, int64_t>(
        context.x_context(),
        nullptr,
        reinterpret_cast<const XPUType*>(in1_data),
        reinterpret_cast<XPUType*>(out_data),
        in1_rows_vec,
        xshape,
        0,
        false);
    PADDLE_ENFORCE_XDNN_SUCCESS(r, "scatter");
  }
};

#endif

H
hong 已提交
392 393 394 395
// Explicit instantiations of SelectedRowsAddToTensor for the element types
// used by gradient accumulation kernels.
template struct SelectedRowsAddToTensor<phi::CPUContext, float>;
template struct SelectedRowsAddToTensor<phi::CPUContext, double>;
template struct SelectedRowsAddToTensor<phi::CPUContext, int>;
template struct SelectedRowsAddToTensor<phi::CPUContext, int64_t>;
template struct SelectedRowsAddToTensor<phi::CPUContext, phi::dtype::bfloat16>;

#ifdef PADDLE_WITH_XPU
template struct SelectedRowsAddToTensor<phi::XPUContext, float>;
#endif
T
typhoonzero 已提交
401 402 403 404 405 406 407 408
// This is a separate namespace for manipulating SelectedRows-typed
// data, e.g. merging duplicated rows, or adding two SelectedRows.
//
// Another group of functors is called "scatter updates", which means
// using SelectedRows to update a dense tensor with different ops, like
// add or mul.
namespace scatter {

409
template <typename T, typename DeviceContext>
410
typename std::enable_if<!std::is_integral<T>::value>::type elementwise_add_to(
411 412 413
    phi::funcs::BlasT<DeviceContext, T>* blas,
    size_t data_len,
    const T* in,
414
    T* out) {
415
  blas->AXPY(data_len, T(1.f), in, out);
Q
Qiao Longfei 已提交
416 417
}

418
template <typename T, typename DeviceContext>
419
typename std::enable_if<std::is_integral<T>::value>::type elementwise_add_to(
420 421 422
    phi::funcs::BlasT<DeviceContext, T>* blas,
    size_t data_len,
    const T* in,
423
    T* out) {
T
Tao Luo 已提交
424
  for (size_t i = 0; i < data_len; i++) {
Q
Qiao Longfei 已提交
425 426
    out[i] += in[i];
  }
T
typhoonzero 已提交
427 428
}

429
template <typename T, typename DeviceContext>
430
typename std::enable_if<std::is_same<T, phi::dtype::bfloat16>::value>::type
431
add_sparse_inputs(const std::vector<const phi::SelectedRows*>& inputs,
432
                  const std::unordered_map<int64_t, size_t>& rows_to_id,
433 434
                  int64_t input_width,
                  const DeviceContext& context,
435
                  T* out_data) {
436
#ifndef PADDLE_WITH_MKLDNN
437
  auto blas = phi::funcs::GetBlas<DeviceContext, T>(context);
438 439 440 441 442 443 444 445 446
#endif
  for (auto* input : inputs) {
    if (input->rows().size() == 0) {
      continue;
    }
    auto* input_data = input->value().data<T>();
    auto& input_rows = input->rows();

#ifdef PADDLE_WITH_MKLDNN
447 448 449
    OneDNNContext onednn_context(context.GetPlace());
    funcs::OneDNNAXPYHandler<T> axpy_handler(
        input_width, T(1.f), onednn_context.GetEngine());
450 451 452 453 454 455 456 457
    for (size_t i = 0; i < input_rows.size(); i++) {
      size_t out_i = rows_to_id.at(input_rows[i]);
      axpy_handler(&input_data[i * input_width],
                   &out_data[out_i * input_width]);
    }
#else
    for (size_t i = 0; i < input_rows.size(); i++) {
      size_t out_i = rows_to_id.at(input_rows[i]);
458 459 460 461
      elementwise_add_to<T, DeviceContext>(&blas,
                                           static_cast<size_t>(input_width),
                                           &input_data[i * input_width],
                                           &out_data[out_i * input_width]);
462 463 464 465 466
    }
#endif
  }
}

467
template <typename T, typename DeviceContext>
468
typename std::enable_if<!std::is_same<T, phi::dtype::bfloat16>::value>::type
469
add_sparse_inputs(const std::vector<const phi::SelectedRows*>& inputs,
470
                  const std::unordered_map<int64_t, size_t>& rows_to_id,
471 472
                  int64_t input_width,
                  const DeviceContext& context,
473
                  T* out_data) {
474
  VLOG(4) << "[CPU] add_sparse_inputs <" << typeid(T).name();
475
  auto blas = phi::funcs::GetBlas<DeviceContext, T>(context);
476 477 478 479 480 481 482 483 484
  for (auto* input : inputs) {
    if (input->rows().size() == 0) {
      continue;
    }
    auto* input_data = input->value().data<T>();
    auto& input_rows = input->rows();

    for (size_t i = 0; i < input_rows.size(); i++) {
      size_t out_i = rows_to_id.at(input_rows[i]);
485 486 487 488
      elementwise_add_to<T, DeviceContext>(&blas,
                                           static_cast<size_t>(input_width),
                                           &input_data[i * input_width],
                                           &out_data[out_i * input_width]);
489 490 491 492
    }
  }
}

493 494 495
template <typename DeviceContext, typename T>
struct MergeAddImpl {
  phi::SelectedRows operator()(const DeviceContext& context,
496 497 498
                               const phi::SelectedRows& input,
                               const bool sorted_result = false) {
    phi::SelectedRows out;
499
    (*this)(context, input, &out, sorted_result);
S
sneaxiy 已提交
500 501 502
    return out;
  }

503 504 505 506
  void operator()(const DeviceContext& context,
                  const phi::SelectedRows& input,
                  phi::SelectedRows* output,
                  const bool sorted_result = false) {
507
    std::vector<const phi::SelectedRows*> inputs;
508
    inputs.push_back(&input);
509
    (*this)(context, inputs, output, sorted_result);
510
  }
T
typhoonzero 已提交
511

512
  void operator()(const DeviceContext& context,
513
                  const std::vector<const phi::SelectedRows*>& inputs,
514 515
                  phi::SelectedRows* output,
                  const bool sorted_result = false) {
Q
Qiao Longfei 已提交
516
    if (inputs.size() == 0) {
M
minqiyang 已提交
517
      VLOG(3) << "no input! return";
Q
Qiao Longfei 已提交
518 519
      return;
    }
520
    const phi::SelectedRows* has_value_input = nullptr;
Q
Qiao Longfei 已提交
521
    for (auto* in : inputs) {
Q
Qiao Longfei 已提交
522
      if (in->rows().size() > 0) {
Q
Qiao Longfei 已提交
523 524 525 526 527
        has_value_input = in;
        break;
      }
    }
    if (has_value_input == nullptr) {
M
minqiyang 已提交
528
      VLOG(3) << "no input has value! just return" << std::endl;
Q
Qiao Longfei 已提交
529 530 531 532
      return;
    }
    auto input_width = has_value_input->value().dims()[1];
    auto input_height = has_value_input->height();
533
    phi::SelectedRows& out = *output;
534
    std::set<int64_t> merged_row_set;
535
    size_t row_num = 0;
536
    for (auto* input : inputs) {
Q
Qiao Longfei 已提交
537
      if (input->rows().size() == 0) {
Q
Qiao Longfei 已提交
538 539
        continue;
      }
540 541 542 543 544 545 546 547 548
      PADDLE_ENFORCE_EQ(
          input_width,
          input->value().dims()[1],
          phi::errors::InvalidArgument("All inputs should have same "
                                       "dimension except for the first one."));
      PADDLE_ENFORCE_EQ(
          input_height,
          input->height(),
          phi::errors::InvalidArgument("All inputs should have same height."));
549
      row_num += input->rows().size();
550 551
      merged_row_set.insert(input->rows().begin(), input->rows().end());
    }
552

553
    out.set_height(input_height);
554 555 556 557
    DenseTensor* out_tensor = out.mutable_value();
    out_tensor->Resize(phi::make_ddim(
        {static_cast<int64_t>(merged_row_set.size()), input_width}));
    auto* out_data = context.template Alloc<T>(out_tensor);
T
typhoonzero 已提交
558

559 560 561 562 563 564
    if (merged_row_set.size() == row_num && !sorted_result) {
      // no duplicated ids, just concat the result together
      std::vector<int64_t> merge_rows;
      merge_rows.reserve(row_num);
      // concat rows
      for (auto* in : inputs) {
565 566
        merge_rows.insert(
            merge_rows.end(), in->rows().begin(), in->rows().end());
567 568 569 570 571 572 573
      }
      out.set_rows(merge_rows);
      auto in_place = inputs[0]->place();
      auto out_place = out.place();
      int64_t copied_numel = 0;
      for (auto* in : inputs) {
        auto* in_data = in->value().data<T>();
574
        auto in_numel = in->rows().size() * input_width;
575 576 577 578 579
        memory_utils::Copy(out_place,
                           out_data + copied_numel,
                           in_place,
                           in_data,
                           in_numel * sizeof(T));
580 581 582 583 584
        copied_numel += in_numel;
      }
    } else {
      std::vector<int64_t> merge_rows(merged_row_set.begin(),
                                      merged_row_set.end());
T
typhoonzero 已提交
585

586 587 588
      if (sorted_result) {
        std::sort(merge_rows.begin(), merge_rows.end());
      }
T
typhoonzero 已提交
589

590 591
      out.set_rows(merge_rows);

592
      phi::funcs::SetConstant<DeviceContext, T> constant_functor;
593
      constant_functor(context, out.mutable_value(), static_cast<T>(0.f));
594 595 596 597

      std::unordered_map<int64_t, size_t> rows_to_id;
      for (size_t i = 0; i < merge_rows.size(); ++i) {
        rows_to_id[merge_rows[i]] = i;
Q
Qiao Longfei 已提交
598
      }
599

600 601
      add_sparse_inputs<T, DeviceContext>(
          inputs, rows_to_id, input_width, context, out_data);
T
typhoonzero 已提交
602
    }
T
wip  
typhoonzero 已提交
603 604 605
  }
};

606 607 608 609 610 611 612 613 614 615 616
// CPU MergeAdd facade: every overload simply forwards to MergeAddImpl.
template <typename T>
struct MergeAdd<phi::CPUContext, T> {
  // unary functor, merge by adding duplicated rows in
  // the input SelectedRows object.
  phi::SelectedRows operator()(const phi::CPUContext& context,
                               const phi::SelectedRows& input,
                               const bool sorted_result) {
    return MergeAddImpl<phi::CPUContext, T>()(context, input, sorted_result);
  }

  void operator()(const phi::CPUContext& context,
                  const phi::SelectedRows& input,
                  phi::SelectedRows* output,
                  const bool sorted_result) {
    MergeAddImpl<phi::CPUContext, T>()(context, input, output, sorted_result);
  }

  void operator()(const phi::CPUContext& context,
                  const std::vector<const phi::SelectedRows*>& inputs,
                  phi::SelectedRows* output,
                  const bool sorted_result) {
    MergeAddImpl<phi::CPUContext, T>()(context, inputs, output, sorted_result);
  }
};

L
Leo Chen 已提交
631 632
// Instantiate both the implementation and the facade for each supported
// CPU element type.
#define TEMPLATE_SPECIALIZED_FOR_MERGEADD_CPU(dtype)    \
  template struct MergeAddImpl<phi::CPUContext, dtype>; \
  template struct MergeAdd<phi::CPUContext, dtype>;

TEMPLATE_SPECIALIZED_FOR_MERGEADD_CPU(float)
TEMPLATE_SPECIALIZED_FOR_MERGEADD_CPU(double)
TEMPLATE_SPECIALIZED_FOR_MERGEADD_CPU(int)
TEMPLATE_SPECIALIZED_FOR_MERGEADD_CPU(int64_t)
TEMPLATE_SPECIALIZED_FOR_MERGEADD_CPU(phi::dtype::bfloat16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD_CPU(phi::dtype::complex<float>)
TEMPLATE_SPECIALIZED_FOR_MERGEADD_CPU(phi::dtype::complex<double>)
642

643 644
#ifdef PADDLE_WITH_XPU
// XPU MergeAdd: collects the unique row ids on the host, then lets XDNN's
// merge_dup_rows kernel accumulate duplicated source rows into the merged
// output rows on the device.
template <typename T>
struct MergeAdd<phi::XPUContext, T> {
  phi::SelectedRows operator()(const phi::XPUContext& context,
                               const phi::SelectedRows& input,
                               const bool sorted_result = false) {
    phi::SelectedRows out;
    (*this)(context, input, &out, sorted_result);
    return out;
  }

  void operator()(const phi::XPUContext& context,
                  const phi::SelectedRows& input,
                  phi::SelectedRows* output,
                  const bool sorted_result = false) {
    phi::Vector<int64_t> input_rows(input.rows());
    if (input_rows.size() == 0) {
      return;
    }

    phi::SelectedRows& out = *output;
    // std::set both deduplicates and orders the row ids.
    std::set<int64_t> row_set(input_rows.begin(), input_rows.end());
    std::vector<int64_t> merge_rows(row_set.begin(), row_set.end());
    auto input_width = input.value().dims()[1];

    out.set_rows(merge_rows);
    out.set_height(input.height());
    DenseTensor* out_tensor = out.mutable_value();
    out_tensor->Resize(
        phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
    context.template Alloc<T>(out_tensor);

    std::unordered_map<int64_t, size_t> rows_to_id;
    for (size_t i = 0; i < merge_rows.size(); ++i) {
      rows_to_id[merge_rows[i]] = i;
    }

    auto* y_data = out.mutable_value()->data<T>();
    auto* x_data = input.value().data<T>();
    int xm = input_rows.size();
    int ym = merge_rows.size();
    int n = input_width;

    // Stage the source and merged row-id arrays on the device for XDNN.
    xpu::ctx_guard RAII_GUARD(context.x_context());
    int64_t* x_rows_data = RAII_GUARD.alloc_l3_or_gm<int64_t>(xm);
    int64_t* y_rows_data = RAII_GUARD.alloc_l3_or_gm<int64_t>(ym);
    memory_utils::Copy(context.GetPlace(),
                       y_rows_data,
                       phi::CPUPlace(),
                       merge_rows.data(),
                       ym * sizeof(int64_t));
    memory_utils::Copy(context.GetPlace(),
                       x_rows_data,
                       phi::CPUPlace(),
                       input_rows.data(),
                       xm * sizeof(int64_t));
    int r = xpu::merge_dup_rows<T, int64_t>(context.x_context(),
                                            x_data,
                                            y_data,
                                            x_rows_data,
                                            y_rows_data,
                                            xm,
                                            n,
                                            ym);
    PADDLE_ENFORCE_XDNN_SUCCESS(r, "merge_dup_rows");
  }

  void operator()(const phi::XPUContext& context,
                  const std::vector<const phi::SelectedRows*>& inputs,
                  phi::SelectedRows* output,
                  const bool sorted_result = false) {
    if (inputs.size() == 0) {
      VLOG(3) << "no input! return";
      return;
    }
    // Use the first non-empty input to fix the expected width and height.
    const phi::SelectedRows* has_value_input = nullptr;
    for (auto* in : inputs) {
      if (in->rows().size() > 0) {
        has_value_input = in;
        break;
      }
    }
    if (has_value_input == nullptr) {
      VLOG(3) << "no input has value! just return" << std::endl;
      return;
    }
    auto input_width = has_value_input->value().dims()[1];
    auto input_height = has_value_input->height();
    phi::SelectedRows& out = *output;
    std::set<int64_t> merged_row_set;
    size_t row_num = 0;
    for (auto* input : inputs) {
      if (input->rows().size() == 0) {
        continue;
      }
      PADDLE_ENFORCE_EQ(
          input_width,
          input->value().dims()[1],
          phi::errors::InvalidArgument("All inputs should have same "
                                       "dimension except for the first one."));
      PADDLE_ENFORCE_EQ(
          input_height,
          input->height(),
          phi::errors::InvalidArgument("All inputs should have same height."));
      row_num += input->rows().size();
      merged_row_set.insert(input->rows().begin(), input->rows().end());
    }

    std::vector<int64_t> merge_rows(merged_row_set.begin(),
                                    merged_row_set.end());

    if (sorted_result) {
      std::sort(merge_rows.begin(), merge_rows.end());
    }

    out.set_rows(merge_rows);
    out.set_height(input_height);

    DenseTensor* out_tensor = out.mutable_value();
    out_tensor->Resize(phi::make_ddim(
        {static_cast<int64_t>(merged_row_set.size()), input_width}));
    context.template Alloc<T>(out_tensor);

    // NOTE(review): the output pointer is forced to float*, which only type
    // checks for T == float — presumably the XPU path is instantiated for
    // float only; confirm before adding other types.
    float* y_data = reinterpret_cast<float*>(out_tensor->data<T>());

    std::unordered_map<int64_t, size_t> rows_to_id;
    for (size_t i = 0; i < merge_rows.size(); ++i) {
      rows_to_id[merge_rows[i]] = i;
    }

    // Each input is merged into the shared output via XDNN; duplicated rows
    // across inputs land in the same output slot.
    for (auto* input : inputs) {
      if (input->rows().size() == 0) {
        continue;
      }
      auto& input_rows = input->rows();

      auto* x_data = input->value().data<T>();
      int xm = input_rows.size();
      int ym = merge_rows.size();
      int n = input_width;

      xpu::ctx_guard RAII_GUARD(context.x_context());
      int64_t* x_rows_data = RAII_GUARD.alloc_l3_or_gm<int64_t>(xm);
      int64_t* y_rows_data = RAII_GUARD.alloc_l3_or_gm<int64_t>(ym);
      memory_utils::Copy(context.GetPlace(),
                         y_rows_data,
                         phi::CPUPlace(),
                         merge_rows.data(),
                         ym * sizeof(int64_t));
      memory_utils::Copy(context.GetPlace(),
                         x_rows_data,
                         phi::CPUPlace(),
                         input_rows.data(),
                         xm * sizeof(int64_t));
      int r = xpu::merge_dup_rows<T, int64_t>(context.x_context(),
                                              x_data,
                                              y_data,
                                              x_rows_data,
                                              y_rows_data,
                                              xm,
                                              n,
                                              ym);
      PADDLE_ENFORCE_XDNN_SUCCESS(r, "merge_dup_rows");
    }
  }
};

#endif
811
template <typename T>
L
Leo Chen 已提交
812 813
struct MergeAverage<phi::CPUContext, T> {
  phi::SelectedRows operator()(const phi::CPUContext& context,
814 815
                               const phi::SelectedRows& input) {
    phi::SelectedRows out;
816 817 818 819
    (*this)(context, input, &out);
    return out;
  }

L
Leo Chen 已提交
820
  void operator()(const phi::CPUContext& context,
821 822
                  const phi::SelectedRows& input,
                  phi::SelectedRows* output) {
823
    std::vector<const phi::SelectedRows*> inputs;
824 825 826 827
    inputs.push_back(&input);
    (*this)(context, inputs, output);
  }

L
Leo Chen 已提交
828
  void operator()(const phi::CPUContext& context,
829 830
                  const std::vector<const phi::SelectedRows*>& inputs,
                  phi::SelectedRows* output) {
831 832 833 834
    if (inputs.size() == 0) {
      VLOG(3) << "no input! return";
      return;
    }
835
    const phi::SelectedRows* has_value_input = nullptr;
836 837 838 839 840 841 842 843 844 845 846 847
    for (auto* in : inputs) {
      if (in->rows().size() > 0) {
        has_value_input = in;
        break;
      }
    }
    if (has_value_input == nullptr) {
      VLOG(3) << "no input has value! just return" << std::endl;
      return;
    }
    auto input_width = has_value_input->value().dims()[1];
    auto input_height = has_value_input->height();
848
    phi::SelectedRows& out = *output;
849 850 851 852 853 854
    std::set<int64_t> merged_row_set;
    size_t row_num = 0;
    for (auto* input : inputs) {
      if (input->rows().size() == 0) {
        continue;
      }
855 856 857 858 859 860 861 862 863
      PADDLE_ENFORCE_EQ(
          input_width,
          input->value().dims()[1],
          phi::errors::InvalidArgument("All inputs should have same "
                                       "dimension except for the first one."));
      PADDLE_ENFORCE_EQ(
          input_height,
          input->height(),
          phi::errors::InvalidArgument("All input should have same height."));
864 865 866 867 868
      row_num += input->rows().size();
      merged_row_set.insert(input->rows().begin(), input->rows().end());
    }

    out.set_height(input_height);
869 870 871 872 873

    DenseTensor* out_tensor = out.mutable_value();
    out_tensor->Resize(phi::make_ddim(
        {static_cast<int64_t>(merged_row_set.size()), input_width}));
    auto* out_data = context.template Alloc<T>(out_tensor);
874 875 876 877 878 879 880

    std::vector<int64_t> merge_rows(merged_row_set.begin(),
                                    merged_row_set.end());
    std::sort(merge_rows.begin(), merge_rows.end());

    out.set_rows(merge_rows);

L
Leo Chen 已提交
881
    phi::funcs::SetConstant<phi::CPUContext, T> constant_functor;
882 883 884 885 886 887 888
    constant_functor(context, out.mutable_value(), 0.0);

    std::unordered_map<int64_t, size_t> rows_to_id;
    for (size_t i = 0; i < merge_rows.size(); ++i) {
      rows_to_id[merge_rows[i]] = i;
    }

L
Leo Chen 已提交
889
    auto blas = phi::funcs::GetBlas<phi::CPUContext, T>(context);
890 891 892 893 894 895 896 897 898
    for (auto* input : inputs) {
      if (input->rows().size() == 0) {
        continue;
      }
      auto* input_data = input->value().data<T>();
      auto& input_rows = input->rows();

      for (size_t i = 0; i < input_rows.size(); i++) {
        size_t out_i = rows_to_id[input_rows[i]];
899 900
        elementwise_add_to<T>(&blas,
                              static_cast<size_t>(input_width),
901 902
                              &input_data[i * input_width],
                              &out_data[out_i * input_width]);
903 904 905 906 907 908 909 910 911 912 913 914
      }
    }
    size_t input_width_cast = static_cast<size_t>(input_width);
    T count = static_cast<T>(inputs.size());
    for (size_t i = 0; i < merge_rows.size(); i++) {
      for (size_t j = 0; j < input_width_cast; j++) {
        out_data[i * input_width + j] = out_data[i * input_width + j] / count;
      }
    }
  }
};

915
#ifdef PADDLE_WITH_XPU
template struct MergeAdd<phi::XPUContext, float>;
#endif

// Explicit instantiations of MergeAverage for the CPU backend.
template struct MergeAverage<phi::CPUContext, int>;
template struct MergeAverage<phi::CPUContext, int64_t>;
template struct MergeAverage<phi::CPUContext, float>;
template struct MergeAverage<phi::CPUContext, double>;
923

T
wip  
typhoonzero 已提交
924
template <typename T>
L
Leo Chen 已提交
925 926
struct UpdateToTensor<phi::CPUContext, T> {
  void operator()(const phi::CPUContext& context,
927 928
                  const ScatterOps& op,
                  const phi::SelectedRows& input1,
929
                  phi::DenseTensor* input2) {
T
wip  
typhoonzero 已提交
930
    auto in1_height = input1.height();
931
    const auto& in2_dims = input2->dims();
932
    PADDLE_ENFORCE_EQ(
933 934
        in1_height,
        in2_dims[0],
935 936 937 938 939
        phi::errors::InvalidArgument("The two inputs height must be equal."
                                     "But received first input height = "
                                     "[%d], second input height = [%d]",
                                     in1_height,
                                     in2_dims[0]));
T
wip  
typhoonzero 已提交
940 941 942 943 944

    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();

    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
945
    PADDLE_ENFORCE_EQ(
946 947
        in1_row_numel,
        input2->numel() / in1_height,
948 949 950 951 952
        phi::errors::InvalidArgument("The two inputs width must be equal."
                                     "But received first input width = [%d], "
                                     "second input width = [%d]",
                                     in1_row_numel,
                                     input2->numel() / in1_height));
T
wip  
typhoonzero 已提交
953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996

    auto* in1_data = in1_value.data<T>();
    auto* input2_data = input2->data<T>();

    // FIXME(typhoonzero): use macro fix the below messy code.
    switch (op) {
      case ScatterOps::ASSIGN:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] =
            in1_data[i * in1_row_numel + j];
        break;
      case ScatterOps::ADD:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] +=
            in1_data[i * in1_row_numel + j];
        break;
      case ScatterOps::SUB:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] -=
            in1_data[i * in1_row_numel + j];
        break;
      case ScatterOps::SUBBY:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] =
            in1_data[i * in1_row_numel + j] -
            input2_data[in1_rows[i] * in1_row_numel + j];
        break;
      case ScatterOps::MUL:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] *=
            in1_data[i * in1_row_numel + j];
        break;
      case ScatterOps::DIV:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] /=
            in1_data[i * in1_row_numel + j];
        break;
      case ScatterOps::DIVBY:
        INLINE_FOR2(in1_rows.size(), in1_row_numel)
        input2_data[in1_rows[i] * in1_row_numel + j] =
            in1_data[i * in1_row_numel + j] /
            input2_data[in1_rows[i] * in1_row_numel + j];
        break;
    }
T
typhoonzero 已提交
997 998 999 1000
  }
};

}  // namespace scatter
1001 1002
}  // namespace funcs
}  // namespace phi