// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/reducer.h"

#include <iostream>

#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/parallel_context.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/phi/kernels/funcs/strided_memcpy.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/enforce_xpu.h"
#endif
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/core/dense_tensor.h"
namespace paddle {
namespace imperative {

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) ||     \
    defined(PADDLE_WITH_XPU_BKCL) || defined(PADDLE_WITH_GLOO) || \
    defined(PADDLE_WITH_CNCL)
// Divide the gradients by nranks
void Group::DivNRanks(const platform::DeviceContext &context, int64_t nranks) {
  phi::DenseTensor *tensor =
      is_sparse_
          ? sparse_contents_->GetMutable<phi::SelectedRows>()->mutable_value()
          : dense_contents_.GetMutable<phi::DenseTensor>();

  if (platform::is_gpu_place(tensor->place())) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    DivNRanks(tensor, nranks, context);
#endif
  } else if (platform::is_npu_place(tensor->place())) {
    // TODO(kuizhiqing)
    VLOG(4) << "divnrank for npu not supported yet";
  } else if (platform::is_cpu_place(tensor->place())) {
    VLOG(4) << "before div 2" << *tensor;
    VLOG(4) << "NDiv for cpu devices : rank = " << nranks;
#ifdef PADDLE_WITH_HIP
    if (dtype_ == paddle::framework::proto::VarType_Type_BF16) {
      PADDLE_THROW(paddle::platform::errors::Fatal(
          "BF16 is not supported in DataParallel for now"));
    }
    framework::VisitDataTypeForHIP(
        dtype_,
        DivNRanksForAllReduce<phi::CPUContext>(tensor, nranks, context));
#else
    framework::VisitDataType(
        dtype_,
        DivNRanksForAllReduce<phi::CPUContext>(tensor, nranks, context));
#endif
    VLOG(4) << "after div 2" << *tensor;
  } else if (platform::is_xpu_place(tensor->place())) {
#ifdef PADDLE_WITH_XPU_BKCL
// TODO(liuyuhui) support xpu about div nranks in the future
#endif
  } else if (platform::is_mlu_place(tensor->place())) {
    // TODO(zhangna)
    VLOG(4) << "divnrank for mlu not supported yet";
  }
}

template <typename DeviceContext, typename T>
static void ConcatTensorsForAllReduce(
    const DeviceContext &context,
    const std::vector<phi::DenseTensor> &dense_tensors_,
    framework::Variable *p_dense_contents) {
  operators::math::ConcatFunctor<DeviceContext, T> concat_functor_;
  concat_functor_(context,
                  dense_tensors_,
                  0,
                  p_dense_contents->GetMutable<phi::DenseTensor>());
}

template <typename DeviceContext, typename T>
static void SplitTensorsForAllReduce(
    const DeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<phi::DenseTensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<phi::DenseTensor>();
  std::vector<phi::DenseTensor *> outs;
  std::vector<const phi::DenseTensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  // Sometimes direct copies will be faster
  if (p_dense_tensors->size() < 10) {
    phi::funcs::StridedMemcpyWithAxis0<T, DeviceContext>(
        context, *in, shape_refer, &outs);
  } else {
    operators::math::SplitFunctor<DeviceContext, T> split_functor_;
    split_functor_(context, *in, shape_refer, 0, &outs);
  }
}
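
// Illustrative layout (assumed sizes, not taken from the source): given three
// FP32 gradients of 2, 3, and 4 elements, ConcatTensorsForAllReduce packs
// them into one 9-element fused buffer [g0 | g1 | g2] inside
// p_dense_contents; after the fused allreduce, SplitTensorsForAllReduce
// copies each slice back into the original per-parameter tensors.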

// context is used to select the stream for concat
template <typename DeviceContext>
static void ConcatTensorsWithType(
    const DeviceContext &context,
    const std::vector<phi::DenseTensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      ConcatTensorsForAllReduce<DeviceContext, platform::float16>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<DeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP64:
      ConcatTensorsForAllReduce<DeviceContext, double>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it concats tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <typename DeviceContext>
static void SplitTensorsWithType(const DeviceContext &context,
                                 framework::Variable *p_dense_contents,
                                 std::vector<phi::DenseTensor> *p_dense_tensors,
                                 framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      SplitTensorsForAllReduce<DeviceContext, platform::float16>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<DeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP64:
      SplitTensorsForAllReduce<DeviceContext, double>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it splits tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

#ifdef PADDLE_WITH_XPU_BKCL
template <>
void SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<phi::DenseTensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<phi::DenseTensor>();
  std::vector<phi::DenseTensor *> outs;
  std::vector<const phi::DenseTensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  operators::math::SplitFunctor<platform::XPUDeviceContext, float>
      split_functor_;
  split_functor_(context, *in, shape_refer, 0, &outs);
}

// context is used to select the stream for concat
template <>
void ConcatTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    const std::vector<phi::DenseTensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it concats tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <>
void SplitTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<phi::DenseTensor> *p_dense_tensors,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it splits tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}
#endif

#ifdef PADDLE_WITH_CNCL
// context is used to select the stream for concat
template <>
void ConcatTensorsWithType<platform::MLUDeviceContext>(
    const platform::MLUDeviceContext &context,
    const std::vector<phi::DenseTensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      ConcatTensorsForAllReduce<platform::MLUDeviceContext, platform::float16>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<platform::MLUDeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it concats tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <>
void SplitTensorsWithType<platform::MLUDeviceContext>(
    const platform::MLUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<phi::DenseTensor> *p_dense_tensors,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      SplitTensorsForAllReduce<platform::MLUDeviceContext, platform::float16>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<platform::MLUDeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it splits tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}
#endif

void Group::ConcatTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    ConcatTensorsWithType(static_cast<const phi::GPUContext &>(context),
                          dense_tensors_,
                          &dense_contents_,
                          dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat grad tensors since it's not compiled with NCCL. "
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    ConcatTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        dense_tensors_,
        &dense_contents_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat xpu grads since it's not compiled with BKCL. "
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_npu_place(place)) {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat npu grads since it's not compiled with HCCL. "
        "Please recompile or reinstall Paddle with HCCL support."));

  } else if (platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_CNCL
    ConcatTensorsWithType(
        static_cast<const platform::MLUDeviceContext &>(context),
        dense_tensors_,
        &dense_contents_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat mlu grads since it's not compiled with CNCL. "
        "Please recompile or reinstall Paddle with CNCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    ConcatTensorsWithType(static_cast<const phi::CPUContext &>(context),
                          dense_tensors_,
                          &dense_contents_,
                          dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Concat grad tensor not supported on place (%s)", place));
  }
}

void Group::SplitTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    SplitTensorsWithType(static_cast<const phi::GPUContext &>(context),
                         &dense_contents_,
                         &dense_tensors_,
                         dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split grad tensor since it's not compiled with NCCL. "
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    SplitTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        &dense_contents_,
        &dense_tensors_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split xpu grad since it's not compiled with BKCL. "
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_npu_place(place)) {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split npu grad since it's not compiled with HCCL. "
        "Please recompile or reinstall Paddle with HCCL support."));

  } else if (platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_CNCL
    SplitTensorsWithType(
        static_cast<const platform::MLUDeviceContext &>(context),
        &dense_contents_,
        &dense_tensors_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split mlu grad since it's not compiled with CNCL. "
        "Please recompile or reinstall Paddle with CNCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    SplitTensorsWithType(static_cast<const phi::CPUContext &>(context),
                         &dense_contents_,
                         &dense_tensors_,
                         dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Split grad tensor not supported on place (%s)", place));
  }
}

std::ostream &operator<<(std::ostream &out, const Group &group) {
  const auto &vars = group.variable_indices_;
  out << "numel: " << group.all_length_ << " ;is_sparse: " << group.is_sparse_
      << " ;var number: " << vars.size() << "\n";
  auto begin = vars.begin();
  auto end = vars.end();
  out << "[";
  for (int i = 0; begin != end && i < 100; ++i, ++begin) {
    if (i > 0) out << ' ';
    out << *begin;
  }
  if (begin != end) {
    out << " ...";
  }
  out << "]\n";
  return out;
}

Reducer::Reducer(const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
                 const std::vector<std::vector<size_t>> &group_indices,
                 const std::vector<bool> &is_sparse_gradient,
                 std::shared_ptr<imperative::ParallelContext> parallel_ctx,
                 const std::vector<size_t> &group_size_limits,
                 bool find_unused_vars)
    : vars_(vars),
      group_indices_(group_indices),
      is_sparse_gradient_(is_sparse_gradient),
      parallel_ctx_(parallel_ctx),
      group_size_limits_(group_size_limits),
      find_unused_vars_each_step_(find_unused_vars) {
  VLOG(3) << "Start construct the Reducer ...";
  nrings_ = parallel_ctx->GetNRings();
  nranks_ = parallel_ctx->GetNRanks();
  // initialize groups
  InitializeGroups(group_indices);
  for (size_t global_var_index = 0; global_var_index < vars_.size();
       ++global_var_index) {
    auto var = vars_[global_var_index];
    var->GradVarBase()->AddVoidHook(std::make_shared<std::function<void()>>(
        [=]() { this->AddDistHook(global_var_index); }));
    var_index_map_[var->GradVarBase()->SharedVar().get()] = global_var_index;
  }

  // for checking var is ready once
  vars_marked_ready_.resize(vars_.size(), false);

  // Initialize local used vars
  local_used_vars_.resize(vars_.size(), 0);
}

void Reducer::InitializeDenseGroups(
    const std::vector<size_t> &variable_indices_, Group *p_group) {
  int64_t all_length = 0;
  for (size_t index = 0; index < variable_indices_.size(); ++index) {
    const auto variable_index = variable_indices_[index];
    const auto &var = vars_[variable_index];
    const auto &var_name = var->Name();
    PADDLE_ENFORCE_EQ(is_sparse_gradient_[variable_index],
                      false,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s's GRAD must be LoDTensor, but received "
                          "GRAD is SelectedRows",
                          var_name));

    auto lod_tensor = var->MutableVar()->GetMutable<phi::DenseTensor>();
    PADDLE_ENFORCE_EQ(lod_tensor->IsInitialized(),
                      true,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s is not initialized.", var_name));
    const auto size = lod_tensor->numel();
    PADDLE_ENFORCE_GT(
        size,
        0,
        platform::errors::PreconditionNotMet(
            "The number of tensor %s's elements is 0.", var_name));
    all_length += size;

    p_group->length_.push_back(size);

    // for concat operator
    p_group->dense_tensors_.push_back(phi::DenseTensor());

    // check the dtype and place; they must be the same.
    const auto &dtype = var->DataType();
    const auto &place = var->Place();
    if (index > 0) {
      PADDLE_ENFORCE_EQ(
          dtype,
          p_group->dtype_,
          platform::errors::PreconditionNotMet(
              "Tensor %s has different dtype. Expected dtype is %s, but actual "
              "dtype is %s",
              var_name,
              framework::DataTypeToString(p_group->dtype_),
              framework::DataTypeToString(dtype)));
      PADDLE_ENFORCE_EQ(place,
                        place_,
                        platform::errors::PreconditionNotMet(
                            "Tensor %s has different place. Expected place is "
                            "%s, but actual place is %s",
                            var_name,
                            place_,
                            place));
    } else {
      p_group->dtype_ = dtype;
      place_ = place;
    }
  }
  p_group->all_length_ = all_length;
}

// Each parameter will be initialized according to the group information.
// For the sparse parameter, sparse_contents_ in the group directly points
// to the parameter. For dense parameters, first construct an empty Tensor().
// Then specify the actual memory in MarkVarReady.
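// Illustrative example (assumed model, not from this file): with four
// parameters [w0, w1, w2, w3] where only w1 has a sparse gradient and
// group_indices = {{0}, {1}, {2, 3}}, groups 0 and 2 are dense (their
// gradients are later fused into dense_contents_), while group 1 is sparse
// and its sparse_contents_ will point directly at w1's gradient variable.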
void Reducer::InitializeGroups(
    const std::vector<std::vector<size_t>> &group_indices) {
  VLOG(3) << "Start initialize groups ..";
  // clear the group
  groups_.clear();
  groups_.reserve(group_indices.size());
  variable_locators_.clear();
  variable_locators_.resize(vars_.size());

  auto group_nums = group_indices.size();
  for (size_t group_index = 0; group_index < group_nums; ++group_index) {
    const auto &variable_indices_ = group_indices[group_index];
    PADDLE_ENFORCE_GT(
        variable_indices_.size(),
        0,
        platform::errors::PreconditionNotMet(
            "The number of group[%d]'s elements is 0.", group_index));
    Group group;

    // It is only used to check whether the group is sparse or dense
    auto first_varbase = vars_[variable_indices_.front()];
    if (variable_indices_.size() == 1 &&
        is_sparse_gradient_[variable_indices_.front()]) {
      // process the sparse gradient. one sparse, one group
      group.dtype_ = first_varbase->DataType();
      group.is_sparse_ = true;
    } else {
      // process the dense gradient.
      InitializeDenseGroups(variable_indices_, &group);
    }

    // map variables to this group by VariableLocator
    size_t inside_group_index = 0;
    for (const auto var_index : variable_indices_) {
      variable_locators_[var_index] = VariableLocator{
          .group_index = group_index,
          .inside_group_index = inside_group_index++,
      };
    }
    group.variable_indices_ = std::move(variable_indices_);
    groups_.emplace_back(std::move(group));
    // Debug Message For Reducer
    VLOG(3) << "The Group[" << group_index << "]:" << groups_.back();
  }
}

void Reducer::PrepareDeps(const std::unordered_set<GradOpNode *> &init_nodes) {
  PADDLE_ENFORCE_EQ(
      node_deps_.empty(),
      true,
      platform::errors::AlreadyExists("Op deps must be initialized here"));

  std::queue<GradOpNode *> q;
  std::unordered_set<GradOpNode *> visited;

  for (auto pos = init_nodes.begin(); pos != init_nodes.end(); pos++) {
    q.push(*pos);
    visited.insert(*pos);
  }

  while (!q.empty()) {
    auto *cur_node = q.front();
    q.pop();

    const auto &grad_pending_nodes = cur_node->GradPendingNodes();
    for (auto &grad_pending_node : grad_pending_nodes) {
      PADDLE_ENFORCE_NOT_NULL(
          grad_pending_node,
          platform::errors::NotFound("Grad pending node should not be null"));
      // py_layer is not supported in DataParallel
      auto begin = grad_pending_node->begin();
      auto end = grad_pending_node->end();
      for (auto op_base = begin; op_base != end; op_base++) {
        PADDLE_ENFORCE_EQ(
            op_base->Type() != "py_layer",
            true,
            platform::errors::PreconditionNotMet(
                "Note: Currently PyLayer is not supported in DataParallel. For "
                "using PyLayer in a DataParallel model, you can skip gradient "
                "synchronization among multiple cards by 'no_sync', and "
                "manually implement 'all_reduce' before model optimization. "
                "There is an example showing the specific implementation in "
                "official docs: https://www.paddlepaddle.org.cn/documentation"
                "/docs/api/paddle/DataParallel_cn.html"));
      }
      ++node_deps_[grad_pending_node.get()];
      if (visited.count(grad_pending_node.get()) == 0) {
        visited.insert(grad_pending_node.get());
        q.push(grad_pending_node.get());
      }
    }
  }
}

void Reducer::TraverseBackwardGraph(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  node_deps_.clear();
  std::queue<std::shared_ptr<GradOpNode>> q;
  std::unordered_set<VariableWrapper *> var_visited;
  std::unordered_set<GradOpNode *> init_nodes;

  for (const auto &output : outputs) {
    const auto &grad_node = output->GradVarBase()->GradNode();
    if (grad_node == nullptr || output->OverridedStopGradient()) {
      VLOG(3) << "Skip auto grad since there is no grad op or output is "
                 "stop_gradient=True: "
              << output->Name();
      continue;
    } else {
      init_nodes.insert(grad_node.get());
      var_visited.insert(output->SharedVar().get());
      q.push(grad_node);
    }
  }

  PrepareDeps(init_nodes);
  // Traverse the autograd graph starting at the specified output
  while (!q.empty()) {
    auto cur_node = q.front();
    q.pop();

    for (const auto &cur_op : *cur_node) {
      auto &bwd_outs = cur_op.GetOutsMap();
      for (const auto &pair : bwd_outs) {
        if (!pair.second.IsGrad()) {
          continue;
        }
        for (auto &var : pair.second) {
          if (!var || var->OverridedStopGradient()) {
            continue;
          } else {
            var_visited.insert(var.get());
          }
        }
      }
    }
    for (const auto &grad_pending_node : cur_node->GradPendingNodes()) {
      PADDLE_ENFORCE_NOT_NULL(grad_pending_node,
                              platform::errors::NotFound(
                                  "Grad pending node should not be nullptr"));
      auto iter = node_deps_.find(grad_pending_node.get());
      if (iter == node_deps_.end()) {
        continue;
      }
      if (--(iter->second) == 0) {
        q.push(grad_pending_node);
      }
    }
  }

  for (const auto &it : var_index_map_) {
    if (var_visited.count(it.first) == 0) {
      unused_vars_.push_back(it.second);
      VLOG(3) << "Var[" << it.second << "] [" << it.first->Name()
              << "] is not used";
    }
  }
}

// After each batch is calculated, the counter of each group (group.pending_)
// and the allreduce sequence counter (next_group_) will be reset again.
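//
// A rough sketch of the per-iteration order assumed by this file (the
// reducer is driven from the DataParallel wrapper; the exact call sites live
// outside this file):
//   PrepareForBackward(outputs);  // after forward: reset pending_ counters,
//                                 // next_group_ and vars_marked_ready_
//   ... autograd runs and AddDistHook fires once per produced gradient ...
//   FinalizeBackward();           // runs after the last group is reduced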
void Reducer::PrepareForBackward(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  VLOG(3) << "after forward, then reset count for backward.";
  grad_need_hooks_ = true;
  next_group_ = 0;
  std::for_each(groups_.begin(), groups_.end(), [](Group &group) {
    group.pending_ = group.variable_indices_.size();
    group.sparse_contents_ = nullptr;
  });

  // reinitialize vars_marked_ready_ for next iteration
  vars_marked_ready_.clear();
  vars_marked_ready_.resize(vars_.size(), false);

  PADDLE_ENFORCE_EQ(
      groups_need_finalize_,
      false,
      platform::errors::PreconditionNotMet(
          "A serious error has occurred here. Please "
          "set find_unused_parameters=True to traverse the backward graph "
          "in each step to prepare the reduce in advance. If you have "
          "set it, there may be several reasons for this error: "
          "1) Please note that all forward outputs derived from the module "
          "parameters must participate in the calculation of losses and "
          "subsequent gradient calculations. If not, the wrapper will hang, "
          "waiting for autograd to generate gradients for these parameters. "
          "You can use detach or stop_gradient to make the unused parameters "
          "detached from the autograd graph. "
          "2) Multiple forward passes were used with one backward pass. You "
          "may be able to wrap the multiple forward passes in one model."));

  // The first var to trigger the unused parameter
  has_marked_unused_vars_ = false;

  if (find_unused_vars_once_ || find_unused_vars_each_step_) {
    unused_vars_.clear();
    TraverseBackwardGraph(outputs);
    // only check once in first step
    find_unused_vars_once_ = false;
  }

  if (find_unused_vars_each_step_ && unused_vars_.empty()) {
    LOG_FIRST_N(WARNING, 1)
        << "All parameters are involved in the backward pass. "
           "It is recommended to set find_unused_parameters to False "
           "to improve performance. However, if unused parameters "
           "appear in subsequent iterative training, then an error "
           "will occur. Please make it clear that there will be no "
           "parameters unused in the backward pass in subsequent "
           "training, and then set find_unused_parameters to False.";
  }

  if (unused_vars_.size() == vars_.size()) {
    LOG_FIRST_N(WARNING, 1)
        << "There is no parameter on this device involved "
           "in the backward calculation. If there are "
           "parameters on other devices involved in the "
           "backward pass, then a serious error will occur here.";
  }
}

// Add a hook function to each leaf node. When the gradient of a leaf node is
// generated, if it is a sparse parameter, allreduce is executed directly; if
// it is a dense parameter, three steps are executed:
// 1. MarkVarReady: find the position of the corresponding group through
//    var_index, share the gradient memory with the group's dense_tensors_,
//    and decrease the group counter by 1.
// 2. MarkGroupReady: when the group counter reaches 0, allreduce can be
//    emitted, and concat + allreduce + split is emitted in turn according to
//    next_group_.
// 3. FinalizeBackward: at the end, synchronize each stream.
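//
// Illustrative timeline (assumed two dense groups G0 and G1 and nrings_ = 2,
// not taken from the source): as gradients arrive, MarkVarReady shares each
// tensor into its group and decrements that group's pending_ counter; when
// G0's counter reaches 0, MarkGroupReady emits concat -> div_nranks ->
// allreduce -> split for G0 on ring 0, then for G1 on ring 1 once it is also
// ready; after the last group, FinalizeBackward waits on every comm stream.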
void Reducer::AddDistHook(size_t var_index) {
  PADDLE_ENFORCE_LT(var_index,
                    variable_locators_.size(),
                    platform::errors::OutOfRange(
                        "Out of bounds variable index. It must be less "
                        "than %d, but it is %d.",
                        variable_locators_.size(),
                        var_index));

  // gradient synchronization is not required when grad_need_hooks_ is false.
  if (!grad_need_hooks_) {
    return;
  }

  VLOG(3) << "Var[" << var_index << "] ["
          << vars_[var_index]->GradVarBase()->Name()
          << "] arrived and triggered disthook";

  local_used_vars_[var_index] = 1;

  // rebuild group when find_unused_vars_each_step_ is false
  if (NeedRebuildGroup()) {
    rebuild_vars_.push_back(vars_[var_index]);
    rebuild_var_indices_.push_back(var_index);
  }

  if (!has_marked_unused_vars_) {
    has_marked_unused_vars_ = true;
    for (const auto &unused_index : unused_vars_) {
      MarkVarReady(unused_index, false);
    }
  }

  MarkVarReady(var_index, true);
}

void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
  groups_need_finalize_ = true;

  const auto &var_locator = variable_locators_[var_index];
  const auto group_index = var_locator.group_index;
  auto &group = groups_[group_index];

  // error happened, if the var is ready before.
  if (vars_marked_ready_[var_index]) {
    auto error_info = string::Sprintf(
        "Error happened, because parameter[%d][%s] was marked ready before. "
        "Please set find_unused_parameters=True to traverse backward graph "
        "in each step to prepare reduce in advance. If you have set, "
        "there may be several reasons for this error: "
        "1) Some parameters are reused in multiple reentrant backward passes. "
        "2) Using model parameters outside of forward function. Please "
        "make sure that model parameters are not shared in concurrent "
        "forward-backward passes.",
        var_index,
        vars_[var_index]->GradVarBase()->Name());

    PADDLE_ENFORCE_EQ(has_marked_unused_vars_,
                      false,
                      platform::errors::PreconditionNotMet(error_info));

    error_info +=
        "3) Unused parameters retrieval is incorrect. "
        "The return value of forward will be used to retrieve"
        " the unused parameters of the entire model. These "
        "gradients of unused parameters will not be synchronized "
        "between multiple cards. However, if the unused "
        "parameters participate in the backward calculation "
        "again at a later time (e.g. after the forward function, "
        "the loss calculation uses the unused "
        "parameters of the forward and triggers backward), "
        "their gradients will be wrong.";

    PADDLE_ENFORCE_EQ(has_marked_unused_vars_,
                      true,
                      platform::errors::PreconditionNotMet(error_info));
  } else {
    vars_marked_ready_[var_index] = true;
  }

  if (!group.is_sparse_) {
    // process dense group
    const auto inside_group_index = var_locator.inside_group_index;
    const auto length = group.length_[inside_group_index];
    auto &group_tensor = group.dense_tensors_[inside_group_index];

    if (is_used_var) {
      auto var_base = vars_[var_index]->GradVarBase();
      auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
      group_tensor.ShareDataWith(*tensor).Resize(
          {static_cast<int64_t>(length)});
    } else {
      // TODO(shenliang03): maybe save the memory
      // by avoiding tensor construction
      if (!group_tensor.IsInitialized()) {
        group_tensor.Resize({static_cast<int64_t>(length)});
        group_tensor.mutable_data(place_,
                                  framework::TransToPhiDataType(group.dtype_));
      }

#ifdef PADDLE_WITH_XPU_BKCL
      if (platform::is_xpu_place(group_tensor.place())) {
        auto dev_ctx = static_cast<platform::XPUDeviceContext *>(
            platform::DeviceContextPool::Instance().Get(place_));
        if (HasGrad(var_index)) {
          auto var_base = vars_[var_index]->GradVarBase();
          auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
          group_tensor.ShareDataWith(*tensor).Resize(
              {static_cast<int64_t>(length)});
        } else {
          group_tensor.Resize({static_cast<int64_t>(length)});
          int r = xpu::constant(dev_ctx->x_context(),
                                reinterpret_cast<float *>(group_tensor.data()),
                                group_tensor.numel(),
                                0.0f);
          PADDLE_ENFORCE_XDNN_SUCCESS(r, "constant");
          PADDLE_ENFORCE_XPU_SUCCESS(xpu_wait(dev_ctx->stream()));
        }
      }
#elif defined(PADDLE_WITH_CNCL)
      if (platform::is_mlu_place(group_tensor.place())) {
        // TODO(liuyuhui) support MLU set constant
        VLOG(3) << "MLU doesn't support set_constant";
      }
#else
      auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
      if (HasGrad(var_index)) {
        auto var_base = vars_[var_index]->GradVarBase();
        auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
        group_tensor.ShareDataWith(*tensor).Resize(
            {static_cast<int64_t>(length)});
      } else {
        group_tensor.Resize({static_cast<int64_t>(length)});
        phi::funcs::set_constant(*dev_ctx, &group_tensor, 0.0);
      }
#endif
    }
  } else {
    // process sparse group
    PADDLE_ENFORCE_EQ(
        HasGrad(var_index),
        true,
        platform::errors::PreconditionNotMet(
            "The sparse parameter[%d][%s] should have gradient. "
            "Currently, DataParallel does not support sparse "
            "parameters without generating gradients during training. "
            "For example, if is_sparse=True is used in Embedding and this "
            "parameter cannot generate a gradient in the current step "
            "because of stop_gradient/detach, this error will occur.",
            var_index,
            vars_[var_index]->Name()));
    auto var_base = vars_[var_index]->GradVarBase();
    // need to check tensor type
    PADDLE_ENFORCE_EQ(
        var_base->Var().IsType<phi::SelectedRows>(),
        true,
        platform::errors::PreconditionNotMet(
            "The sparse parameter[%d][%s] must have a SelectedRows gradient. "
            "Before the forward pass, the parameter type is inferred to be "
            "SelectedRows, but after the backward pass, its actual type "
            "becomes LoDTensor. This is currently not supported by "
            "DataParallel. For example, if a sparse embedding is used and "
            "the embedding weight is shared with subsequent dense "
            "parameters, then the gradient of the embedding will be "
            "converted to a dense tensor.",
            var_index,
            vars_[var_index]->Name()));

    group.sparse_contents_ = var_base->MutableVar();
  }

  if (--group.pending_ == 0) {
    // can start allreduce
    MarkGroupReady(group_index);
  }

  if (next_group_ == groups_.size()) {
    FinalizeBackward();
  }
}

// TODO(liuyuhui): If BKCL supports non-blocking communication, this should be
// fixed to be the same as multi-GPU training.
void Reducer::MarkGroupReady(size_t group_index) {
  PADDLE_ENFORCE_GE(
      group_index,
      next_group_,
      platform::errors::PreconditionNotMet(
          "The index of the incoming group must be greater "
          "than or equal to the previously synchronized group index, "
          "expect it to be greater than or equal to %d, but got %d.",
          next_group_,
          group_index));

  if (group_index > next_group_) {
    VLOG(3) << "The order of groups will be adjusted automatically in the "
               "next batch";
    return;
  }

  for (; next_group_ < groups_.size() && groups_[next_group_].pending_ == 0;
       ++next_group_) {
    UNUSED auto &group = groups_[next_group_];
    UNUSED const int run_order = next_group_ % nrings_;

    auto *tensor = group.dense_contents_.GetMutable<phi::DenseTensor>();
    tensor->Resize(phi::make_ddim({group.all_length_}))
        .mutable_data(place_, framework::TransToPhiDataType(group.dtype_));

    // For CUDA or XPU, compute_stream --> comm_stream.
    // For CPU, do nothing.
    // NOTE: because concat uses the comm_stream, we expose the WaitCompute()
    // interface and call it here.
    parallel_ctx_->WaitCompute(run_order);
    FusedAllReduceSchedule(run_order, group, next_group_);
  }
}

void Reducer::FusedAllReduceSchedule(const int run_order,
                                     Group &group,
                                     const int curr_group_index) {
  // The overall timeline: concat > div_nranks > allreduce > split
  // dev_context is used to select different stream
  const auto &dev_context = *parallel_ctx_->GetDeviceContext(run_order);
  if (group.is_sparse_) {
    VLOG(3) << "sparse group [" << curr_group_index
            << "] start allreduce in ring[" << run_order << "]";
    group.DivNRanks(dev_context, nranks_);
    parallel_ctx_->AllReduceByStream(
        *group.sparse_contents_, group.sparse_contents_, run_order, false);
  } else {
    VLOG(3) << "dense group [" << curr_group_index
            << "] start allreduce in ring[" << run_order << "]";
    // Select the common comm stream to concat tensors
    // group.dense_tensors_ ---> group.dense_contents_
    group.ConcatTensors(dev_context);

    group.DivNRanks(dev_context, nranks_);
    // Start allreduce
    parallel_ctx_->AllReduceByStream(
        group.dense_contents_, &(group.dense_contents_), run_order, false);

    // Select the communication stream to split tensors
    // group.dense_contents_ ---> group.dense_tensors_
    group.SplitTensors(dev_context);
  }
}

std::vector<std::vector<size_t>> Reducer::RebuildGruops() {
  VLOG(3) << "The order of parameter arrival: "
          << string::join_strings(rebuild_var_indices_, ',');

  PADDLE_ENFORCE_EQ(
      rebuild_vars_.size(),
      vars_.size(),
      platform::errors::PreconditionNotMet(
          "Rebuilt vars' number should be equal to the original vars' "
          "number, expect it to be %d, but got %d.",
          vars_.size(),
          rebuild_vars_.size()));
  std::reverse(rebuild_vars_.begin(), rebuild_vars_.end());
  std::reverse(rebuild_var_indices_.begin(), rebuild_var_indices_.end());
  auto rebuild_group_indices = AssignGroupBySize(rebuild_vars_,
                                                 is_sparse_gradient_,
                                                 group_size_limits_,
                                                 rebuild_var_indices_);
  has_rebuilt_group_ = true;
  rebuild_vars_.clear();
  rebuild_var_indices_.clear();
  std::reverse(rebuild_group_indices.begin(), rebuild_group_indices.end());
  return rebuild_group_indices;
}

void Reducer::ProcessUnusedDenseVars() {
  // The calculation stream must be used here to
  // avoid conflicts with communication.
  VLOG(3) << "Local used vars : "
          << string::join_strings(local_used_vars_, ',');
  const auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
  // H2D is to allreduce the local_used_vars_
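  // Illustrative example (assumed two ranks and four parameters, not from
  // the source): if local_used_vars_ is {1, 0, 1, 1} on rank 0 and
  // {1, 1, 1, 0} on rank 1, the allreduce below yields {2, 1, 2, 1}, so no
  // parameter is globally unused and every locally-unused gradient is filled
  // from its group buffer further down.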
  auto *global_used_tensor = global_used_vars_.GetMutable<phi::DenseTensor>();
  framework::TensorFromVector<int>(
      local_used_vars_, *dev_ctx, global_used_tensor);
  parallel_ctx_->AllReduceByStream(
      global_used_vars_, &global_used_vars_, 0, true);
  framework::TensorToVector<int>(
      *global_used_tensor, *dev_ctx, &local_used_vars_);

  // sync compute stream to get global used var message,
  // but maybe affect speed performance
  parallel_ctx_->SynchronizeCompute();
  VLOG(3) << "Global used vars : "
          << string::join_strings(local_used_vars_, ',');

  for (const auto var_index : unused_vars_) {
    const bool global_unused = (local_used_vars_[var_index] == 0);

    // global used but local unused, set grad
    VLOG(3) << "Var [" << var_index << "] [" << vars_[var_index]->Name()
            << "] global_unused:" << global_unused
            << "  has grad: " << HasGrad(var_index);

    if (!global_unused) {
      VLOG(3) << "Start process unused Var";
      // 1. source var base
      const auto &var_locator = variable_locators_[var_index];
      const auto group_index = var_locator.group_index;
      const auto &group = groups_[group_index];
      const auto inside_group_index = var_locator.inside_group_index;
      const auto &src_tensor = group.dense_tensors_[inside_group_index];
      // sparse no need to check and no support find_unused_parameters
      if (group.is_sparse_) {
        continue;
      }
      // 2. destination var base
      auto dest_var_base = vars_[var_index];
      auto *dest_tensor =
          dest_var_base->MutableVar()->GetMutable<phi::DenseTensor>();
      const auto &dest_dims = dest_tensor->dims();

      // 3. create grad var base or get grad var base
      auto grad_var_base_tmp = dest_var_base->MutableGradVarBase();
      // NOTE(haohongxiang): Calling SetIsEmpty here is to make sure that
      // gradient accumulation can continue normally after clear_gradients(),
      // especially in cases involving complex control flow.
      grad_var_base_tmp->SharedVar()->SetIsEmpty(false);

      // 4. set grad tensor
      auto *dest_grad_tensor =
          grad_var_base_tmp->MutableVar()->GetMutable<phi::DenseTensor>();
      const auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
      paddle::framework::TensorCopy(
          src_tensor, place_, *dev_ctx, dest_grad_tensor);
      dest_grad_tensor->Resize(dest_dims);
    }
  }
}

bool Reducer::HasGrad(size_t var_index) {
  const auto grad_var = vars_[var_index]->GradVarBase();
  if (!grad_var || !grad_var->Var().IsInitialized()) {
    return false;
  }

  const auto &var = grad_var->Var();
  if (var.IsType<phi::DenseTensor>()) {
    if (var.Get<phi::DenseTensor>().IsInitialized()) {
      return true;
    }
  } else if (var.IsType<phi::SelectedRows>()) {
    if (var.Get<phi::SelectedRows>().value().IsInitialized()) {
      return true;
    }
  } else {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Only support LoDTensor and SelectedRows for gradient var"));
  }
  return false;
}

void Reducer::FinalizeBackward() {
  groups_need_finalize_ = false;
  grad_need_hooks_ = false;

  // Must prevent compute_stream_ from starting until all comm streams have
  // finished
  for (int i = 0; i < nrings_; ++i) {
    parallel_ctx_->WaitComm(i);
  }

  for (auto &group : groups_) {
    if (!group.is_sparse_) {
      group.dense_contents_.Clear();
    }
  }

  if (NeedRebuildGroup()) {
    VLOG(3) << "Start rebuilding the groups";
    auto rebuild_group_indices = RebuildGruops();
    group_indices_ = std::move(rebuild_group_indices);
    InitializeGroups(group_indices_);
  }

  if (find_unused_vars_each_step_) {
// TODO(liuyuhui): support Tensorcopy/TensorFromVector/TensorToVector on XPU
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) || \
    defined(PADDLE_WITH_GLOO) || defined(PADDLE_WITH_CNCL)
    ProcessUnusedDenseVars();
#endif
    // Initialize local used vars
    local_used_vars_.clear();
    local_used_vars_.resize(vars_.size(), 0);
    VLOG(3) << "ProcessUnusedDenseVars is finished.";
  }

  VLOG(3) << "In the batch, Reducer is finished.";
}

// According to the size of each parameter, it is allocated to a group. Each
// sparse parameter occupies a group exclusively. Dense parameters of the
// same data type are assigned to the same group. When dividing groups, the
// size of each group is limited by the values in group_size_limits in turn;
// once those values are exhausted, the last value of group_size_limits is
// reused. A limit value of 0 means that every parameter monopolizes its own
// group.
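//
// A worked example (assumed sizes, not taken from any real model): with
// group_size_limits = {1024, 4096} bytes and four FP32 dense parameters of
// 100 / 200 / 800 / 1000 elements (400 / 800 / 3200 / 4000 bytes):
//   - p0 + p1 accumulate 1200 bytes >= 1024, closing group {0, 1} and moving
//     on to the next limit (4096);
//   - p2 + p3 accumulate 7200 bytes >= 4096, closing group {2, 3}.
// A sparse parameter would instead always get a group of its own.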
std::vector<std::vector<size_t>> AssignGroupBySize(
    const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
    const std::vector<bool> &is_sparse_gradient,
    const std::vector<size_t> &group_size_limits,
    const std::vector<int64_t> &tensor_indices) {
  PADDLE_ENFORCE_EQ(vars.size(),
                    is_sparse_gradient.size(),
                    platform::errors::PreconditionNotMet(
                        "vars len must be equal to is_sparse_gradient len, but "
                        "[%lu] != [%lu]",
                        vars.size(),
                        is_sparse_gradient.size()));
  auto check_perm = [](const std::vector<int64_t> &x) -> bool {
    size_t len = x.size();
    std::vector<size_t> cnt(len, 0);
    for (size_t i = 0; i < len; ++i) {
      if (x[i] >= static_cast<int64_t>(len) || x[i] < 0 || cnt[x[i]]) {
        return false;
      }
      cnt[x[i]]++;
    }
    return true;
  };
  PADDLE_ENFORCE_EQ(true,
                    check_perm(tensor_indices),
                    platform::errors::PreconditionNotMet(
                        "tensor_indices must be a permutation from 0 to %lu",
                        tensor_indices.size()));
  // the return vector
  std::vector<std::vector<size_t>> res;

  // Key: the var type
  // Value: should use which index in group_size_limits for group size limit
  std::unordered_map<std::string, size_t> group_limit_index;

  // Key: the var type
  // Value: <the var index in input tensors, total numel in this group>
  std::unordered_map<std::string, std::pair<std::vector<size_t>, size_t>>
      next_group;

  for (size_t i = 0; i < vars.size(); ++i) {
    const auto &var = vars[i];

    size_t tensor_real_index = i;
    if (!tensor_indices.empty()) {
      tensor_real_index = tensor_indices[i];
    }

    if (is_sparse_gradient[tensor_real_index]) {
      // keep each sparse var in a group of its own
      res.push_back({tensor_real_index});
      continue;
    }

    const auto &var_dtype = var->DataType();
    const auto var_dtype_str = framework::DataTypeToString(var_dtype);
    VLOG(3) << "var[" << var->GradVarName() << "] 's type is "
            << var->DataType();
    auto &group_info = next_group[var_dtype_str];
    int64_t var_size = -1;
    if (var->Var().IsType<phi::DenseTensor>()) {
      var_size = var->Var().Get<phi::DenseTensor>().numel();
    } else {
      VLOG(3) << "var " << var->Name()
              << " is not tensor or selected_rows, so skip it";
      continue;
    }
    group_info.first.push_back(tensor_real_index);
    group_info.second += framework::SizeOfType(var_dtype) * var_size;

    if (group_limit_index.find(var_dtype_str) == group_limit_index.end()) {
      // means it is the first var of var_dtype
      group_limit_index[var_dtype_str] = 0;
    }
    auto &cur_limit_index = group_limit_index[var_dtype_str];
    if (group_info.second >= group_size_limits[cur_limit_index]) {
      // exceed group capacity and create a new group
      res.emplace_back(std::move(group_info.first));
      group_info = std::pair<std::vector<size_t>, size_t>();
      cur_limit_index =
          (std::min)(cur_limit_index + 1, group_size_limits.size() - 1);
    }
  }

  // add the final groups
  for (auto &e : next_group) {
    auto &group_info = e.second;
    if (!group_info.first.empty()) {
      res.emplace_back(std::move(group_info.first));
    }
  }

  for (const auto &group_index : res) {
    PADDLE_ENFORCE_NE(
        group_index.empty(),
        true,
        platform::errors::PreconditionNotMet(
            "AssignGroupBySize construct empty group, please check."));
  }
  if (tensor_indices.empty()) {
    std::sort(res.begin(),
              res.end(),
              [](const std::vector<size_t> &x, const std::vector<size_t> &y) {
                return x.front() < y.front();
              });
  }
  return res;
}
#endif

}  // namespace imperative
}  // namespace paddle