// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/reducer.h"

#include <iostream>

#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/string/string_helper.h"

#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"

#include "paddle/fluid/imperative/parallel_context.h"

#include "paddle/pten/core/dense_tensor.h"
namespace paddle {
namespace imperative {

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) ||     \
    defined(PADDLE_WITH_XPU_BKCL) || defined(PADDLE_WITH_GLOO) || \
    defined(PADDLE_WITH_ASCEND_CL)
// Divide the gradient tensor by nranks.
void Group::DivNRanks(const platform::DeviceContext &context, int64_t nranks) {
  framework::Tensor *tensor =
      is_sparse_
          ? sparse_contents_->GetMutable<framework::SelectedRows>()
                ->mutable_value()
          : dense_contents_.GetMutable<framework::LoDTensor>();

  if (platform::is_gpu_place(tensor->place())) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    DivNRanks(tensor, nranks, context);
#endif
  } else if (platform::is_npu_place(tensor->place())) {
    // TODO(kuizhiqing)
    VLOG(4) << "DivNRanks is not supported on NPU yet";
  } else if (platform::is_cpu_place(tensor->place())) {
    VLOG(4) << "before div 2" << *tensor;
    VLOG(4) << "DivNRanks for CPU devices: nranks = " << nranks;
#ifdef PADDLE_WITH_HIP
    if (dtype_ == paddle::framework::proto::VarType_Type_BF16) {
      PADDLE_THROW(paddle::platform::errors::Fatal(
          "BF16 is not supported in DataParallel for now"));
    }
    framework::VisitDataTypeForHIP(
        dtype_, DivNRanksForAllReduce<platform::CPUDeviceContext>(
                    tensor, nranks, context));
#else
    framework::VisitDataType(dtype_,
                             DivNRanksForAllReduce<platform::CPUDeviceContext>(
                                 tensor, nranks, context));
#endif
    VLOG(4) << "after div 2" << *tensor;
  } else if (platform::is_xpu_place(tensor->place())) {
#ifdef PADDLE_WITH_XPU_BKCL
// TODO(liuyuhui): support DivNRanks on XPU in the future
#endif
  }
}

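// Concatenate the per-variable dense gradient tensors of a group into the
// fused buffer p_dense_contents so that a single allreduce can cover them all.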
template <typename DeviceContext, typename T>
static void ConcatTensorsForAllReduce(
    const DeviceContext &context,
    const std::vector<framework::Tensor> &dense_tensors_,
    framework::Variable *p_dense_contents) {
  operators::math::ConcatFunctor<DeviceContext, T> concat_functor_;
  concat_functor_(context, dense_tensors_, 0,
                  p_dense_contents->GetMutable<framework::LoDTensor>());
}

template <typename DeviceContext, typename T>
static void SplitTensorsForAllReduce(
    const DeviceContext &context, framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<framework::LoDTensor>();
  std::vector<framework::Tensor *> outs;
  std::vector<const framework::Tensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  // Sometimes direct copies will be faster
  if (p_dense_tensors->size() < 10) {
    operators::StridedMemcpyWithAxis0<T>(context, *in, shape_refer, &outs);
  } else {
    operators::math::SplitFunctor<DeviceContext, T> split_functor_;
    split_functor_(context, *in, shape_refer, 0, &outs);
  }
}

// context is used to select the stream for concat
template <typename DeviceContext>
static void ConcatTensorsWithType(
    const DeviceContext &context,
    const std::vector<framework::Tensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      ConcatTensorsForAllReduce<DeviceContext, platform::float16>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<DeviceContext, float>(context, dense_tensors_,
                                                      p_dense_contents);
      break;
    case framework::proto::VarType::FP64:
      ConcatTensorsForAllReduce<DeviceContext, double>(context, dense_tensors_,
                                                       p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when concatenating tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <typename DeviceContext>
static void SplitTensorsWithType(
    const DeviceContext &context, framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      SplitTensorsForAllReduce<DeviceContext, platform::float16>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<DeviceContext, float>(context, p_dense_contents,
                                                     p_dense_tensors);
      break;
    case framework::proto::VarType::FP64:
      SplitTensorsForAllReduce<DeviceContext, double>(context, p_dense_contents,
                                                      p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when splitting tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

#ifdef PADDLE_WITH_XPU_BKCL
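// XPU specialization of SplitTensorsForAllReduce (FP32 only): it always uses
// the SplitFunctor instead of the StridedMemcpy fast path used on other
// devices.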
template <>
void SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<framework::LoDTensor>();
  std::vector<framework::Tensor *> outs;
  std::vector<const framework::Tensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  operators::math::SplitFunctor<platform::XPUDeviceContext, float>
      split_functor_;
  split_functor_(context, *in, shape_refer, 0, &outs);
}

// context is used to select the stream for concat
template <>
void ConcatTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    const std::vector<framework::Tensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when concatenating tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <>
void SplitTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when splitting tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}
#endif

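// Fuse the per-variable gradients of this group into dense_contents_ on the
// device that the given context runs on.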
void Group::ConcatTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    ConcatTensorsWithType(
        static_cast<const platform::CUDADeviceContext &>(context),
        dense_tensors_, &dense_contents_, dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat grad tensors since it's not compiled with NCCL. "
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    ConcatTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        dense_tensors_, &dense_contents_, dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat xpu grads since it's not compiled with BKCL. "
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_npu_place(place)) {
#ifdef PADDLE_WITH_ASCEND_CL
    ConcatTensorsWithType(
        static_cast<const platform::NPUDeviceContext &>(context),
        dense_tensors_, &dense_contents_, dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat npu grads since it's not compiled with HCCL. "
        "Please recompile or reinstall Paddle with HCCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    ConcatTensorsWithType(
        static_cast<const platform::CPUDeviceContext &>(context),
        dense_tensors_, &dense_contents_, dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Concat grad tensor not supported on place (%s)", place));
  }
}

void Group::SplitTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    SplitTensorsWithType(
        static_cast<const platform::CUDADeviceContext &>(context),
        &dense_contents_, &dense_tensors_, dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split grad tensor since it's not compiled with NCCL. "
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    SplitTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        &dense_contents_, &dense_tensors_, dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split xpu grad since it's not compiled with BKCL. "
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_npu_place(place)) {
#ifdef PADDLE_WITH_ASCEND_CL
    SplitTensorsWithType(
        static_cast<const platform::NPUDeviceContext &>(context),
        &dense_contents_, &dense_tensors_, dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split npu grad since it's not compiled with HCCL. "
        "Please recompile or reinstall Paddle with HCCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    SplitTensorsWithType(
        static_cast<const platform::CPUDeviceContext &>(context),
        &dense_contents_, &dense_tensors_, dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Split grad tensor not supported on place (%s)", place));
  }
}

std::ostream &operator<<(std::ostream &out, const Group &group) {
  const auto &vars = group.variable_indices_;
  out << "numel: " << group.all_length_ << " ;is_sparse: " << group.is_sparse_
      << " ;var number: " << vars.size() << "\n";
  auto begin = vars.begin();
  auto end = vars.end();
  out << "[";
  for (int i = 0; begin != end && i < 100; ++i, ++begin) {
    if (i > 0) out << ' ';
    out << *begin;
  }
  if (begin != end) {
    out << " ...";
  }
  out << "]\n";
  return out;
}

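// Construct the Reducer over all trainable vars: record the initial group
// partition, register a gradient hook (AddDistHook) on every var, and set up
// the per-step bookkeeping (ready flags and local usage counters).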
Reducer::Reducer(const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
                 const std::vector<std::vector<size_t>> &group_indices,
                 const std::vector<bool> &is_sparse_gradient,
                 std::shared_ptr<imperative::ParallelContext> parallel_ctx,
                 const std::vector<size_t> &group_size_limits,
                 bool find_unused_vars)
    : vars_(vars),
      group_indices_(group_indices),
      is_sparse_gradient_(is_sparse_gradient),
      parallel_ctx_(parallel_ctx),
      group_size_limits_(group_size_limits),
      find_unused_vars_each_step_(find_unused_vars) {
  VLOG(3) << "Start construct the Reducer ...";
  nrings_ = parallel_ctx->GetNRings();
  nranks_ = parallel_ctx->GetNRanks();
#ifdef PADDLE_WITH_XPU_BKCL
  comm_pool_.reset(new ::ThreadPool(1));
  comm_op_count_ = 0;
#endif
  // initialize groups
  InitializeGroups(group_indices);
  for (size_t global_var_index = 0; global_var_index < vars_.size();
       ++global_var_index) {
    auto var = vars_[global_var_index];
    var->GradVarBase()->AddVoidHook(std::make_shared<std::function<void()>>(
        [=]() { this->AddDistHook(global_var_index); }));
    var_index_map_[var->GradVarBase()->SharedVar().get()] = global_var_index;
  }

  // For checking that each var is marked ready only once
  vars_marked_ready_.resize(vars_.size(), false);

  // Initialize local used vars
  local_used_vars_.resize(vars_.size(), 0);
}

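// Check every dense variable of a group (initialized, non-empty, consistent
// dtype and place) and record its length; the fused buffer itself is
// allocated later in InitializeGroups.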
void Reducer::InitializeDenseGroups(
    const std::vector<size_t> &variable_indices_, Group *p_group) {
  int64_t all_length = 0;
  for (size_t index = 0; index < variable_indices_.size(); ++index) {
    const auto variable_index = variable_indices_[index];
    const auto &var = vars_[variable_index];
    const auto &var_name = var->Name();
    PADDLE_ENFORCE_EQ(is_sparse_gradient_[variable_index], false,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s's GRAD must be LoDTensor, but received "
                          "GRAD is SelectedRows",
                          var_name));

    auto lod_tensor = var->MutableVar()->GetMutable<framework::LoDTensor>();
    PADDLE_ENFORCE_EQ(lod_tensor->IsInitialized(), true,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s is not initialized.", var_name));
    const auto size = lod_tensor->numel();
    PADDLE_ENFORCE_GT(
        size, 0, platform::errors::PreconditionNotMet(
                     "The number of tensor %s's elements is 0.", var_name));
    all_length += size;

    p_group->length_.push_back(size);

    // for concat operator
    p_group->dense_tensors_.push_back(framework::Tensor());

    // check the dtype and place, it must be same.
    const auto &dtype = var->DataType();
    const auto &place = var->Place();
    if (index > 0) {
      PADDLE_ENFORCE_EQ(
          dtype, p_group->dtype_,
          platform::errors::PreconditionNotMet(
              "Tensor %s has different dtype. Expected dtype is %s, but actual "
              "dtype is %s",
              var_name, framework::DataTypeToString(p_group->dtype_),
              framework::DataTypeToString(dtype)));
      PADDLE_ENFORCE_EQ(place, place_,
                        platform::errors::PreconditionNotMet(
                            "Tensor %s has different place. Expected place is "
                            "%s, but actual place is %s",
                            var_name, place_, place));
    } else {
      p_group->dtype_ = dtype;
      place_ = place;
    }
  }
  p_group->all_length_ = all_length;
}

// Each parameter will be initialized according to the group information.
// For the sparse parameter, sparse_contents_ in the group directly points
// to the parameter. For dense parameters, first construct an empty Tensor().
// Then specify the actual memory in MarkVarReady.
void Reducer::InitializeGroups(
    const std::vector<std::vector<size_t>> &group_indices) {
  VLOG(3) << "Start initialize groups ..";
  // clear the group
  groups_.clear();
  groups_.reserve(group_indices.size());
  variable_locators_.clear();
  variable_locators_.resize(vars_.size());

  auto group_nums = group_indices.size();
  for (size_t group_index = 0; group_index < group_nums; ++group_index) {
    const auto &variable_indices_ = group_indices[group_index];
    PADDLE_ENFORCE_GT(
        variable_indices_.size(), 0,
        platform::errors::PreconditionNotMet(
            "The number of group[%d]'s elements is 0.", group_index));
    Group group;

    // It's just for checking whether the group is sparse or dense
    auto first_varbase = vars_[variable_indices_.front()];
    if (variable_indices_.size() == 1 &&
        is_sparse_gradient_[variable_indices_.front()]) {
      // process the sparse gradient. one sparse, one group
      group.dtype_ = first_varbase->DataType();
      group.is_sparse_ = true;
    } else {
      // process the dense gradient.
      InitializeDenseGroups(variable_indices_, &group);
      auto tensor = group.dense_contents_.GetMutable<framework::LoDTensor>();
      tensor->Resize(framework::make_ddim({group.all_length_}))
          .mutable_data(place_, group.dtype_);
    }

    // map variables to this group by VariableLocator
    size_t inside_group_index = 0;
    for (const auto var_index : variable_indices_) {
      variable_locators_[var_index] = VariableLocator{
          .group_index = group_index,
          .inside_group_index = inside_group_index++,
      };
    }
    group.variable_indices_ = std::move(variable_indices_);
    groups_.emplace_back(std::move(group));
    // Debug Message For Reducer
    VLOG(3) << "The Group[" << group_index << "]:" << groups_.back();
  }
}

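// Breadth-first traversal from init_nodes that counts, for every reachable
// grad op node, how many predecessors it has (node_deps_); the counts are
// consumed later by TraverseBackwardGraph. Ops of type py_layer are rejected
// because DataParallel does not support PyLayer.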
void Reducer::PrepareDeps(const std::unordered_set<GradOpNode *> &init_nodes) {
  PADDLE_ENFORCE_EQ(
      node_deps_.empty(), true,
      platform::errors::AlreadyExists("Op deps must be initialized here"));

  std::queue<GradOpNode *> q;
  std::unordered_set<GradOpNode *> visited;

  for (auto pos = init_nodes.begin(); pos != init_nodes.end(); pos++) {
    q.push(*pos);
    visited.insert(*pos);
  }

  while (!q.empty()) {
    auto *cur_node = q.front();
    q.pop();

    const auto &grad_pending_nodes = cur_node->GradPendingNodes();
    for (auto &grad_pending_node : grad_pending_nodes) {
      PADDLE_ENFORCE_NOT_NULL(
          grad_pending_node,
          platform::errors::NotFound("Grad pending node should not be null"));
      // py_layer is not supported in DataParallel
      auto begin = grad_pending_node->begin();
      auto end = grad_pending_node->end();
      for (auto op_base = begin; op_base != end; op_base++) {
        PADDLE_ENFORCE_EQ(
            op_base->Type() != "py_layer", true,
            platform::errors::PreconditionNotMet(
                "Note: Currently PyLayer is not supported in DataParallel. For "
                "using PyLayer in a DataParallel model, you can skip gradient "
                "synchronization among multiple cards by 'no_sync', and "
                "manually implement 'all_reduce' before model optimization. "
                "There is an example showing specific implementation processing "
                "in official docs: https://www.paddlepaddle.org.cn/documentation"
                "/docs/api/paddle/DataParallel_cn.html"));
      }
      ++node_deps_[grad_pending_node.get()];
      if (visited.count(grad_pending_node.get()) == 0) {
        visited.insert(grad_pending_node.get());
        q.push(grad_pending_node.get());
      }
    }
  }
}

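// Walk the autograd graph starting from the given outputs and record every
// parameter that will not receive a gradient in this backward pass into
// unused_vars_.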
void Reducer::TraverseBackwardGraph(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  node_deps_.clear();
  std::queue<std::shared_ptr<GradOpNode>> q;
  std::unordered_set<VariableWrapper *> var_visited;
  std::unordered_set<GradOpNode *> init_nodes;

  for (const auto &output : outputs) {
    const auto &grad_node = output->GradVarBase()->GradNode();
    if (grad_node == nullptr || output->OverridedStopGradient()) {
      VLOG(3) << "Skip auto grad since there is no grad op or output is "
                 "stop_gradient=True: "
              << output->Name();
      continue;
    } else {
      init_nodes.insert(grad_node.get());
      var_visited.insert(output->SharedVar().get());
      q.push(grad_node);
    }
  }

  PrepareDeps(init_nodes);
  // Traverse the autograd graph starting at the specified output
  while (!q.empty()) {
    auto cur_node = q.front();
    q.pop();

    for (const auto &cur_op : *cur_node) {
      auto &bwd_outs = cur_op.GetOutsMap();
      for (const auto &pair : bwd_outs) {
        if (!pair.second.IsGrad()) {
          continue;
        }
        for (auto &var : pair.second) {
          if (!var || var->OverridedStopGradient()) {
            continue;
          } else {
            var_visited.insert(var.get());
          }
        }
      }
    }
    for (const auto &grad_pending_node : cur_node->GradPendingNodes()) {
      PADDLE_ENFORCE_NOT_NULL(grad_pending_node,
                              platform::errors::NotFound(
                                  "Grad pending node should not be nullptr"));
      auto iter = node_deps_.find(grad_pending_node.get());
      if (iter == node_deps_.end()) {
        continue;
      }
      if (--(iter->second) == 0) {
        q.push(grad_pending_node);
      }
    }
  }

  for (const auto &it : var_index_map_) {
    if (var_visited.count(it.first) == 0) {
      unused_vars_.push_back(it.second);
      VLOG(3) << "Var[" << it.second << "] [" << it.first->Name()
              << "] is not used";
    }
  }
}

// After each batch is calculated, the counter of each group (group.pending_)
// and the allreduce sequence counter (next_group_) will be reset.
void Reducer::PrepareForBackward(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  VLOG(3) << "after forward, then reset count for backward.";
  grad_need_hooks_ = true;
  next_group_ = 0;
  std::for_each(groups_.begin(), groups_.end(), [](Group &group) {
    group.pending_ = group.variable_indices_.size();
    group.sparse_contents_ = nullptr;
  });

  // reinitialize vars_marked_ready_ for next iteration
  vars_marked_ready_.clear();
  vars_marked_ready_.resize(vars_.size(), false);

  PADDLE_ENFORCE_EQ(
      groups_need_finalize_, false,
      platform::errors::PreconditionNotMet(
          "A serious error has occurred here. Please "
          "set find_unused_parameters=True to traverse backward graph "
          "in each step to prepare reduce in advance. If you have "
          "set, there may be several reasons for this error: "
          "1) Please note that all forward outputs derived from the module "
          "parameters must participate in the calculation of losses and "
          "subsequent gradient calculations. If not, the wrapper will hang, "
          "waiting for autograd to generate gradients for these parameters. "
          "You can use detach or stop_gradient to make the unused parameters "
          "detached from the autograd graph. "
          "2) Used multiple forwards and one backward. You may be able to wrap "
          "multiple forwards in a model."));

  // The first var to trigger the unused parameter
  has_marked_unused_vars_ = false;

  if (find_unused_vars_once_ || find_unused_vars_each_step_) {
    unused_vars_.clear();
    TraverseBackwardGraph(outputs);
    // only check once in first step
    find_unused_vars_once_ = false;
  }

  if (find_unused_vars_each_step_ && unused_vars_.empty()) {
    LOG_FIRST_N(WARNING, 1)
        << "All parameters are involved in the backward pass. "
           "It is recommended to set find_unused_parameters to False "
           "to improve performance. However, if unused parameters "
           "appear in subsequent iterative training, then an error "
           "will occur. Please make it clear that in the subsequent "
           "training, there will be no parameters that are not used "
           "in the backward pass, and then set find_unused_parameters";
  }

  if (unused_vars_.size() == vars_.size()) {
    LOG_FIRST_N(WARNING, 1)
        << "There is no parameter in the device involved "
           "in the backward calculation. If there are "
           "parameters on other devices involved in the "
           "backward, then a serious error will occur here.";
  }
}

// Add hook function to each leaf node. When the gradient of a leaf node is
// generated, if it is the sparse parameter, it will directly execute allreduce,
// if it is the dense parameter, it will execute three steps: 1,
// MarkVarReady. Find the position of the corresponding group
// through var_index, share the gradient memory and the group dense_tensors,
// the group counter is reduced by 1. 2, MarkGroupReady: When the group
// counter is 0, it means that allreduce can be emitted, and
// concat + allreduce + split is emitted in turn according to next_group_.
// 3, FinalizeBackward: after the end, synchronize each stream.
void Reducer::AddDistHook(size_t var_index) {
  PADDLE_ENFORCE_LT(var_index, variable_locators_.size(),
                    platform::errors::OutOfRange(
                        "Out of bounds variable index. It must be less "
                        "than %d, but it is %d",
                        variable_locators_.size(), var_index));

  // gradient synchronization is not required when grad_need_hooks_ is false.
  if (!grad_need_hooks_) {
    return;
  }

  VLOG(3) << "Var[" << var_index << "] ["
          << vars_[var_index]->GradVarBase()->Name()
          << "] arrived and triggered disthook";

  local_used_vars_[var_index] = 1;

  // rebuild group when find_unused_vars_each_step_ is false
  if (NeedRebuildGroup()) {
    rebuild_vars_.push_back(vars_[var_index]);
    rebuild_var_indices_.push_back(var_index);
  }

  if (!has_marked_unused_vars_) {
    has_marked_unused_vars_ = true;
    for (const auto &unused_index : unused_vars_) {
      MarkVarReady(unused_index, false);
    }
  }

  MarkVarReady(var_index, true);
}

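// Mark vars_[var_index] as ready. For a dense var the gradient storage is
// shared into (or zero-filled in) the group's fused buffer; for a sparse var
// sparse_contents_ is pointed at the gradient. When the whole group becomes
// ready, its fused allreduce is scheduled.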
void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
  groups_need_finalize_ = true;

  const auto &var_locator = variable_locators_[var_index];
  const auto group_index = var_locator.group_index;
  auto &group = groups_[group_index];

  // It is an error if the var has already been marked ready.
  if (vars_marked_ready_[var_index]) {
    auto error_info = string::Sprintf(
        "Error happened, parameter[%d][%s] has been marked ready before. "
        "Please set find_unused_parameters=True to traverse backward graph "
        "in each step to prepare reduce in advance. If you have set, "
        "there may be several reasons for this error: "
        "1) In multiple reentrant backward phase, some parameters are reused. "
        "2) Using model parameters outside of forward function. Please "
        "make sure that model parameters are not shared in concurrent "
        "forward-backward passes.",
        var_index, vars_[var_index]->GradVarBase()->Name());

    PADDLE_ENFORCE_EQ(has_marked_unused_vars_, false,
                      platform::errors::PreconditionNotMet(error_info));

    error_info +=
        "3) Unused parameters retrieval is incorrect. "
        "The return value of forward will be used to retrieve"
        " the unused parameters of the entire model. These "
        "gradients of unused parameters will not be synchronized "
        "between multiple cards. However, if the unused "
        "parameters participate in the backward calculation "
        "again at a later time (e.g. after the forward function, "
        "the loss calculation uses the unused "
        "parameters of the forward and triggers backward), "
        "its gradient will be wrong.";

    PADDLE_ENFORCE_EQ(has_marked_unused_vars_, true,
                      platform::errors::PreconditionNotMet(error_info));
  } else {
    vars_marked_ready_[var_index] = true;
  }

  if (!group.is_sparse_) {
    // process dense group
    const auto inside_group_index = var_locator.inside_group_index;
    const auto length = group.length_[inside_group_index];
    auto &group_tensor = group.dense_tensors_[inside_group_index];

    if (is_used_var) {
      auto var_base = vars_[var_index]->GradVarBase();
      auto tensor = var_base->MutableVar()->GetMutable<framework::LoDTensor>();
      group_tensor.ShareDataWith(*tensor).Resize(
          {static_cast<int64_t>(length)});
    } else {
      // TODO(shenliang03): maybe save the memory
      // by avoiding tensor construction
      if (!group_tensor.IsInitialized()) {
        group_tensor.Resize({static_cast<int64_t>(length)});
        group_tensor.mutable_data(place_, group.dtype_);
      }

#ifdef PADDLE_WITH_XPU_BKCL
      if (platform::is_xpu_place(group_tensor.place())) {
        // TODO(liuyuhui) support XPU set constant
        VLOG(3) << "XPU doesn't support set_constant";
      }
#else
      auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
      if (HasGrad(var_index)) {
        auto var_base = vars_[var_index]->GradVarBase();
        auto tensor =
            var_base->MutableVar()->GetMutable<framework::LoDTensor>();
        group_tensor.ShareDataWith(*tensor).Resize(
            {static_cast<int64_t>(length)});
      } else {
        group_tensor.Resize({static_cast<int64_t>(length)});
        operators::math::set_constant(*dev_ctx, &group_tensor, 0.0);
      }
#endif
    }
  } else {
    // process sparse group
    PADDLE_ENFORCE_EQ(
        HasGrad(var_index), true,
        platform::errors::PreconditionNotMet(
            "The sparse parameter[%d][%s] should have gradient. "
            "Currently, DataParallel does not support sparse "
            "parameters without generating gradients during training. "
            "For example, if is_sparse=True is used in Embedding, "
            "the current step of this parameter cannot generate gradient "
            "because of stop_gradient/detach, where an error will occur.",
            var_index, vars_[var_index]->Name()));
    auto var_base = vars_[var_index]->GradVarBase();
    // need to check tensor type
    PADDLE_ENFORCE_EQ(
        var_base->Var().IsType<framework::SelectedRows>(), true,
        platform::errors::PreconditionNotMet(
            "The sparse parameter[%d][%s] must have a SelectedRows gradient. "
            "Before forward pass, the parameter type is inferred to be "
            "SelectedRows, but after backward pass, its actual type becomes "
            "LoDTensor. It is currently not supported by DataParallel. "
            "For example, if sparse embedding is used, and the weight of "
            "embedding is shared with subsequent dense parameters, then "
            "the parameter gradient of the embedding will be converted "
            "to dense parameters.",
            var_index, vars_[var_index]->Name()));

    group.sparse_contents_ = var_base->MutableVar();
  }

  if (--group.pending_ == 0) {
    // can start allreduce
    MarkGroupReady(group_index);
  }

  if (next_group_ == groups_.size()) {
    FinalizeBackward();
  }
}

// TODO(liuyuhui): If BKCL supports non-blocking communication, it should be
// fixed in the same way as multi-GPU training.
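// Schedule the fused allreduce for every group whose gradients are all ready,
// strictly in ascending group order (next_group_).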
void Reducer::MarkGroupReady(size_t group_index) {
  PADDLE_ENFORCE_GE(
      group_index, next_group_,
      platform::errors::PreconditionNotMet(
          "The index of the incoming group must be greater "
          "than or equal to the previously synchronized group index, "
          "expect it to greater than or equal to %d, but got %d.",
          next_group_, group_index));

  if (group_index > next_group_) {
    VLOG(3) << "It will adjust the order of groups in the next batch automatically";
    return;
  }

  for (; next_group_ < groups_.size() && groups_[next_group_].pending_ == 0;
       ++next_group_) {
    UNUSED auto &group = groups_[next_group_];
    UNUSED const int run_order = next_group_ % nrings_;

    // For CUDA or XPU, compute_stream --> comm_stream.
    // For CPU, do nothing.
    // NOTE: Because concat uses the comm_stream,
    // we expose the WaitCompute() interface and call
    // it here.
    parallel_ctx_->WaitCompute(run_order);
#ifdef PADDLE_WITH_XPU_BKCL
    {
      std::lock_guard<std::mutex> lock(mutex_);
      comm_op_count_ += 1;  // lock
    }
    // TODO(liuyuhui): Add try catch to deal with exception later,
    // otherwise the main thread will continue to run when an exception is
    // thrown in comm_pool_.
    auto next_group = next_group_;
    comm_pool_->enqueue([this, run_order, next_group, &group] {
      auto dev_id = place_.device;
      platform::SetXPUDeviceId(dev_id);
      FusedAllReduceSchedule(run_order, group, next_group);
      {
        std::lock_guard<std::mutex> lock(mutex_);
        comm_op_count_ -= 1;  // lock
        cv_.notify_all();
      }
    });
#elif defined(PADDLE_WITH_RCCL) || defined(PADDLE_WITH_NCCL) || \
    defined(PADDLE_WITH_GLOO) || defined(PADDLE_WITH_ASCEND_CL)
    FusedAllReduceSchedule(run_order, group, next_group_);
#else
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "Not compiled with BKCL or NCCL or GLOO."));
#endif
  }
}

void Reducer::FusedAllReduceSchedule(const int run_order, Group &group,
                                     const int curr_group_index) {
  // The overall timeline: concat > div_nranks > allreduce > split
  // dev_context is used to select different stream
  const auto &dev_context = *parallel_ctx_->GetDeviceContext(run_order);
  if (group.is_sparse_) {
    VLOG(3) << "sparse group [" << curr_group_index
            << "] start allreduce in ring[" << run_order << "]";
    group.DivNRanks(dev_context, nranks_);
    parallel_ctx_->AllReduceByStream(*group.sparse_contents_,
                                     group.sparse_contents_, run_order, false);
  } else {
    VLOG(3) << "dense group [" << curr_group_index
            << "] start allreduce in ring[" << run_order << "]";
    // Select common commstream to concat tensors
    // group.dense_tensors ---> group.dense_contents_
    group.ConcatTensors(dev_context);

// NOTE(liuyuhui): ConcatTensors uses the communication stream, but BKCL only
// supports the default stream for communication, so there are some problems
// with synchronization, and a WaitComm is needed here.
// TODO(liuyuhui): If BKCL supports non-blocking communication, it should be
// fixed in the same way as multi-GPU training.
#ifdef PADDLE_WITH_XPU_BKCL
    if (platform::is_xpu_place(group.dense_tensors_[0].place())) {
      parallel_ctx_->WaitComm(run_order);
    }
#endif

    group.DivNRanks(dev_context, nranks_);
    // Start allreduce
    parallel_ctx_->AllReduceByStream(
        group.dense_contents_, &(group.dense_contents_), run_order, false);

    // Select communication stream to split tensors
    // group.dense_contents_ ---> group.dense_tensors
    group.SplitTensors(dev_context);
  }
}

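// Regroup the parameters according to the order in which their gradients
// actually arrived during the first backward pass; the result replaces
// group_indices_ in FinalizeBackward when NeedRebuildGroup() is true.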
std::vector<std::vector<size_t>> Reducer::RebuildGruops() {
  VLOG(3) << "The order of parameter arrival: "
          << string::join_strings(rebuild_var_indices_, ',');

  PADDLE_ENFORCE_EQ(
      rebuild_vars_.size(), vars_.size(),
      platform::errors::PreconditionNotMet(
          "The number of rebuilt vars should be equal to the number of "
          "original vars; expect it to be %d, but got %d.",
          vars_.size(), rebuild_vars_.size()));
  std::reverse(rebuild_vars_.begin(), rebuild_vars_.end());
  std::reverse(rebuild_var_indices_.begin(), rebuild_var_indices_.end());
  auto rebuild_group_indices =
      AssignGroupBySize(rebuild_vars_, is_sparse_gradient_, group_size_limits_,
                        rebuild_var_indices_);
  has_rebuilt_group_ = true;
  rebuild_vars_.clear();
  rebuild_var_indices_.clear();
  std::reverse(rebuild_group_indices.begin(), rebuild_group_indices.end());
  return rebuild_group_indices;
}

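// For parameters that are unused on this rank but used on at least one other
// rank, copy the allreduced slice of the fused buffer back into the
// parameter's gradient var so that gradient accumulation stays consistent.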
void Reducer::ProcessUnusedDenseVars() {
  // The calculation stream must be used here to
  // avoid conflicts with communication.
  VLOG(3) << "Local used vars : "
          << string::join_strings(local_used_vars_, ',');
  const auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
  // Copy local_used_vars_ to device (H2D) so it can be allreduced
  auto *global_used_tensor =
      global_used_vars_.GetMutable<framework::LoDTensor>();
  framework::TensorFromVector<int>(local_used_vars_, *dev_ctx,
                                   global_used_tensor);
  parallel_ctx_->AllReduceByStream(global_used_vars_, &global_used_vars_, 0,
                                   true);
  framework::TensorToVector<int>(*global_used_tensor, *dev_ctx,
                                 &local_used_vars_);

  // sync compute stream to get global used var message,
  // but maybe affect speed performance
  parallel_ctx_->SynchronizeCompute();
  VLOG(3) << "Global used vars : "
          << string::join_strings(local_used_vars_, ',');

  for (const auto var_index : unused_vars_) {
    const bool global_unused = (local_used_vars_[var_index] == 0);

    // global used but local unused, set grad
    VLOG(3) << "Var [" << var_index << "] [" << vars_[var_index]->Name()
            << "] global_unused:" << global_unused
            << "  has grad: " << HasGrad(var_index);

    if (!global_unused) {
      VLOG(3) << "Start process unused Var";
      // 1. source var base
      const auto &var_locator = variable_locators_[var_index];
      const auto group_index = var_locator.group_index;
      const auto &group = groups_[group_index];
      const auto inside_group_index = var_locator.inside_group_index;
      const auto &src_tensor = group.dense_tensors_[inside_group_index];
      // sparse no need to check and no support find_unused_parameters
      if (group.is_sparse_) {
        continue;
      }
      // 2. destination var base
      auto dest_var_base = vars_[var_index];
      auto *dest_tensor =
          dest_var_base->MutableVar()->GetMutable<framework::LoDTensor>();
      const auto &dest_dims = dest_tensor->dims();

      // 3. create grad var base or get grad var base
      auto grad_var_base_tmp = dest_var_base->MutableGradVarBase();
      // NOTE(haohongxiang): Calling SetIsEmpty here is to make sure that
      // gradient accumulation can continue normally after clear_gradients()
      // especially in cases involving complex control flow.
      grad_var_base_tmp->SharedVar()->SetIsEmpty(false);

      // 4. set grad tensor
      auto *dest_grad_tensor =
          grad_var_base_tmp->MutableVar()->GetMutable<framework::LoDTensor>();
      const auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
      paddle::framework::TensorCopy(src_tensor, place_, *dev_ctx,
                                    dest_grad_tensor);
      dest_grad_tensor->Resize(dest_dims);
    }
  }
}

bool Reducer::HasGrad(size_t var_index) {
  const auto grad_var = vars_[var_index]->GradVarBase();
  if (!grad_var || !grad_var->Var().IsInitialized()) {
    return false;
  }

  const auto &var = grad_var->Var();
  if (var.IsType<framework::LoDTensor>()) {
    if (var.Get<framework::LoDTensor>().IsInitialized()) {
      return true;
    }
  } else if (var.IsType<framework::SelectedRows>()) {
    if (var.Get<framework::SelectedRows>().value().IsInitialized()) {
      return true;
    }
  } else {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Only support LoDTensor and SelectedRows for gradient var"));
  }
  return false;
}

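// Called once all groups have been scheduled: wait for the communication
// streams, optionally rebuild the groups, and reconcile globally-unused
// parameters when find_unused_parameters is enabled.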
void Reducer::FinalizeBackward() {
  groups_need_finalize_ = false;
  grad_need_hooks_ = false;
#ifdef PADDLE_WITH_XPU_BKCL
  {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [&] { return comm_op_count_ == 0; });
  }
#endif

  // Must prevent compute_stream_ starting until all comm streams have finished
  for (int i = 0; i < nrings_; ++i) {
    parallel_ctx_->WaitComm(i);
  }

  if (NeedRebuildGroup()) {
    VLOG(3) << "Start rebuilding the groups";
    auto rebuild_group_indices = RebuildGruops();
    group_indices_ = std::move(rebuild_group_indices);
    InitializeGroups(group_indices_);
  }

  if (find_unused_vars_each_step_) {
// TODO(liuyuhui): support TensorCopy/TensorFromVector/TensorToVector on XPU
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) || \
    defined(PADDLE_WITH_GLOO) || defined(PADDLE_WITH_ASCEND_CL)
    ProcessUnusedDenseVars();
#endif
    // Initialize local used vars
    local_used_vars_.clear();
    local_used_vars_.resize(vars_.size(), 0);
    VLOG(3) << "ProcessUnusedDenseVars is finished.";
  }

  VLOG(3) << "In the batch, Reducer is finished.";
}

// According to the size of each parameter, it is allocated to different groups.
// The sparse parameter occupies a group exclusively. The dense parameters of
// the same data type are assigned to the same group. When dividing groups, the
// size of each group is limited by the corresponding value in
// group_size_limits in turn; once the limits are exhausted, the last value of
// group_size_limits keeps being used. A limit value of 0 means that each
// parameter monopolizes its own group.
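//
// A small illustrative example (hypothetical sizes, not taken from any real
// config): with group_size_limits = {400} bytes and three dense FP32
// parameters of 50 elements each (200 bytes apiece), parameter 0 brings the
// running size to 200 (< 400), parameter 1 brings it to 400 (>= 400) and
// flushes group {0, 1}, and parameter 2 is left for the final flush, giving
// groups {0, 1} and {2}.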
std::vector<std::vector<size_t>> AssignGroupBySize(
    const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
    const std::vector<bool> &is_sparse_gradient,
    const std::vector<size_t> &group_size_limits,
    const std::vector<int64_t> &tensor_indices) {
  PADDLE_ENFORCE_EQ(vars.size(), is_sparse_gradient.size(),
                    platform::errors::PreconditionNotMet(
                        "vars len must be equal to is_sparse_gradient len, but "
                        "[%lu] != [%lu]",
                        vars.size(), is_sparse_gradient.size()));
  auto check_perm = [](const std::vector<int64_t> &x) -> bool {
    size_t len = x.size();
    std::vector<size_t> cnt(len, 0);
    for (size_t i = 0; i < len; ++i) {
      if (x[i] >= static_cast<int64_t>(len) || x[i] < 0 || cnt[x[i]]) {
        return false;
      }
      cnt[x[i]]++;
    }
    return true;
  };
  PADDLE_ENFORCE_EQ(true, check_perm(tensor_indices),
                    platform::errors::PreconditionNotMet(
                        "tensor_indices must be a permutation from 0 to %lu",
                        tensor_indices.size()));
  // the return vector
  std::vector<std::vector<size_t>> res;

  // Key: the var type
  // Value: should use which index in group_size_limits for group size limit
  std::unordered_map<std::string, size_t> group_limit_index;

  // Key: the var type
  // Value: <the var index in input tensors, total numel in this group>
  std::unordered_map<std::string, std::pair<std::vector<size_t>, size_t>>
      next_group;

  for (size_t i = 0; i < vars.size(); ++i) {
    const auto &var = vars[i];

    size_t tensor_real_index = i;
    if (!tensor_indices.empty()) {
      tensor_real_index = tensor_indices[i];
    }

    if (is_sparse_gradient[tensor_real_index]) {
      // keep each sparse var in its own group
      res.push_back({tensor_real_index});
      continue;
    }

    const auto &var_dtype = var->DataType();
    const auto var_dtype_str = framework::DataTypeToString(var_dtype);
    VLOG(3) << "var[" << var->GradVarName() << "] 's type is "
            << var->DataType();
    auto &group_info = next_group[var_dtype_str];
    int64_t var_size = -1;
    if (var->Var().IsType<framework::LoDTensor>()) {
      var_size = var->Var().Get<framework::LoDTensor>().numel();
    } else {
      VLOG(3) << "var " << var->Name()
              << " is not tensor or selected_rows, so skip it";
      continue;
    }
    group_info.first.push_back(tensor_real_index);
    group_info.second += framework::SizeOfType(var_dtype) * var_size;

    if (group_limit_index.find(var_dtype_str) == group_limit_index.end()) {
      // means it is the first var of var_dtype
      group_limit_index[var_dtype_str] = 0;
    }
    auto &cur_limit_index = group_limit_index[var_dtype_str];
    if (group_info.second >= group_size_limits[cur_limit_index]) {
      // exceed group capacity and create a new group
      res.emplace_back(std::move(group_info.first));
      group_info = std::pair<std::vector<size_t>, size_t>();
      cur_limit_index =
          (std::min)(cur_limit_index + 1, group_size_limits.size() - 1);
    }
  }

  // add the final groups
  for (auto &e : next_group) {
    auto &group_info = e.second;
    if (!group_info.first.empty()) {
      res.emplace_back(std::move(group_info.first));
    }
  }

  for (const auto &group_index : res) {
    PADDLE_ENFORCE_NE(
        group_index.empty(), true,
        platform::errors::PreconditionNotMet(
            "AssignGroupBySize construct empty group, please check."));
  }
  if (tensor_indices.empty()) {
    std::sort(res.begin(), res.end(),
              [](const std::vector<size_t> &x, const std::vector<size_t> &y) {
                return x.front() < y.front();
              });
  }
  return res;
}
#endif

}  // namespace imperative
}  // namespace paddle