// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/reducer.h"

#include <iostream>

#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/parallel_context.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"
#ifdef PADDLE_WITH_XPU_BKCL
#include "paddle/fluid/platform/device/xpu/enforce_xpu.h"
#endif
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/core/dense_tensor.h"
namespace paddle {
namespace imperative {

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) ||     \
    defined(PADDLE_WITH_XPU_BKCL) || defined(PADDLE_WITH_GLOO) || \
    defined(PADDLE_WITH_ASCEND_CL) || defined(PADDLE_WITH_CNCL)
// Divide the gradient tensor by nranks.
void Group::DivNRanks(const platform::DeviceContext &context, int64_t nranks) {
  framework::Tensor *tensor =
      is_sparse_
          ? sparse_contents_->GetMutable<phi::SelectedRows>()->mutable_value()
          : dense_contents_.GetMutable<framework::LoDTensor>();

  if (platform::is_gpu_place(tensor->place())) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    DivNRanks(tensor, nranks, context);
#endif
  } else if (platform::is_npu_place(tensor->place())) {
    // TODO(kuizhiqing)
    VLOG(4) << "divnrank for npu not supported yet";
  } else if (platform::is_cpu_place(tensor->place())) {
    VLOG(4) << "before div 2" << *tensor;
    VLOG(4) << "NDiv for cpu devices : rank = " << nranks;
#ifdef PADDLE_WITH_HIP
    if (dtype_ == paddle::framework::proto::VarType_Type_BF16) {
      PADDLE_THROW(paddle::platform::errors::Fatal(
          "Unsupported BF16 in DataParallel for now"));
    }
    framework::VisitDataTypeForHIP(
        dtype_,
        DivNRanksForAllReduce<phi::CPUContext>(tensor, nranks, context));
#else
    framework::VisitDataType(
        dtype_,
        DivNRanksForAllReduce<phi::CPUContext>(tensor, nranks, context));
#endif
    VLOG(4) << "after div 2" << *tensor;
  } else if (platform::is_xpu_place(tensor->place())) {
#ifdef PADDLE_WITH_XPU_BKCL
// TODO(liuyuhui) support xpu about div nranks in the future
#endif
  } else if (platform::is_mlu_place(tensor->place())) {
    // TODO(zhangna)
    VLOG(4) << "divnrank for mlu not supported yet";
  }
}

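// Concatenate the separate gradient tensors of a dense group into the
// group's fused buffer (p_dense_contents), so that the whole group can be
// covered by a single collective call.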
template <typename DeviceContext, typename T>
static void ConcatTensorsForAllReduce(
    const DeviceContext &context,
    const std::vector<framework::Tensor> &dense_tensors_,
    framework::Variable *p_dense_contents) {
  operators::math::ConcatFunctor<DeviceContext, T> concat_functor_;
  concat_functor_(context,
                  dense_tensors_,
                  0,
                  p_dense_contents->GetMutable<framework::LoDTensor>());
}

template <typename DeviceContext, typename T>
static void SplitTensorsForAllReduce(
    const DeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<framework::LoDTensor>();
  std::vector<framework::Tensor *> outs;
  std::vector<const framework::Tensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  // Sometimes direct copies will be faster
  if (p_dense_tensors->size() < 10) {
    operators::StridedMemcpyWithAxis0<T>(context, *in, shape_refer, &outs);
  } else {
    operators::math::SplitFunctor<DeviceContext, T> split_functor_;
    split_functor_(context, *in, shape_refer, 0, &outs);
  }
}

// context is used to select the stream for concat
template <typename DeviceContext>
static void ConcatTensorsWithType(
    const DeviceContext &context,
    const std::vector<framework::Tensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      ConcatTensorsForAllReduce<DeviceContext, platform::float16>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<DeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP64:
      ConcatTensorsForAllReduce<DeviceContext, double>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it concats tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <typename DeviceContext>
static void SplitTensorsWithType(
    const DeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      SplitTensorsForAllReduce<DeviceContext, platform::float16>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<DeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP64:
      SplitTensorsForAllReduce<DeviceContext, double>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it splits tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

#ifdef PADDLE_WITH_XPU_BKCL
template <>
void SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<framework::LoDTensor>();
  std::vector<framework::Tensor *> outs;
  std::vector<const framework::Tensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  operators::math::SplitFunctor<platform::XPUDeviceContext, float>
      split_functor_;
  split_functor_(context, *in, shape_refer, 0, &outs);
}

// context is used to select the stream for concat
template <>
void ConcatTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    const std::vector<framework::Tensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it concats tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <>
void SplitTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it splits tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}
#endif

#ifdef PADDLE_WITH_CNCL
// context is used to select the stream for concat
template <>
void ConcatTensorsWithType<platform::MLUDeviceContext>(
    const platform::MLUDeviceContext &context,
    const std::vector<framework::Tensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      ConcatTensorsForAllReduce<platform::MLUDeviceContext, platform::float16>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<platform::MLUDeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it concats tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <>
void SplitTensorsWithType<platform::MLUDeviceContext>(
    const platform::MLUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      SplitTensorsForAllReduce<platform::MLUDeviceContext, platform::float16>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<platform::MLUDeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it splits tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}
#endif

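// Dispatch the concat of this group's gradient tensors to the device-specific
// context. Each place is only usable when Paddle was compiled with the
// matching communication library (NCCL/RCCL, BKCL, HCCL, CNCL).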
void Group::ConcatTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    ConcatTensorsWithType(
        static_cast<const platform::CUDADeviceContext &>(context),
        dense_tensors_,
        &dense_contents_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat grad tensors since it's not compiled with NCCL,"
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    ConcatTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        dense_tensors_,
        &dense_contents_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat xpu grads since it's not compiled with BKCL,"
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_npu_place(place)) {
#ifdef PADDLE_WITH_ASCEND_CL
    ConcatTensorsWithType(
        static_cast<const platform::NPUDeviceContext &>(context),
        dense_tensors_,
        &dense_contents_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat npu grads since it's not compiled with HCCL,"
        "Please recompile or reinstall Paddle with HCCL support."));
#endif
  } else if (platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_CNCL
    ConcatTensorsWithType(
        static_cast<const platform::MLUDeviceContext &>(context),
        dense_tensors_,
        &dense_contents_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat mlu grads since it's not compiled with CNCL,"
        "Please recompile or reinstall Paddle with CNCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    ConcatTensorsWithType(static_cast<const phi::CPUContext &>(context),
                          dense_tensors_,
                          &dense_contents_,
                          dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Concat grad tensor not supported on place (%s)", place));
  }
}

void Group::SplitTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    SplitTensorsWithType(
        static_cast<const platform::CUDADeviceContext &>(context),
        &dense_contents_,
        &dense_tensors_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split grad tensor since it's not compiled with NCCL,"
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    SplitTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        &dense_contents_,
        &dense_tensors_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split xpu grad since it's not compiled with BKCL,"
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_npu_place(place)) {
#ifdef PADDLE_WITH_ASCEND_CL
    SplitTensorsWithType(
        static_cast<const platform::NPUDeviceContext &>(context),
        &dense_contents_,
        &dense_tensors_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split npu grad since it's not compiled with HCCL,"
        "Please recompile or reinstall Paddle with HCCL support."));
#endif
  } else if (platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_CNCL
    SplitTensorsWithType(
        static_cast<const platform::MLUDeviceContext &>(context),
        &dense_contents_,
        &dense_tensors_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split mlu grad since it's not compiled with CNCL,"
        "Please recompile or reinstall Paddle with CNCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    SplitTensorsWithType(static_cast<const phi::CPUContext &>(context),
                         &dense_contents_,
                         &dense_tensors_,
                         dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Split grad tensor not supported on place (%s)", place));
  }
}

std::ostream &operator<<(std::ostream &out, const Group &group) {
  const auto &vars = group.variable_indices_;
  out << "numel: " << group.all_length_ << " ;is_sparse: " << group.is_sparse_
      << " ;var number: " << vars.size() << "\n";
  auto begin = vars.begin();
  auto end = vars.end();
  out << "[";
  for (int i = 0; begin != end && i < 100; ++i, ++begin) {
    if (i > 0) out << ' ';
    out << *begin;
  }
  if (begin != end) {
    out << " ...";
  }
  out << "]\n";
  return out;
}

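// The Reducer is constructed once for a DataParallel model. It registers a
// gradient hook on every trainable variable and builds the initial fusion
// groups from group_indices.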
Reducer::Reducer(const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
                 const std::vector<std::vector<size_t>> &group_indices,
                 const std::vector<bool> &is_sparse_gradient,
                 std::shared_ptr<imperative::ParallelContext> parallel_ctx,
                 const std::vector<size_t> &group_size_limits,
                 bool find_unused_vars)
    : vars_(vars),
      group_indices_(group_indices),
      is_sparse_gradient_(is_sparse_gradient),
      parallel_ctx_(parallel_ctx),
      group_size_limits_(group_size_limits),
      find_unused_vars_each_step_(find_unused_vars) {
  VLOG(3) << "Start construct the Reducer ...";
  nrings_ = parallel_ctx->GetNRings();
  nranks_ = parallel_ctx->GetNRanks();
  // initialize groups
  InitializeGroups(group_indices);
  for (size_t global_var_index = 0; global_var_index < vars_.size();
       ++global_var_index) {
    auto var = vars_[global_var_index];
    var->GradVarBase()->AddVoidHook(std::make_shared<std::function<void()>>(
        [=]() { this->AddDistHook(global_var_index); }));
    var_index_map_[var->GradVarBase()->SharedVar().get()] = global_var_index;
  }

  // for checking var is ready once
  vars_marked_ready_.resize(vars_.size(), false);

  // Initialize local used vars
  local_used_vars_.resize(vars_.size(), 0);
}

void Reducer::InitializeDenseGroups(
    const std::vector<size_t> &variable_indices_, Group *p_group) {
  int64_t all_length = 0;
  for (size_t index = 0; index < variable_indices_.size(); ++index) {
    const auto variable_index = variable_indices_[index];
    const auto &var = vars_[variable_index];
    const auto &var_name = var->Name();
    PADDLE_ENFORCE_EQ(is_sparse_gradient_[variable_index],
                      false,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s's GRAD must be LoDTensor, but received "
                          "GRAD is SelectedRows",
                          var_name));

    auto lod_tensor = var->MutableVar()->GetMutable<framework::LoDTensor>();
    PADDLE_ENFORCE_EQ(lod_tensor->IsInitialized(),
                      true,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s is not initialized.", var_name));
    const auto size = lod_tensor->numel();
    PADDLE_ENFORCE_GT(
        size,
        0,
        platform::errors::PreconditionNotMet(
            "The number of tensor %s's elements is 0.", var_name));
    all_length += size;

    p_group->length_.push_back(size);

    // for concat operator
    p_group->dense_tensors_.push_back(framework::Tensor());

    // Check the dtype and place; they must be the same.
    const auto &dtype = var->DataType();
    const auto &place = var->Place();
    if (index > 0) {
      PADDLE_ENFORCE_EQ(
          dtype,
          p_group->dtype_,
          platform::errors::PreconditionNotMet(
              "Tensor %s has different dtype. Expected dtype is %s, but actual "
              "dtype is %s",
              var_name,
              framework::DataTypeToString(p_group->dtype_),
              framework::DataTypeToString(dtype)));
      PADDLE_ENFORCE_EQ(place,
                        place_,
                        platform::errors::PreconditionNotMet(
                            "Tensor %s has different place. Expected place is "
                            "%s, but actual place is %s",
                            var_name,
                            place_,
                            place));
    } else {
      p_group->dtype_ = dtype;
      place_ = place;
    }
  }
  p_group->all_length_ = all_length;
}

// Each parameter will be initialized according to the group information.
// For the sparse parameter, sparse_contents_ in the group directly points
// to the parameter. For dense parameters, first construct an empty Tensor().
// Then specify the actual memory in MarkDenseVarReady.
void Reducer::InitializeGroups(
    const std::vector<std::vector<size_t>> &group_indices) {
  VLOG(3) << "Start initialize groups ..";
  // clear the group
  groups_.clear();
  groups_.reserve(group_indices.size());
  variable_locators_.clear();
  variable_locators_.resize(vars_.size());

  auto group_nums = group_indices.size();
  for (size_t group_index = 0; group_index < group_nums; ++group_index) {
    const auto &variable_indices_ = group_indices[group_index];
    PADDLE_ENFORCE_GT(
        variable_indices_.size(),
        0,
        platform::errors::PreconditionNotMet(
            "The number of group[%d]'s elements is 0.", group_index));
    Group group;

    // It is only used to check whether the group is sparse or dense
    auto first_varbase = vars_[variable_indices_.front()];
    if (variable_indices_.size() == 1 &&
        is_sparse_gradient_[variable_indices_.front()]) {
      // process the sparse gradient. one sparse, one group
      group.dtype_ = first_varbase->DataType();
      group.is_sparse_ = true;
    } else {
      // process the dense gradient.
      InitializeDenseGroups(variable_indices_, &group);
      auto tensor = group.dense_contents_.GetMutable<framework::LoDTensor>();
      tensor->Resize(phi::make_ddim({group.all_length_}))
          .mutable_data(place_, framework::TransToPhiDataType(group.dtype_));
    }

    // map variables to this group by VariableLocator
    size_t inside_group_index = 0;
    for (const auto var_index : variable_indices_) {
      variable_locators_[var_index] = VariableLocator{
          .group_index = group_index,
          .inside_group_index = inside_group_index++,
      };
    }
    group.variable_indices_ = std::move(variable_indices_);
    groups_.emplace_back(std::move(group));
    // Debug Message For Reducer
    VLOG(3) << "The Group[" << group_index << "]:" << groups_.back();
  }
}

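// Count, for every GradOpNode reachable from init_nodes, how many predecessor
// nodes still have to run. The resulting node_deps_ map is consumed by
// TraverseBackwardGraph to perform a topological walk of the autograd graph.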
void Reducer::PrepareDeps(const std::unordered_set<GradOpNode *> &init_nodes) {
  PADDLE_ENFORCE_EQ(
      node_deps_.empty(),
      true,
      platform::errors::AlreadyExists("Op deps must be initialized here"));

  std::queue<GradOpNode *> q;
  std::unordered_set<GradOpNode *> visited;

  for (auto pos = init_nodes.begin(); pos != init_nodes.end(); pos++) {
    q.push(*pos);
    visited.insert(*pos);
  }

  while (!q.empty()) {
    auto *cur_node = q.front();
    q.pop();

    const auto &grad_pending_nodes = cur_node->GradPendingNodes();
    for (auto &grad_pending_node : grad_pending_nodes) {
      PADDLE_ENFORCE_NOT_NULL(
          grad_pending_node,
          platform::errors::NotFound("Grad pending node should not be null"));
      // py_layer is not supported in DataParallel
      auto begin = grad_pending_node->begin();
      auto end = grad_pending_node->end();
      for (auto op_base = begin; op_base != end; op_base++) {
        PADDLE_ENFORCE_EQ(
            op_base->Type() != "py_layer",
            true,
            platform::errors::PreconditionNotMet(
                "Note: Currently PyLayer is not supported in DataParallel. For "
                "using PyLayer in a DataParallel model, you can skip gradient "
                "synchronization among multiple cards by 'no_sync', and "
                "manually implement 'all_reduce' before model optimization. "
                "There is an example showing specific implementation processing "
                "in official docs: https://www.paddlepaddle.org.cn/documentation"
                "/docs/api/paddle/DataParallel_cn.html"));
      }
      ++node_deps_[grad_pending_node.get()];
      if (visited.count(grad_pending_node.get()) == 0) {
        visited.insert(grad_pending_node.get());
        q.push(grad_pending_node.get());
      }
    }
  }
}

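// Traverse the autograd graph starting from the given outputs and record every
// variable that will receive a gradient. Parameters that are never visited are
// collected into unused_vars_, so their groups do not wait for gradient hooks
// that will never fire.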
void Reducer::TraverseBackwardGraph(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  node_deps_.clear();
  std::queue<std::shared_ptr<GradOpNode>> q;
  std::unordered_set<VariableWrapper *> var_visited;
  std::unordered_set<GradOpNode *> init_nodes;

  for (const auto &output : outputs) {
    const auto &grad_node = output->GradVarBase()->GradNode();
    if (grad_node == nullptr || output->OverridedStopGradient()) {
      VLOG(3) << "Skip auto grad since there is no grad op or output is "
                 "stop_gradient=True: "
              << output->Name();
      continue;
    } else {
      init_nodes.insert(grad_node.get());
      var_visited.insert(output->SharedVar().get());
      q.push(grad_node);
    }
  }

  PrepareDeps(init_nodes);
  // Traverse the autograd graph starting at the specified output
  while (!q.empty()) {
    auto cur_node = q.front();
    q.pop();

    for (const auto &cur_op : *cur_node) {
      auto &bwd_outs = cur_op.GetOutsMap();
      for (const auto &pair : bwd_outs) {
        if (!pair.second.IsGrad()) {
          continue;
        }
        for (auto &var : pair.second) {
          if (!var || var->OverridedStopGradient()) {
            continue;
          } else {
            var_visited.insert(var.get());
          }
        }
      }
    }
    for (const auto &grad_pending_node : cur_node->GradPendingNodes()) {
      PADDLE_ENFORCE_NOT_NULL(grad_pending_node,
                              platform::errors::NotFound(
                                  "Grad pending node should not be nullptr"));
      auto iter = node_deps_.find(grad_pending_node.get());
      if (iter == node_deps_.end()) {
        continue;
      }
      if (--(iter->second) == 0) {
        q.push(grad_pending_node);
      }
    }
  }

  for (const auto &it : var_index_map_) {
    if (var_visited.count(it.first) == 0) {
      unused_vars_.push_back(it.second);
      VLOG(3) << "Var[" << it.second << "] [" << it.first->Name()
              << "] is not used";
    }
  }
}

// After each batch is calculated, the counter of each group (group.pending_)
// and the allreduce sequence counter (next_group_) will be reset again.
void Reducer::PrepareForBackward(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  VLOG(3) << "after forward, then reset count for backward.";
  grad_need_hooks_ = true;
  next_group_ = 0;
  std::for_each(groups_.begin(), groups_.end(), [](Group &group) {
    group.pending_ = group.variable_indices_.size();
    group.sparse_contents_ = nullptr;
  });

  // reinitialize vars_marked_ready_ for next iteration
  vars_marked_ready_.clear();
  vars_marked_ready_.resize(vars_.size(), false);

  PADDLE_ENFORCE_EQ(
      groups_need_finalize_,
      false,
      platform::errors::PreconditionNotMet(
          "A serious error has occurred here. Please "
          "set find_unused_parameters=True to traverse backward graph "
          "in each step to prepare reduce in advance. If you have already "
          "set it, there may be several reasons for this error: "
          "1) Please note that all forward outputs derived from the module "
          "parameters must participate in the calculation of losses and "
          "subsequent gradient calculations. If not, the wrapper will hang, "
          "waiting for autograd to generate gradients for these parameters. "
          "you can use detach or stop_gradient to make the unused parameters "
          "detached from the autograd graph. "
          "2) Used multiple forwards and one backward. You may be able to wrap "
          "multiple forwards in a model."));

  // The first var to trigger the unused parameter
  has_marked_unused_vars_ = false;

  if (find_unused_vars_once_ || find_unused_vars_each_step_) {
    unused_vars_.clear();
    TraverseBackwardGraph(outputs);
    // only check once in first step
    find_unused_vars_once_ = false;
  }

  if (find_unused_vars_each_step_ && unused_vars_.empty()) {
    LOG_FIRST_N(WARNING, 1)
        << "All parameters are involved in the backward pass. "
           "It is recommended to set find_unused_parameters to False "
           "to improve performance. However, if unused parameters "
           "appear in subsequent iterative training, then an error "
           "will occur. Please make it clear that in the subsequent "
           "training, there will be no parameters that are not used "
           "in the backward pass, and then set find_unused_parameters";
  }

  if (unused_vars_.size() == vars_.size()) {
    LOG_FIRST_N(WARNING, 1)
        << "There is no parameter in the device involved "
           "in the backward calculation. If there are "
           "parameters on other devices involved in the "
           "backward, then a serious error will occur here.";
  }
}

// Add a hook function to each leaf node. When the gradient of a leaf node is
// generated, a sparse parameter directly executes allreduce, while a dense
// parameter goes through three steps:
// 1. MarkDenseVarReady: find the position of the corresponding group through
//    var_index, share the gradient memory with the group's dense_tensors_,
//    and decrease the group counter by 1.
// 2. MarkGroupReady: when the group counter reaches 0, allreduce can be
//    emitted, and concat + allreduce + split are emitted in turn according
//    to next_group_.
// 3. FinalizeBackward: at the end, synchronize each stream.
void Reducer::AddDistHook(size_t var_index) {
  PADDLE_ENFORCE_LT(var_index,
                    variable_locators_.size(),
                    platform::errors::OutOfRange(
                        "Out of bounds variable index. It must be less "
                        "than %d, but it is %d",
                        variable_locators_.size(),
                        var_index));

  // gradient synchronization is not required when grad_need_hooks_ is false.
  if (!grad_need_hooks_) {
    return;
  }

  VLOG(3) << "Var[" << var_index << "] ["
          << vars_[var_index]->GradVarBase()->Name()
          << "] arrived and triggered disthook";

  local_used_vars_[var_index] = 1;

  // rebuild group when find_unused_vars_each_step_ is false
  if (NeedRebuildGroup()) {
775 776 777
    rebuild_vars_.push_back(vars_[var_index]);
    rebuild_var_indices_.push_back(var_index);
  }

  if (!has_marked_unused_vars_) {
    has_marked_unused_vars_ = true;
    for (const auto &unused_index : unused_vars_) {
      MarkVarReady(unused_index, false);
    }
  }

  MarkVarReady(var_index, true);
}

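// Mark a single variable of a group as ready. For a used dense variable the
// gradient storage is shared into the group's dense_tensors_; for an unused
// one a zero-filled tensor of the same length is prepared instead. Once the
// group's pending counter drops to zero, MarkGroupReady is triggered.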
void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
  groups_need_finalize_ = true;

  const auto &var_locator = variable_locators_[var_index];
  const auto group_index = var_locator.group_index;
  auto &group = groups_[group_index];

  // error happened, if the var is ready before.
  if (vars_marked_ready_[var_index]) {
    auto error_info = string::Sprintf(
        "Error happened, when parameter[%d][%s] has been ready before. "
        "Please set find_unused_parameters=True to traverse backward graph "
        "in each step to prepare reduce in advance. If you have set, "
        "there may be several reasons for this error: "
        "1) In multiple reentrant backward phase, some parameters are reused."
        "2) Using model parameters outside of forward function. Please "
        "make sure that model parameters are not shared in concurrent "
        "forward-backward passes.",
        var_index,
        vars_[var_index]->GradVarBase()->Name());

    PADDLE_ENFORCE_EQ(has_marked_unused_vars_,
                      false,
                      platform::errors::PreconditionNotMet(error_info));

    error_info +=
        "3) Unused parameters retrieval is incorrect. "
        "The return value of forward will be used to retrieve"
        " the unused parameters of the entire model. These "
        "gradients of unused parameters will not be synchronized "
        "between multiple cards. However, if the unused "
        "parameters participate in the backward calculation "
        "again at a later time (e.g. after the forward function, "
        "the loss calculation uses the unused "
        "parameters of the forward and triggers backward), "
        "their gradients will be wrong.";

    PADDLE_ENFORCE_EQ(has_marked_unused_vars_,
                      true,
                      platform::errors::PreconditionNotMet(error_info));
  } else {
    vars_marked_ready_[var_index] = true;
  }

  if (!group.is_sparse_) {
    // process dense group
    const auto inside_group_index = var_locator.inside_group_index;
    const auto length = group.length_[inside_group_index];
    auto &group_tensor = group.dense_tensors_[inside_group_index];

    if (is_used_var) {
      auto var_base = vars_[var_index]->GradVarBase();
      auto tensor = var_base->MutableVar()->GetMutable<framework::LoDTensor>();
      group_tensor.ShareDataWith(*tensor).Resize(
          {static_cast<int64_t>(length)});
    } else {
      // TODO(shenliang03): maybe save the memory
      // by avoiding tensor construction
      if (!group_tensor.IsInitialized()) {
        group_tensor.Resize({static_cast<int64_t>(length)});
        group_tensor.mutable_data(place_,
                                  framework::TransToPhiDataType(group.dtype_));
      }

#ifdef PADDLE_WITH_XPU_BKCL
      if (platform::is_xpu_place(group_tensor.place())) {
        auto dev_ctx = static_cast<platform::XPUDeviceContext *>(
            platform::DeviceContextPool::Instance().Get(place_));
        if (HasGrad(var_index)) {
          auto var_base = vars_[var_index]->GradVarBase();
          auto tensor =
              var_base->MutableVar()->GetMutable<framework::LoDTensor>();
          group_tensor.ShareDataWith(*tensor).Resize(
              {static_cast<int64_t>(length)});
        } else {
          group_tensor.Resize({static_cast<int64_t>(length)});
          int r = xpu::constant(dev_ctx->x_context(),
                                reinterpret_cast<float *>(group_tensor.data()),
                                group_tensor.numel(),
                                0.0f);
          PADDLE_ENFORCE_XDNN_SUCCESS(r, "constant");
          PADDLE_ENFORCE_XPU_SUCCESS(xpu_wait(dev_ctx->stream()));
        }
      }
#elif defined(PADDLE_WITH_CNCL)
      if (platform::is_mlu_place(group_tensor.place())) {
        // TODO(liuyuhui) support MLU set constant
        VLOG(3) << "MLU doesn't support set_constant";
      }
#else
      auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
      if (HasGrad(var_index)) {
        auto var_base = vars_[var_index]->GradVarBase();
        auto tensor =
            var_base->MutableVar()->GetMutable<framework::LoDTensor>();
        group_tensor.ShareDataWith(*tensor).Resize(
            {static_cast<int64_t>(length)});
      } else {
        group_tensor.Resize({static_cast<int64_t>(length)});
        phi::funcs::set_constant(*dev_ctx, &group_tensor, 0.0);
      }
#endif
    }
  } else {
    // process sparse group
    PADDLE_ENFORCE_EQ(
        HasGrad(var_index),
        true,
        platform::errors::PreconditionNotMet(
            "The sparse parameter[%d][%s] should have gradient. "
            "Currently, DataParallel does not support sparse "
            "parameters without generating gradients during training. "
            "For example, if is_sparse=True is used in Embedding, "
            "the current step of this parameter cannot generate gradient "
            "because of stop_gradient/detach, where an error will occur.",
            var_index,
            vars_[var_index]->Name()));
    auto var_base = vars_[var_index]->GradVarBase();
    // need to check tensor type
    PADDLE_ENFORCE_EQ(
        var_base->Var().IsType<phi::SelectedRows>(),
        true,
        platform::errors::PreconditionNotMet(
            "The sparse parameter[%d][%s] must have a selectedrows gradient. "
            "Before forward pass, the parameter type is inferred to be "
            "SelectedRows, but after backward pass, its actual type becomes "
            "LoDTensor. It is currently not supported by DataParallel. "
            "For example, if sparse embedding is used, and the weight of "
            "embedding is shared with subsequent dense parameters, then "
            "the parameter gradient of the embedding will be converted "
            "to dense parameters.",
            var_index,
            vars_[var_index]->Name()));

    group.sparse_contents_ = var_base->MutableVar();
  }

  if (--group.pending_ == 0) {
    // can start allreduce
    MarkGroupReady(group_index);
  }

  if (next_group_ == groups_.size()) {
    FinalizeBackward();
  }
}

// TODO(liuyuhui): If BKCL supports non-blocking communication, it should be
// fixed to work the same as multi-GPU card training.
void Reducer::MarkGroupReady(size_t group_index) {
  PADDLE_ENFORCE_GE(
      group_index,
      next_group_,
      platform::errors::PreconditionNotMet(
          "The index of the incoming group must be greater "
          "than or equal to the previously synchronized group index, "
          "expect it to be greater than or equal to %d, but got %d.",
          next_group_,
          group_index));

  if (group_index > next_group_) {
    VLOG(3) << "It will adjust the order of group in next batch automatically";
    return;
  }

  for (; next_group_ < groups_.size() && groups_[next_group_].pending_ == 0;
       ++next_group_) {
    UNUSED auto &group = groups_[next_group_];
    UNUSED const int run_order = next_group_ % nrings_;

    // For CUDA or XPU, compute_stream --> comm_stream.
    // For CPU, do nothing.
    // NOTE: Because concat uses the comm_stream,
    // we expose the WaitCompute() interface and call
    // it here.
    parallel_ctx_->WaitCompute(run_order);
    FusedAllReduceSchedule(run_order, group, next_group_);
  }
}

void Reducer::FusedAllReduceSchedule(const int run_order,
                                     Group &group,
                                     const int curr_group_index) {
  // The overall timeline: concat > div_nranks > allreduce > split
  // dev_context is used to select different stream
  const auto &dev_context = *parallel_ctx_->GetDeviceContext(run_order);
  if (group.is_sparse_) {
    VLOG(3) << "sparse group [" << curr_group_index
            << "] start allreduce in ring[" << run_order << "]";
    group.DivNRanks(dev_context, nranks_);
    parallel_ctx_->AllReduceByStream(
        *group.sparse_contents_, group.sparse_contents_, run_order, false);
  } else {
    VLOG(3) << "dense group [" << curr_group_index
            << "] start allreduce in ring[" << run_order << "]";
    // Select the common comm stream to concat tensors
    // group.dense_tensors ---> group.dense_contents_
    group.ConcatTensors(dev_context);

    group.DivNRanks(dev_context, nranks_);
    // Start allreduce
    parallel_ctx_->AllReduceByStream(
        group.dense_contents_, &(group.dense_contents_), run_order, false);

    // Select communication stream to split tensors
    // group.dense_contents_ ---> group.dense_tensors
    group.SplitTensors(dev_context);
  }
}

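// Rebuild the group partition from the order in which gradients actually
// arrived during the first backward pass. Hooks fire in roughly reverse
// execution order, so the recorded sequence is reversed before
// AssignGroupBySize is called with the original size limits.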
std::vector<std::vector<size_t>> Reducer::RebuildGruops() {
  VLOG(3) << "The order of parameter arrival: "
          << string::join_strings(rebuild_var_indices_, ',');

  PADDLE_ENFORCE_EQ(
1004 1005
      rebuild_vars_.size(),
      vars_.size(),
      platform::errors::PreconditionNotMet(
          "The number of rebuilt vars should be equal to the number of "
          "original vars; expect it to be %d, but got %d.",
          vars_.size(),
          rebuild_vars_.size()));
  std::reverse(rebuild_vars_.begin(), rebuild_vars_.end());
  std::reverse(rebuild_var_indices_.begin(), rebuild_var_indices_.end());
  auto rebuild_group_indices = AssignGroupBySize(rebuild_vars_,
                                                 is_sparse_gradient_,
                                                 group_size_limits_,
                                                 rebuild_var_indices_);
  has_rebuilt_group_ = true;
  rebuild_vars_.clear();
  rebuild_var_indices_.clear();
  std::reverse(rebuild_group_indices.begin(), rebuild_group_indices.end());
  return rebuild_group_indices;
}

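// Handle parameters that produced no gradient on this rank. The
// local_used_vars_ bitmap is allreduced so every rank learns which parameters
// were used anywhere; for a variable that is used globally but unused locally,
// the already-reduced tensor from its group is copied back into the
// parameter's gradient so all ranks stay consistent.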
void Reducer::ProcessUnusedDenseVars() {
  // The calculation stream must be used here to
  // avoid conflicts with communication.
  VLOG(3) << "Local used vars : "
          << string::join_strings(local_used_vars_, ',');
  const auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
  // Copy local_used_vars_ to device (H2D) so that it can be allreduced
  auto *global_used_tensor =
      global_used_vars_.GetMutable<framework::LoDTensor>();
  framework::TensorFromVector<int>(
      local_used_vars_, *dev_ctx, global_used_tensor);
  parallel_ctx_->AllReduceByStream(
      global_used_vars_, &global_used_vars_, 0, true);
  framework::TensorToVector<int>(
      *global_used_tensor, *dev_ctx, &local_used_vars_);

  // sync compute stream to get global used var message,
  // but it may affect performance
  parallel_ctx_->SynchronizeCompute();
  VLOG(3) << "Global used vars : "
          << string::join_strings(local_used_vars_, ',');

  for (const auto var_index : unused_vars_) {
    const bool global_unused = (local_used_vars_[var_index] == 0);

    // global used but local unused, set grad
    VLOG(3) << "Var [" << var_index << "] [" << vars_[var_index]->Name()
            << "] global_unused:" << global_unused
            << "  has grad: " << HasGrad(var_index);

    if (!global_unused) {
      VLOG(3) << "Start process unused Var";
      // 1. source var base
      const auto &var_locator = variable_locators_[var_index];
      const auto group_index = var_locator.group_index;
      const auto &group = groups_[group_index];
      const auto inside_group_index = var_locator.inside_group_index;
      const auto &src_tensor = group.dense_tensors_[inside_group_index];
      // Sparse parameters need no check; find_unused_parameters does not
      // support them.
      if (group.is_sparse_) {
        continue;
      }
      // 2. destination var base
      auto dest_var_base = vars_[var_index];
      auto *dest_tensor =
          dest_var_base->MutableVar()->GetMutable<framework::LoDTensor>();
      const auto &dest_dims = dest_tensor->dims();

      // 3. create grad var base or get grad var base
      auto grad_var_base_tmp = dest_var_base->MutableGradVarBase();
      // NOTE(haohongxiang): Calling SetIsEmpty here is to make sure that
      // gradient accumulation can continue normally after clear_gradients()
      // especially in cases involving complex control flow.
      grad_var_base_tmp->SharedVar()->SetIsEmpty(false);

      // 4. set grad tensor
      auto *dest_grad_tensor =
          grad_var_base_tmp->MutableVar()->GetMutable<framework::LoDTensor>();
      const auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
      paddle::framework::TensorCopy(
          src_tensor, place_, *dev_ctx, dest_grad_tensor);
      dest_grad_tensor->Resize(dest_dims);
    }
  }
}

bool Reducer::HasGrad(size_t var_index) {
  const auto grad_var = vars_[var_index]->GradVarBase();
  if (!grad_var || !grad_var->Var().IsInitialized()) {
    return false;
  }

  const auto &var = grad_var->Var();
  if (var.IsType<framework::LoDTensor>()) {
    if (var.Get<framework::LoDTensor>().IsInitialized()) {
      return true;
    }
  } else if (var.IsType<phi::SelectedRows>()) {
    if (var.Get<phi::SelectedRows>().value().IsInitialized()) {
      return true;
    }
  } else {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Only support LoDTensor and SelectedRows for gradient var"));
  }
  return false;
}

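// Called once per backward pass after the last group has been scheduled:
// wait on every communication ring, optionally rebuild the groups from the
// recorded gradient arrival order, and process unused parameters when
// find_unused_vars_each_step_ is enabled.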
void Reducer::FinalizeBackward() {
  groups_need_finalize_ = false;
  grad_need_hooks_ = false;

  // Must prevent compute_stream_ starting until all comm streams have finished
  for (int i = 0; i < nrings_; ++i) {
    parallel_ctx_->WaitComm(i);
  }

  if (NeedRebuildGroup()) {
    VLOG(3) << "Start rebuilding the groups";
    auto rebuild_group_indices = RebuildGruops();
    group_indices_ = std::move(rebuild_group_indices);
    InitializeGroups(group_indices_);
  }

  if (find_unused_vars_each_step_) {
// TODO(liuyuhui): support TensorCopy/TensorFromVector/TensorToVector for xpu
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) ||      \
    defined(PADDLE_WITH_GLOO) || defined(PADDLE_WITH_ASCEND_CL) || \
    defined(PADDLE_WITH_CNCL)
    ProcessUnusedDenseVars();
#endif
    // Initialize local used vars
    local_used_vars_.clear();
    local_used_vars_.resize(vars_.size(), 0);
    VLOG(3) << "ProcessUnusedDenseVars is finished.";
  }

  VLOG(3) << "In the batch, Reducer is finished.";
}

// Each parameter is assigned to a group according to its size. A sparse
// parameter occupies a group exclusively, while dense parameters of the same
// data type are assigned to the same group. When dividing groups, the size of
// each group is limited by each value in group_size_limits in turn; once the
// limits run out, the last value of group_size_limits is reused. A limit
// value of 0 means that the parameter will monopolize its group.
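// Illustrative example (the actual limits are chosen by the caller): with
// group_size_limits = {25 * 1024 * 1024, 100 * 1024 * 1024}, the first dense
// group of a dtype is closed once it holds roughly 25 MB of gradients, every
// later group of that dtype is closed at roughly 100 MB, and each sparse
// parameter always forms a group of its own.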
std::vector<std::vector<size_t>> AssignGroupBySize(
    const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
    const std::vector<bool> &is_sparse_gradient,
    const std::vector<size_t> &group_size_limits,
    const std::vector<int64_t> &tensor_indices) {
  PADDLE_ENFORCE_EQ(vars.size(),
                    is_sparse_gradient.size(),
                    platform::errors::PreconditionNotMet(
                        "vars len must be equal to is_sparse_gradient len, but "
                        "[%lu] != [%lu]",
                        vars.size(),
                        is_sparse_gradient.size()));
  auto check_perm = [](const std::vector<int64_t> &x) -> bool {
    size_t len = x.size();
    std::vector<size_t> cnt(len, 0);
    for (size_t i = 0; i < len; ++i) {
      if (x[i] >= static_cast<int64_t>(len) || x[i] < 0 || cnt[x[i]]) {
        return false;
      }
      cnt[x[i]]++;
    }
    return true;
  };
  PADDLE_ENFORCE_EQ(true,
                    check_perm(tensor_indices),
                    platform::errors::PreconditionNotMet(
                        "tensor_indices must be a permutation from 0 to %lu",
                        tensor_indices.size()));
  // the return vector
  std::vector<std::vector<size_t>> res;

  // Key: the var type
  // Value: should use which index in group_size_limits for group size limit
  std::unordered_map<std::string, size_t> group_limit_index;

  // Key: the var type
  // Value: <the var index in input tensors, total numel in this group>
  std::unordered_map<std::string, std::pair<std::vector<size_t>, size_t>>
      next_group;

  for (size_t i = 0; i < vars.size(); ++i) {
    const auto &var = vars[i];

    size_t tensor_real_index = i;
    if (!tensor_indices.empty()) {
      tensor_real_index = tensor_indices[i];
    }

    if (is_sparse_gradient[tensor_real_index]) {
      // we keep sparse var a single group
      res.push_back({tensor_real_index});
      continue;
    }

    const auto &var_dtype = var->DataType();
    const auto var_dtype_str = framework::DataTypeToString(var_dtype);
    VLOG(3) << "var[" << var->GradVarName() << "] 's type is "
            << var->DataType();
    auto &group_info = next_group[var_dtype_str];
    int64_t var_size = -1;
    if (var->Var().IsType<framework::LoDTensor>()) {
      var_size = var->Var().Get<framework::LoDTensor>().numel();
    } else {
      VLOG(3) << "var " << var->Name()
              << " is not tensor or selected_rows, so skip it";
      continue;
    }
    group_info.first.push_back(tensor_real_index);
    group_info.second += framework::SizeOfType(var_dtype) * var_size;

    if (group_limit_index.find(var_dtype_str) == group_limit_index.end()) {
      // means it is the first var of var_dtype
      group_limit_index[var_dtype_str] = 0;
    }
    auto &cur_limit_index = group_limit_index[var_dtype_str];
    if (group_info.second >= group_size_limits[cur_limit_index]) {
      // exceed group capacity and create a new group
      res.emplace_back(std::move(group_info.first));
      group_info = std::pair<std::vector<size_t>, size_t>();
      cur_limit_index =
          (std::min)(cur_limit_index + 1, group_size_limits.size() - 1);
    }
  }

  // add the final groups
  for (auto &e : next_group) {
    auto &group_info = e.second;
    if (!group_info.first.empty()) {
      res.emplace_back(std::move(group_info.first));
    }
  }

  for (const auto &group_index : res) {
    PADDLE_ENFORCE_NE(
        group_index.empty(),
        true,
        platform::errors::PreconditionNotMet(
            "AssignGroupBySize construct empty group, please check."));
  }
  if (tensor_indices.empty()) {
    std::sort(res.begin(),
              res.end(),
              [](const std::vector<size_t> &x, const std::vector<size_t> &y) {
                return x.front() < y.front();
              });
  }
  return res;
}
#endif

}  // namespace imperative
}  // namespace paddle