// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/reducer.h"

#include <iostream>

#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/string/string_helper.h"

#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"

#include "paddle/fluid/imperative/parallel_context.h"

namespace paddle {
namespace imperative {

#if (defined PADDLE_WITH_NCCL) || (defined PADDLE_WITH_XPU_BKCL)
template <typename DeviceContext, typename T>
static void ConcatTensorsForAllReduce(
    const DeviceContext &context,
    const std::vector<framework::Tensor> &dense_tensors_,
    framework::Variable *p_dense_contents) {
  operators::math::ConcatFunctor<DeviceContext, T> concat_functor_;
  concat_functor_(context, dense_tensors_, 0,
                  p_dense_contents->GetMutable<framework::LoDTensor>());
}

template <typename DeviceContext, typename T>
static void SplitTensorsForAllReduce(
    const DeviceContext &context, framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<framework::LoDTensor>();
  std::vector<framework::Tensor *> outs;
  std::vector<const framework::Tensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  // For a small number of tensors, direct copies can be faster than the
  // split functor.
  if (p_dense_tensors->size() < 10) {
    operators::StridedMemcpyWithAxis0<T>(context, *in, shape_refer, &outs);
  } else {
    operators::math::SplitFunctor<DeviceContext, T> split_functor_;
    split_functor_(context, *in, shape_refer, 0, &outs);
  }
}
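
// A rough sketch of the round trip, assuming three hypothetical dense grads
// with shapes {2, 3}, {4} and {5} and a device context ctx:
//
//   std::vector<framework::Tensor> grads;       // numels: 6, 4, 5
//   framework::Variable contents;
//   ConcatTensorsForAllReduce<platform::CPUDeviceContext, float>(
//       ctx, grads, &contents);                 // flat buffer, numel == 15
//   // ... allreduce the flat buffer in place ...
//   SplitTensorsForAllReduce<platform::CPUDeviceContext, float>(
//       ctx, &contents, &grads);                // original shapes restored
//
// Both operations work along axis 0, and the split recovers each tensor's
// shape from shape_refer, so the round trip is exact.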

// context is used to select the stream for concat
template <typename DeviceContext>
static void ConcatTensorsWithType(
    const DeviceContext &context,
    const std::vector<framework::Tensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      ConcatTensorsForAllReduce<DeviceContext, platform::float16>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<DeviceContext, float>(context, dense_tensors_,
                                                      p_dense_contents);
      break;
    case framework::proto::VarType::FP64:
      ConcatTensorsForAllReduce<DeviceContext, double>(context, dense_tensors_,
                                                       p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when concatenating tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <typename DeviceContext>
static void SplitTensorsWithType(
    const DeviceContext &context, framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      SplitTensorsForAllReduce<DeviceContext, platform::float16>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<DeviceContext, float>(context, p_dense_contents,
                                                     p_dense_tensors);
      break;
    case framework::proto::VarType::FP64:
      SplitTensorsForAllReduce<DeviceContext, double>(context, p_dense_contents,
                                                      p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when splitting tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

#ifdef PADDLE_WITH_XPU_BKCL
template <>
void SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<framework::LoDTensor>();
  std::vector<framework::Tensor *> outs;
  std::vector<const framework::Tensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  operators::math::SplitFunctor<platform::XPUDeviceContext, float>
      split_functor_;
  split_functor_(context, *in, shape_refer, 0, &outs);
}

// context is used to select the stream for concat
template <>
void ConcatTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    const std::vector<framework::Tensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it concats tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <>
void SplitTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<framework::Tensor> *p_dense_tensors,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it splits tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}
#endif

void Group::ConcatTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#ifdef PADDLE_WITH_NCCL
    ConcatTensorsWithType(
        static_cast<const platform::CUDADeviceContext &>(context),
        dense_tensors_, &dense_contents_, dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat grad tensors since it's not compiled with NCCL,"
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    ConcatTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        dense_tensors_, &dense_contents_, dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat xpu grads since it's not compiled with BKCL,"
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    ConcatTensorsWithType(
        static_cast<const platform::CPUDeviceContext &>(context),
        dense_tensors_, &dense_contents_, dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Concat grad tensor not supported on place (%s)", place));
  }
}

void Group::SplitTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#ifdef PADDLE_WITH_NCCL
    SplitTensorsWithType(
        static_cast<const platform::CUDADeviceContext &>(context),
        &dense_contents_, &dense_tensors_, dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split grad tensor since it's not compiled with NCCL,"
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    SplitTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        &dense_contents_, &dense_tensors_, dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split xpu grad since it's not compiled with BKCL,"
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    SplitTensorsWithType(
        static_cast<const platform::CPUDeviceContext &>(context),
        &dense_contents_, &dense_tensors_, dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Split grad tensor not supported on place (%s)", place));
  }
}
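
// Example output for a dense group holding vars {0, 1, 2} with 300 total
// elements (values are illustrative):
//
//   numel: 300 ;is_sparse: 0 ;var number: 3
//   [0 1 2]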

std::ostream &operator<<(std::ostream &out, const Group &group) {
  const auto &vars = group.variable_indices_;
  out << "numel: " << group.all_length_ << " ;is_sparse: " << group.is_sparse_
      << " ;var number: " << vars.size() << "\n";
  auto begin = vars.begin();
  auto end = vars.end();
  out << "[";
  for (int i = 0; begin != end && i < 100; ++i, ++begin) {
    if (i > 0) out << ' ';
    out << *begin;
  }
  if (begin != end) {
    out << " ...";
  }
  out << "]\n";
  return out;
}

Reducer::Reducer(const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
                 const std::vector<std::vector<size_t>> &group_indices,
                 const std::vector<bool> &is_sparse_gradient,
                 std::shared_ptr<imperative::ParallelContext> parallel_ctx,
                 const std::vector<size_t> &group_size_limits,
                 bool find_unused_vars)
    : vars_(vars),
      group_indices_(group_indices),
      is_sparse_gradient_(is_sparse_gradient),
      parallel_ctx_(parallel_ctx),
      group_size_limits_(group_size_limits),
      find_unused_vars_(find_unused_vars) {
  VLOG(3) << "Start construct the Reducer ...";
  nrings_ = parallel_ctx->GetNRings();
  // initialize groups
  InitializeGroups(group_indices);
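  // Register a post-hook on each leaf var's gradient accumulator so that
  // AddDistHook(global_var_index) fires as soon as that var's gradient is
  // ready; var_index_map_ lets the backward preparation map a
  // VariableWrapper back to its global index.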
  for (size_t global_var_index = 0; global_var_index < vars_.size();
       ++global_var_index) {
    auto var = vars_[global_var_index];
    var->SharedVar()->AddGradVarLeafBackwardHook(
        std::unique_ptr<LambdaGradAccumulatorPostHook>(
            new LambdaGradAccumulatorPostHook([=](VariableWrapper *grad) {
              this->AddDistHook(global_var_index);
            })));
    var_index_map_[var->GradVarBase()->SharedVar().get()] = global_var_index;
  }
}

void Reducer::InitializeDenseGroups(
    const std::vector<size_t> &variable_indices_, Group *p_group) {
  int64_t all_length = 0;
  for (size_t index = 0; index < variable_indices_.size(); ++index) {
    const auto variable_index = variable_indices_[index];
    const auto &var = vars_[variable_index];
    const auto var_name = var->Name();
    PADDLE_ENFORCE_EQ(is_sparse_gradient_[variable_index], false,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s's GRAD must be LoDTensor, but received "
303 304 305 306 307 308
                          "GRAD is SelectedRows",
                          var_name));

    auto lod_tensor = var->MutableVar()->GetMutable<framework::LoDTensor>();
    PADDLE_ENFORCE_EQ(lod_tensor->IsInitialized(), true,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s is not initialized.", var_name));
    auto size = lod_tensor->numel();
    PADDLE_ENFORCE_GT(
        size, 0, platform::errors::PreconditionNotMet(
                     "The number of tensor %s's elements is 0.", var_name));
    all_length += size;

    p_group->length_.push_back(size);

    // for concat operator
    p_group->dense_tensors_.push_back(framework::Tensor());

    // check the dtype and place, it must be same.
    auto dtype = var->DataType();
    auto place = var->Place();
    if (index > 0) {
      PADDLE_ENFORCE_EQ(
          dtype, p_group->dtype_,
          platform::errors::PreconditionNotMet(
              "Tensor %s has different dtype. Expected dtype is %s, but actual "
              "dtype is %s",
              var_name, framework::DataTypeToString(p_group->dtype_),
              framework::DataTypeToString(dtype)));
      PADDLE_ENFORCE_EQ(place, place_,
                        platform::errors::PreconditionNotMet(
                            "Tensor %s has different place. Expected place is "
                            "%s, but actual place is %s",
                            var_name, place_, place));
    } else {
      p_group->dtype_ = dtype;
      place_ = place;
    }
  }
  p_group->all_length_ = all_length;
}

// Each parameter is initialized according to the group information.
// For a sparse parameter, sparse_contents_ in the group points directly
// at the parameter. For dense parameters, an empty Tensor() is constructed
// first, and the actual memory is bound later in MarkVarReady.
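// A hypothetical layout: with group_indices = {{0, 1}, {2}} where var 2 is
// sparse, this builds one dense group whose dense_contents_ is a flat
// buffer of numel(var0) + numel(var1) elements, plus one sparse group with
// is_sparse_ = true; variable_locators_ then maps var 1 to
// {group_index = 0, inside_group_index = 1}.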
void Reducer::InitializeGroups(
    const std::vector<std::vector<size_t>> &group_indices) {
  VLOG(3) << "Start initialize groups ..";
  // clear the group
  groups_.clear();
  groups_.reserve(group_indices.size());
  variable_locators_.clear();
  variable_locators_.resize(vars_.size());

  auto group_nums = group_indices.size();
  for (size_t group_index = 0; group_index < group_nums; ++group_index) {
    const auto &variable_indices_ = group_indices[group_index];
    PADDLE_ENFORCE_GT(
        variable_indices_.size(), 0,
        platform::errors::PreconditionNotMet(
            "The number of group[%d]'s elements is 0.", group_index));
    Group group;

    // Only used to check whether the group is sparse or dense.
    auto first_varbase = vars_[variable_indices_.front()];
    if (variable_indices_.size() == 1 &&
        is_sparse_gradient_[variable_indices_.front()]) {
      // process the sparse gradient. one sparse, one group
      group.dtype_ = first_varbase->DataType();
      group.is_sparse_ = true;
    } else {
      // process the dense gradient.
      InitializeDenseGroups(variable_indices_, &group);
      auto tensor = group.dense_contents_.GetMutable<framework::LoDTensor>();
      tensor->Resize(framework::make_ddim({group.all_length_}))
          .mutable_data(place_, group.dtype_);
    }

    // map variables to this group by VariableLocator
    size_t inside_group_index = 0;
    for (const auto var_index : variable_indices_) {
      variable_locators_[var_index] = VariableLocator{
          .group_index = group_index,
          .inside_group_index = inside_group_index++,
      };
    }
    // NOTE: variable_indices_ is a const reference, so std::move would
    // silently degrade to a copy; assign directly instead.
    group.variable_indices_ = variable_indices_;
    groups_.emplace_back(std::move(group));
    // Debug Message For Reducer
    VLOG(3) << "The Group[" << group_index << "]:";
    VLOG(3) << groups_.back();
  }
}

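// PrepareDeps counts, for every grad node reachable from init_nodes, how
// many pending predecessors it has. For a hypothetical graph A -> C,
// B -> C, C -> D with init_nodes = {A, B}, node_deps_ ends up as
// {C: 2, D: 1}; the traversal in PrepareForBackward then only descends
// into C after both A and B have been popped, mirroring topological order.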
void Reducer::PrepareDeps(const std::unordered_set<GradOpNode *> &init_nodes) {
  PADDLE_ENFORCE_EQ(
      node_deps_.empty(), true,
      platform::errors::AlreadyExists(
          "Op deps must be empty before initialization."));

  std::queue<GradOpNode *> q;
  std::unordered_set<GradOpNode *> visited;

  for (auto pos = init_nodes.begin(); pos != init_nodes.end(); pos++) {
    q.push(*pos);
    visited.insert(*pos);
  }

  while (!q.empty()) {
    auto *cur_node = q.front();
    q.pop();

    for (auto &cur_op : *cur_node) {
      cur_op.EnforceHasInOut();
    }

    const auto &grad_pending_nodes = cur_node->GradPendingNodes();
    for (auto &grad_pending_node : grad_pending_nodes) {
      PADDLE_ENFORCE_NOT_NULL(
          grad_pending_node,
          platform::errors::NotFound("Grad pending node should not be null"));
      ++node_deps_[grad_pending_node.get()];
      if (visited.count(grad_pending_node.get()) == 0) {
        visited.insert(grad_pending_node.get());
        q.push(grad_pending_node.get());
      }
    }
  }
}

// After each batch is computed, the counter of each group (group.pending_)
// and the allreduce sequence counter (next_group_) are reset again.
void Reducer::PrepareForBackward(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  VLOG(3) << "start reseting count..";
  next_group_ = 0;
  std::for_each(groups_.begin(), groups_.end(), [](Group &group) {
    group.pending_ = group.variable_indices_.size();
    group.sparse_contents_ = nullptr;
  });

  PADDLE_ENFORCE_EQ(
      all_group_ready_, false,
      platform::errors::PreconditionNotMet(
          "Please note that all ``forward`` outputs derived from the module "
          "parameters must participate in the calculation of losses and "
          "subsequent gradient calculations. If not, the wrapper will hang, "
          "waiting for autograd to generate gradients for these parameters. "
          "you can use detach or stop_gradient to make the unused parameters "
          "detached from the autograd graph."));

  // The first var to arrive triggers the marking of unused parameters.
  has_marked_unused_vars_ = false;
  if (!find_unused_vars_) {
    return;
  }

  // TODO(shenliang03): the "find_unused_vars" interface will be exposed in
  // the future to handle control flow and process unused parameters.
  find_unused_vars_ = false;

  unused_vars_.clear();
  node_deps_.clear();
  std::queue<std::shared_ptr<GradOpNode>> q;
  std::unordered_set<VariableWrapper *> var_visited;
  std::unordered_set<GradOpNode *> init_nodes;

  for (const auto &output : outputs) {
    const auto &grad_node = output->GradVarBase()->GradNode();
    if (grad_node == nullptr || output->OverridedStopGradient()) {
      VLOG(3) << "Skip auto grad since there is no grad op or output is "
                 "stop_gradient=True: "
              << output->Name();
      continue;
    } else {
      init_nodes.insert(grad_node.get());
      var_visited.insert(output->SharedVar().get());
      q.push(grad_node);
    }
  }

  PrepareDeps(init_nodes);
  // Traverse the autograd graph starting from the specified outputs.
  while (!q.empty()) {
    auto cur_node = q.front();
    q.pop();

    for (const auto &cur_op : *cur_node) {
      cur_op.EnforceHasInOut();
      auto &bwd_outs = cur_op.GetOutsMap();
      for (const auto &pair : bwd_outs) {
        if (!pair.second.IsGrad()) {
          continue;
        }
        for (auto &var : pair.second) {
          if (!var || var->OverridedStopGradient()) {
            continue;
          } else {
            var_visited.insert(var.get());
          }
        }
      }
    }
    for (const auto &grad_pending_node : cur_node->GradPendingNodes()) {
      PADDLE_ENFORCE_NOT_NULL(grad_pending_node,
                              platform::errors::NotFound(
                                  "Grad pending node should not be nullptr"));
      auto iter = node_deps_.find(grad_pending_node.get());
      if (iter == node_deps_.end()) {
        continue;
      }
      if (--(iter->second) == 0) {
        q.push(grad_pending_node);
      }
    }
  }

  for (const auto &it : var_index_map_) {
    if (var_visited.count(it.first) == 0) {
      unused_vars_.push_back(it.second);
      VLOG(3) << "Var[" << it.second << "] [" << it.first->Name()
              << "] is not used";
    }
  }
}

// A hook function is added to each leaf node. When the gradient of a leaf
// node is generated, a sparse parameter directly executes allreduce, while
// a dense parameter goes through three steps:
// 1. MarkVarReady: find the corresponding group through var_index, share
//    the gradient memory with the group's dense_tensors_, and decrement
//    the group counter by 1.
// 2. MarkGroupReady: when the group counter reaches 0, allreduce can be
//    emitted, and concat + allreduce + split are issued in turn according
//    to next_group_.
// 3. FinalizeBackward: at the end, synchronize each stream.
void Reducer::AddDistHook(size_t var_index) {
  VLOG(3) << "Var[" << var_index << "] ["
          << vars_[var_index]->GradVarBase()->Name()
          << "] arrived and triggered disthook";
  if (!has_marked_unused_vars_) {
    has_marked_unused_vars_ = true;
    for (auto unused_index : unused_vars_) {
      if (NeedRebuildGroup()) {
        rebuild_vars_.push_back(vars_[unused_index]);
        rebuild_var_indices_.push_back(unused_index);
      }
      MarkVarReady(unused_index, false);
    }
  }

  if (NeedRebuildGroup()) {
    rebuild_vars_.push_back(vars_[var_index]);
    rebuild_var_indices_.push_back(var_index);
  }
  MarkVarReady(var_index, true);
}

void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
  all_group_ready_ = true;
  const auto &var_locator = variable_locators_[var_index];
  auto group_index = var_locator.group_index;
  auto &group = groups_[group_index];

  if (!group.is_sparse_) {
    // process dense group
    auto inside_group_index = var_locator.inside_group_index;
    auto length = group.length_[inside_group_index];
    auto &group_tensor = group.dense_tensors_[inside_group_index];
    if (is_used_var) {
      auto var_wrapper = vars_[var_index]->GradVarBase()->SharedVar();
      auto tensor =
          var_wrapper->MutableVar()->GetMutable<framework::LoDTensor>();
      group_tensor.ShareDataWith(*tensor).Resize(
          {static_cast<int64_t>(length)});
    } else {
      if (!group_tensor.IsInitialized()) {
        group_tensor.Resize({static_cast<int64_t>(length)});
        group_tensor.mutable_data(place_, group.dtype_);
#ifdef PADDLE_WITH_XPU_BKCL
        if (platform::is_xpu_place(group_tensor.place())) {
          // TODO(liuyuhui) support XPU set constant
          VLOG(3) << "XPU doesn't support set_constant";
        }
#else
        auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
        operators::math::set_constant(*dev_ctx, &group_tensor, 0.0);
#endif
      }
    }
  } else {
    // process sparse group
    if (is_used_var) {
      auto var_wrapper = vars_[var_index]->GradVarBase()->SharedVar();
      group.sparse_contents_ = var_wrapper->MutableVar();
    } else {
      group.sparse_contents_ = nullptr;
    }
  }

  if (--group.pending_ == 0) {
    // can start allreduce
    MarkGroupReady(group_index);
  }

  if (next_group_ == groups_.size()) {
    FinalizeBackward();
  }
}

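// For illustration, with nrings_ = 2 and four dense groups becoming ready
// in order, allreduces are issued round-robin: groups 0 and 2 on ring 0,
// groups 1 and 3 on ring 1, each as concat -> allreduce -> split on that
// ring's comm stream. A group that becomes ready out of order is not
// processed immediately; it is picked up once all earlier groups are ready
// (the early return below).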
void Reducer::MarkGroupReady(size_t group_index) {
  if (group_index > next_group_) {
    VLOG(3) << "It will adjust the order of group in next batch automatically";
    return;
  }

  for (; next_group_ < groups_.size() && groups_[next_group_].pending_ == 0;
       ++next_group_) {
    auto &group = groups_[next_group_];
    int run_order = next_group_ % nrings_;

    // For CUDA or XPU, make comm_stream wait for compute_stream.
    // For CPU, do nothing.
    // NOTE: because concat uses the comm_stream, we expose the
    // WaitCompute() interface and call it here.
    parallel_ctx_->WaitCompute(run_order);

    if (group.is_sparse_) {
      if (group.sparse_contents_ != nullptr) {
        VLOG(3) << "sparse group [" << next_group_
                << "] start allreduce in ring[" << run_order << "]";
        parallel_ctx_->AllReduceByStream(
            *group.sparse_contents_, group.sparse_contents_, run_order, false);
      } else {
        VLOG(3) << "The sparse group[" << next_group_
                << "] has no var to allreduce";
      }
    } else {
      VLOG(3) << "dense group [" << next_group_ << "] start allreduce in ring["
              << run_order << "]";
      // Select the common comm stream to concat tensors
      // group.dense_tensors ---> group.dense_contents_
      group.ConcatTensors(*parallel_ctx_->GetDeviceContext(run_order));

// NOTE(liuyuhui): ConcatTensors uses the communication stream, but BKCL only
// supports communicating on the default stream, so there are synchronization
// problems; a WaitComm is needed here.
// TODO(liuyuhui): once BKCL supports events, this should be changed to
// non-blocking communication.
#ifdef PADDLE_WITH_XPU_BKCL
      if (platform::is_xpu_place(group.dense_tensors_[0].place())) {
        parallel_ctx_->WaitComm(run_order);
      }
#endif

      // Start allreduce
      parallel_ctx_->AllReduceByStream(
          group.dense_contents_, &(group.dense_contents_), run_order, false);

      // Select the common comm stream to split tensors
      // group.dense_contents_ ---> group.dense_tensors
      group.SplitTensors(*parallel_ctx_->GetDeviceContext(run_order));
    }
  }
}

std::vector<std::vector<size_t>> Reducer::RebuildGruops() {
  VLOG(3) << "The order of parameter arrival: "
          << string::join_strings(rebuild_var_indices_, ',');

  PADDLE_ENFORCE_EQ(
      rebuild_vars_.size(), vars_.size(),
      platform::errors::PreconditionNotMet(
          "Rebuild vars's number should be equal to original vars'number, "
          "expect it to be %d, but got %d.",
          vars_.size(), rebuild_vars_.size()));
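  // The double reversal below appears intentional: gradients tend to arrive
  // in roughly the reverse of forward order, so reversing approximates
  // forward order for AssignGroupBySize, and the resulting group list is
  // reversed back so that groups become ready in backward order.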
  std::reverse(rebuild_vars_.begin(), rebuild_vars_.end());
  std::reverse(rebuild_var_indices_.begin(), rebuild_var_indices_.end());
  auto rebuild_group_indices =
      AssignGroupBySize(rebuild_vars_, is_sparse_gradient_, group_size_limits_,
                        rebuild_var_indices_);
  has_rebuilt_group_ = true;
  rebuild_vars_.clear();
  rebuild_var_indices_.clear();
  std::reverse(rebuild_group_indices.begin(), rebuild_group_indices.end());
  return rebuild_group_indices;
}

void Reducer::FinalizeBackward() {
  all_group_ready_ = false;
  // Must prevent compute_stream_ from starting until all comm streams have
  // finished.
  for (int i = 0; i < nrings_; ++i) {
    parallel_ctx_->WaitComm(i);
  }

  if (NeedRebuildGroup()) {
    VLOG(3) << "Start rebuilding the groups";
    auto rebuild_group_indices = RebuildGruops();
    group_indices_ = std::move(rebuild_group_indices);
    InitializeGroups(group_indices_);
  }

  VLOG(3) << "In the batch, Reducer is finished...";
}

// Parameters are assigned to different groups according to their sizes. A
// sparse parameter occupies a group exclusively, while dense parameters of
// the same data type are assigned to the same group. When dividing groups,
// the size of each group is limited by the values in group_size_limits in
// turn; when those run out, the last value of group_size_limits is reused.
// A limit value of 0 means that each parameter monopolizes its own group.
std::vector<std::vector<size_t>> AssignGroupBySize(
    const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
    const std::vector<bool> &is_sparse_gradient,
    const std::vector<size_t> &group_size_limits,
    const std::vector<int64_t> &tensor_indices) {
  PADDLE_ENFORCE_EQ(vars.size(), is_sparse_gradient.size(),
                    platform::errors::PreconditionNotMet(
                        "vars len must be equal to is_sparse_gradient len, but "
                        "[%lu] != [%lu]",
                        vars.size(), is_sparse_gradient.size()));
  auto check_perm = [](const std::vector<int64_t> &x) -> bool {
    size_t len = x.size();
    std::vector<size_t> cnt(len, 0);
    for (size_t i = 0; i < len; ++i) {
      if (x[i] >= static_cast<int64_t>(len) || x[i] < 0 || cnt[x[i]]) {
        return false;
      }
      cnt[x[i]]++;
    }
    return true;
  };
  PADDLE_ENFORCE_EQ(true, check_perm(tensor_indices),
                    platform::errors::PreconditionNotMet(
                        "tensor_indices must be a permutation from 0 to %lu",
                        tensor_indices.size()));
  // the return vector
  std::vector<std::vector<size_t>> res;

  // Key: the var type
  // Value: should use which index in group_size_limits for group size limit
  std::unordered_map<std::string, size_t> group_limit_index;

  // Key: the var type
  // Value: <the var index in input tensors, total numel in this group>
  std::unordered_map<std::string, std::pair<std::vector<size_t>, size_t>>
      next_group;

  for (size_t i = 0; i < vars.size(); ++i) {
    const auto &var = vars[i];

    size_t tensor_real_index = i;
    if (!tensor_indices.empty()) {
      tensor_real_index = tensor_indices[i];
    }

    if (is_sparse_gradient[tensor_real_index]) {
      // keep each sparse var in its own group
      res.push_back({tensor_real_index});
      continue;
    }

    const auto &var_dtype = var->DataType();
    const auto var_dtype_str = framework::DataTypeToString(var_dtype);
    VLOG(3) << "var[" << var->GradVarName() << "] 's type is "
            << var->DataType();
    auto &group_info = next_group[var_dtype_str];
    int64_t var_size = -1;
    if (var->Var().IsType<framework::LoDTensor>()) {
      var_size = var->Var().Get<framework::LoDTensor>().numel();
    } else {
      VLOG(3) << "var " << var->Name()
              << " is not tensor or selected_rows, so skip it";
      continue;
    }
    group_info.first.push_back(tensor_real_index);
    group_info.second += framework::SizeOfType(var_dtype) * var_size;

    if (group_limit_index.find(var_dtype_str) == group_limit_index.end()) {
      // means it is the first var of var_dtype
      group_limit_index[var_dtype_str] = 0;
    }
    auto &cur_limit_index = group_limit_index[var_dtype_str];
    if (group_info.second >= group_size_limits[cur_limit_index]) {
      // exceed group capacity and create a new group
      res.emplace_back(std::move(group_info.first));
      group_info = std::pair<std::vector<size_t>, size_t>();
      cur_limit_index =
          (std::min)(cur_limit_index + 1, group_size_limits.size() - 1);
    }
  }

  // add the final groups
  for (auto &e : next_group) {
    auto &group_info = e.second;
    if (!group_info.first.empty()) {
      res.emplace_back(std::move(group_info.first));
    }
  }

  for (const auto &group_index : res) {
    PADDLE_ENFORCE_NE(
        group_index.empty(), true,
        platform::errors::PreconditionNotMet(
            "AssignGroupBySize construct empty group, please check."));
  }
  if (tensor_indices.empty()) {
    std::sort(res.begin(), res.end(),
              [](const std::vector<size_t> &x, const std::vector<size_t> &y) {
                return x.front() < y.front();
              });
  }
  return res;
}
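
// A minimal usage sketch (hypothetical sizes): given four FP32 dense vars of
// 1 MB each and group_size_limits = {2 * 1024 * 1024}, AssignGroupBySize
// returns {{0, 1}, {2, 3}}; each group is closed as soon as its accumulated
// byte size reaches the 2 MB limit. A non-empty tensor_indices remaps
// positions to real indices and skips the final sort by first element.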
#endif

}  // namespace imperative
}  // namespace paddle