// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/reducer.h"

namespace paddle {
namespace imperative {

#if defined(PADDLE_WITH_NCCL)
std::shared_ptr<Reducer> Reducer::s_instance_ = nullptr;

// context is used to select the stream for concat
void Group::ConcatTensors(const platform::CUDADeviceContext &context) {
  VLOG(3) << "Before concat, set output tensor size is " << all_length_;
  auto tensor = dense_contents_.GetMutable<framework::LoDTensor>();
  tensor->Resize(framework::make_ddim({all_length_}))
      .mutable_data(context.GetPlace(), dtype_);

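  // Dispatch on the gradient dtype and copy every member tensor into the
  // contiguous dense_contents_ buffer.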
  switch (dtype_) {
    case framework::proto::VarType::FP16:
      ConcatTensorsForAllReduce<platform::float16>(context, dense_tensors_,
                                                   &dense_contents_);
      break;
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<float>(context, dense_tensors_,
                                       &dense_contents_);
      break;
    case framework::proto::VarType::FP64:
      ConcatTensorsForAllReduce<double>(context, dense_tensors_,
                                        &dense_contents_);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it concats tensors for "
          "allreduce.",
          framework::DataTypeToString(dtype_)));
  }
}

// context is used to select the stream for split
void Group::SplitTensors(const platform::CUDADeviceContext &context) {
  switch (dtype_) {
    case framework::proto::VarType::FP16:
      SplitTensorsForAllReduce<platform::float16>(context, &dense_contents_,
                                                  &dense_tensors_);
      break;
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<float>(context, &dense_contents_,
                                      &dense_tensors_);
      break;
    case framework::proto::VarType::FP64:
      SplitTensorsForAllReduce<double>(context, &dense_contents_,
                                       &dense_tensors_);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when it splits tensors for "
          "allreduce.",
          framework::DataTypeToString(dtype_)));
  }
}

std::ostream &operator<<(std::ostream &out, const Group &group) {
  const auto &vars = group.variable_indices_;
  out << "numel: " << group.all_length_ << " ;is_sparse: " << group.is_sparse_
      << " ;var number: " << vars.size() << "\n";
  auto begin = vars.begin();
  auto end = vars.end();
  out << "[";
  for (int i = 0; begin != end && i < 100; ++i, ++begin) {
    if (i > 0) out << ' ';
    out << *begin;
  }
  if (begin != end) {
    out << " ...";
  }
  out << "]\n";
  return out;
}

Reducer::Reducer(const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
                 const std::vector<std::vector<size_t>> &group_indices,
                 const std::vector<bool> &is_sparse_gradient,
                 std::shared_ptr<imperative::ParallelContext> parallel_ctx,
                 const std::vector<size_t> &group_size_limits,
                 bool find_unused_vars)
    : vars_(vars),
      group_indices_(group_indices),
      is_sparse_gradient_(is_sparse_gradient),
      parallel_ctx_(parallel_ctx),
      group_size_limits_(group_size_limits),
      find_unused_vars_(find_unused_vars) {
  VLOG(3) << "Start construct the Reducer ...";
  nrings_ = parallel_ctx->GetNRings();
  // initialize groups
  InitializeGroups(group_indices);
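  // Register a gradient accumulation post hook on every leaf variable so that
  // AddDistHook fires when that variable's gradient is ready, and record the
  // mapping from the gradient VariableWrapper back to its global index.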
  for (size_t global_var_index = 0; global_var_index < vars_.size();
       ++global_var_index) {
    auto var = vars_[global_var_index];
    var->SharedVar()->AddGradVarLeafBackwardHook(
        std::unique_ptr<LambdaGradAccumulatorPostHook>(
            new LambdaGradAccumulatorPostHook([=](VariableWrapper *grad) {
              this->AddDistHook(global_var_index);
            })));
    var_index_map_[var->GradVarBase()->SharedVar().get()] = global_var_index;
  }
  // create streams
  compute_stream_ = static_cast<platform::CUDADeviceContext *>(
                        platform::DeviceContextPool::Instance().Get(place_))
                        ->stream();
  for (int i = 0; i < nrings_; ++i) {
    comm_streams_.emplace_back(
        platform::NCCLCommContext::Instance().Get(i, place_)->stream());
    comm_events_.emplace_back(platform::CudaEventResourcePool::Instance().New(
        BOOST_GET_CONST(platform::CUDAPlace, place_).device));
  }
  CreateGroupEvents(group_indices.size());

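  // Register an atexit handler (only once) that releases the cached CUDA
  // events through the global reducer instance at process exit.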
  std::call_once(once_flag_, []() {
    std::atexit([]() { Reducer::GetInstance()->ReleaseReducer(); });
  });
}

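// Reset all cached CUDA events; called from the atexit handler registered in
// the constructor.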
void Reducer::ReleaseReducer() {
  for (auto &event : group_events_) {
    event.reset();
  }
  for (auto &event : comm_events_) {
    event.reset();
  }
}

void Reducer::CreateGroupEvents(int group_num) {
  // release old events
  for (auto &event : group_events_) {
    event.reset();
  }
  group_events_.clear();
  group_events_.resize(group_num);
  for (auto &event : group_events_) {
    event = platform::CudaEventResourcePool::Instance().New(
        BOOST_GET_CONST(platform::CUDAPlace, place_).device);
  }
}

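// Record each member's length and the total length of a dense group, and
// check that every member gradient is an initialized, non-empty LoDTensor
// and that all members share the same dtype and place.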
void Reducer::InitializeDenseGroups(
    const std::vector<size_t> &variable_indices_, Group *p_group) {
  int64_t all_length = 0;
  for (size_t index = 0; index < variable_indices_.size(); ++index) {
    const auto variable_index = variable_indices_[index];
    const auto &var = vars_[variable_index];
    const auto var_name = var->Name();
    PADDLE_ENFORCE_EQ(is_sparse_gradient_[variable_index], false,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s's GRAD must be LoDTensor, but received "
                          "GRAD is SelectedRows",
                          var_name));

    auto lod_tensor = var->MutableVar()->GetMutable<framework::LoDTensor>();
    PADDLE_ENFORCE_EQ(lod_tensor->IsInitialized(), true,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s is not initialized.", var_name));
    auto size = lod_tensor->numel();
    PADDLE_ENFORCE_GT(
        size, 0, platform::errors::PreconditionNotMet(
                     "The number of tensor %s's elements is 0.", var_name));
    all_length += size;

    p_group->length_.push_back(size);

    // check the dtype and place, it must be same.
    auto dtype = var->DataType();
    auto place = var->Place();
    if (index > 0) {
      PADDLE_ENFORCE_EQ(
          dtype, p_group->dtype_,
          platform::errors::PreconditionNotMet(
              "Tensor %s has different dtype. Expected dtype is %s, but actual "
              "dtype is %s",
              var_name, framework::DataTypeToString(p_group->dtype_),
              framework::DataTypeToString(dtype)));
      PADDLE_ENFORCE_EQ(place, place_,
                        platform::errors::PreconditionNotMet(
                            "Tensor %s has different place. Expected place is "
                            "%s, but actual place is %s",
                            var_name, place_, place));
    } else {
      p_group->dtype_ = dtype;
      place_ = place;
    }
  }
}

// Each parameter will be initialized according to the group information.
// For the sparse parameter, sparse_contents_ in the group directly points
// to the parameter. For dense parameters, an empty Tensor() is constructed
// first, and the actual gradient memory is attached later in MarkVarReady.
void Reducer::InitializeGroups(
    const std::vector<std::vector<size_t>> &group_indices) {
  VLOG(3) << "Start initialize groups ..";
  // clear the group
  groups_.clear();
  groups_.reserve(group_indices.size());
  variable_locators_.clear();
  variable_locators_.resize(vars_.size());

  auto group_nums = group_indices.size();
  for (size_t group_index = 0; group_index < group_nums; ++group_index) {
    const auto &variable_indices_ = group_indices[group_index];
    PADDLE_ENFORCE_GT(
        variable_indices_.size(), 0,
        platform::errors::PreconditionNotMet(
            "The number of group[%d]'s elements is 0.", group_index));
    Group group;

    // It is only used to check whether the group is sparse or dense
    auto first_varbase = vars_[variable_indices_.front()];
    if (variable_indices_.size() == 1 &&
        is_sparse_gradient_[variable_indices_.front()]) {
      // process the sparse gradient. one sparse, one group
      group.dtype_ = first_varbase->DataType();
      group.is_sparse_ = true;
    } else {
      // process the dense gradient.
      InitializeDenseGroups(variable_indices_, &group);
    }

    // map variables to this group by VariableLocator
    size_t inside_group_index = 0;
    for (const auto var_index : variable_indices_) {
      variable_locators_[var_index] = VariableLocator{
          .group_index = group_index,
          .inside_group_index = inside_group_index++,
      };
    }
    group.variable_indices_ = std::move(variable_indices_);
    groups_.emplace_back(std::move(group));
    // Debug Message For Reducer
    VLOG(3) << "The Group[" << group_index << "]:";
    VLOG(3) << groups_.back();
  }
}

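// Compute the in-degree (number of pending predecessors) of every grad node
// reachable from init_nodes. PrepareForBackward uses these counts to traverse
// the autograd graph in topological order when searching for unused
// parameters.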
void Reducer::PrepareDeps(const std::unordered_set<GradOpNode *> &init_nodes) {
  PADDLE_ENFORCE_EQ(
      node_deps_.empty(), true,
      platform::errors::AlreadyExists("Op deps must be initialized here"));

  std::queue<GradOpNode *> q;
  std::unordered_set<GradOpNode *> visited;

  for (auto pos = init_nodes.begin(); pos != init_nodes.end(); pos++) {
    q.push(*pos);
    visited.insert(*pos);
  }

  while (!q.empty()) {
    auto *cur_node = q.front();
    q.pop();

    for (auto &cur_op : *cur_node) {
      cur_op.EnforceHasInOut();
    }

    const auto &grad_pending_nodes = cur_node->GradPendingNodes();
    for (auto &grad_pending_node : grad_pending_nodes) {
      PADDLE_ENFORCE_NOT_NULL(
          grad_pending_node,
          platform::errors::NotFound("Grad pending node should not be null"));
      ++node_deps_[grad_pending_node.get()];
      if (visited.count(grad_pending_node.get()) == 0) {
        visited.insert(grad_pending_node.get());
        q.push(grad_pending_node.get());
      }
    }
  }
}

// After each batch is calculated, the counter of each group (group.pending_)
// and the allreduce sequence counter (next_group_) will be reset again.
void Reducer::PrepareForBackward(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  VLOG(3) << "start reseting count..";
  next_group_ = 0;
  std::for_each(groups_.begin(), groups_.end(), [](Group &group) {
    group.pending_ = group.variable_indices_.size();
    group.all_length_ = 0;
    group.dense_tensors_.clear();
    group.dense_tensors_.reserve(group.pending_);
    group.sparse_contents_ = nullptr;
  });

  PADDLE_ENFORCE_EQ(
      all_group_ready_, false,
      platform::errors::PreconditionNotMet(
          "Please note that all ``forward`` outputs derived from the module "
          "parameters must participate in the calculation of losses and "
          "subsequent gradient calculations. If not, the wrapper will hang, "
          "waiting for autograd to generate gradients for these parameters. "
          "you can use detach or stop_gradient to make the unused parameters "
          "detached from the autograd graph."));

  // The first var that arrives triggers the marking of unused parameters
  has_marked_unused_vars_ = false;
  if (!find_unused_vars_) {
    return;
  }

  // TODO(shenliang03) "find_unused_vars" interface will be exposed in the
  // future to handle control flow to process unused parameters
  find_unused_vars_ = false;

  unused_vars_.clear();
  node_deps_.clear();
  std::queue<std::shared_ptr<GradOpNode>> q;
  std::unordered_set<VariableWrapper *> var_visited;
  std::unordered_set<GradOpNode *> init_nodes;

  for (const auto &output : outputs) {
    const auto &grad_node = output->GradVarBase()->GradNode();
    if (grad_node == nullptr || output->OverridedStopGradient()) {
      VLOG(3) << "Skip auto grad since there is no grad op or output is "
                 "stop_gradient=True: "
              << output->Name();
      continue;
    } else {
      init_nodes.insert(grad_node.get());
      var_visited.insert(output->SharedVar().get());
      q.push(grad_node);
    }
  }

  PrepareDeps(init_nodes);
  // Traverse the autograd graph starting at the specified output
  while (!q.empty()) {
    auto cur_node = q.front();
    q.pop();

    for (const auto &cur_op : *cur_node) {
      cur_op.EnforceHasInOut();
      auto &bwd_outs = cur_op.GetOutsMap();
      for (const auto &pair : bwd_outs) {
        if (!pair.second.IsGrad()) {
          continue;
        }
        for (auto &var : pair.second) {
          if (!var || var->OverridedStopGradient()) {
            continue;
          } else {
            var_visited.insert(var.get());
          }
        }
      }
    }
    for (const auto &grad_pending_node : cur_node->GradPendingNodes()) {
      PADDLE_ENFORCE_NOT_NULL(grad_pending_node,
                              platform::errors::NotFound(
                                  "Grad pending node should not be nullptr"));
      auto iter = node_deps_.find(grad_pending_node.get());
      if (iter == node_deps_.end()) {
        continue;
      }
      if (--(iter->second) == 0) {
        q.push(grad_pending_node);
      }
    }
  }

  for (const auto &it : var_index_map_) {
    if (var_visited.count(it.first) == 0) {
      unused_vars_.push_back(it.second);
      VLOG(3) << "Var[" << it.second << "] [" << it.first->Name()
              << "] is not used";
    }
  }
}

// Add a hook function to each leaf node. When the gradient of a leaf node is
// generated, a sparse parameter directly executes allreduce, while a dense
// parameter goes through three steps:
// 1. MarkVarReady: find the position of the corresponding group through
//    var_index, share the gradient memory with the group's dense_tensors_,
//    and reduce the group counter by 1.
// 2. MarkGroupReady: when the group counter reaches 0, allreduce can be
//    emitted, and concat + allreduce + split is emitted in turn according
//    to next_group_.
// 3. FinalizeBackward: after the end, synchronize each stream.
void Reducer::AddDistHook(size_t var_index) {
  VLOG(3) << "Var[" << var_index << "] ["
          << vars_[var_index]->GradVarBase()->Name()
          << "] arrived and triggered disthook";
  if (!has_marked_unused_vars_) {
    has_marked_unused_vars_ = true;
    for (auto unused_index : unused_vars_) {
      if (NeedRebuildGroup()) {
        rebuild_vars_.push_back(vars_[unused_index]);
        rebuild_var_indices_.push_back(unused_index);
      }
      MarkVarReady(unused_index, false);
    }
  }

  if (NeedRebuildGroup()) {
    rebuild_vars_.push_back(vars_[var_index]);
    rebuild_var_indices_.push_back(var_index);
  }
  MarkVarReady(var_index, true);
}

void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
  all_group_ready_ = true;
  const auto &var_locator = variable_locators_[var_index];
  auto group_index = var_locator.group_index;
  auto &group = groups_[group_index];

  if (is_used_var) {
    auto var_warpper = vars_[var_index]->GradVarBase()->SharedVar();
    if (!group.is_sparse_) {
      auto grad = var_warpper->MutableVar();
      auto inside_group_index = var_locator.inside_group_index;
      auto length = group.length_[inside_group_index];

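      // Build a flattened view that shares memory with the gradient tensor,
      // so that the later SplitTensors writes the allreduced result back in
      // place.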
      auto tensor = grad->GetMutable<framework::LoDTensor>();
      framework::Tensor tmp;
      tmp.ShareDataWith(*tensor).Resize({static_cast<int64_t>(length)});
      group.dense_tensors_.push_back(std::move(tmp));
      group.all_length_ += length;
    } else {
      group.sparse_contents_ = var_warpper->MutableVar();
    }
  }
  if (--group.pending_ == 0) {
    // can start allreduce
    MarkGroupReady(group_index);
  }

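  // Once every group has been emitted, synchronize the streams and, if
  // needed, rebuild the groups for the next iteration.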
  if (next_group_ == groups_.size()) {
    FinalizeBackward();
  }
}

void Reducer::MarkGroupReady(size_t group_index) {
  if (group_index > next_group_) {
    VLOG(3) << "It will adjust the order of group in next batch automatically";
    return;
  }

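  // Record an event on the compute stream and make every comm stream wait on
  // it, so allreduce only starts after this group's gradients have been fully
  // produced.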
  PADDLE_ENFORCE_CUDA_SUCCESS(
      cudaEventRecord(group_events_[group_index].get(), compute_stream_));

  for (int i = 0; i < nrings_; ++i) {
    PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamWaitEvent(
        comm_streams_[i], group_events_[group_index].get(), 0));
  }

  for (; next_group_ < groups_.size() && groups_[next_group_].pending_ == 0;
       ++next_group_) {
    auto &group = groups_[next_group_];
    int run_order = next_group_ % nrings_;
    if (group.is_sparse_) {
      if (group.sparse_contents_ != nullptr) {
        VLOG(3) << "sparse group [" << next_group_
                << "] start allreduce in ring[" << run_order << "]";
        parallel_ctx_->AllReduceByStream(
            *group.sparse_contents_, group.sparse_contents_, run_order, false);
      } else {
        VLOG(3) << "The sparse group[" << next_group_
                << "] has no var to allreduce";
      }
    } else {
      if (!group.dense_tensors_.empty()) {
        VLOG(3) << "dense group [" << next_group_
                << "] start allreduce in ring[" << run_order << "]";
        // Select the common comm stream to concat tensors
        // group.dense_tensors ---> group.dense_contents_
        group.ConcatTensors(*parallel_ctx_->GetDeviceContext(run_order));

        // Start allreduce
        parallel_ctx_->AllReduceByStream(
            group.dense_contents_, &(group.dense_contents_), run_order, false);

        // Select the common comm stream to split tensors
        // group.dense_contents_ ---> group.dense_tensors
        group.SplitTensors(*parallel_ctx_->GetDeviceContext(run_order));
      } else {
        VLOG(3) << "The dense group[" << next_group_
                << "] has no var to allreduce";
      }
    }
  }
}

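// Rebuild the group assignment using the order in which gradients actually
// arrived during backward, so that later iterations launch allreduce in
// roughly that order.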
std::vector<std::vector<size_t>> Reducer::RebuildGruops() {
  VLOG(3) << "The order of parameter arrival: "
          << string::join_strings(rebuild_var_indices_, ',');

  PADDLE_ENFORCE_EQ(
      rebuild_vars_.size(), vars_.size(),
      platform::errors::PreconditionNotMet(
          "Rebuild vars's number should be equal to original vars'number, "
          "expect it to be %d, but got %d.",
          vars_.size(), rebuild_vars_.size()));
  std::reverse(rebuild_vars_.begin(), rebuild_vars_.end());
  std::reverse(rebuild_var_indices_.begin(), rebuild_var_indices_.end());
  auto rebuild_group_indices =
      AssignGroupBySize(rebuild_vars_, is_sparse_gradient_, group_size_limits_,
                        rebuild_var_indices_);
  has_rebuilt_group_ = true;
  rebuild_vars_.clear();
  rebuild_var_indices_.clear();
  std::reverse(rebuild_group_indices.begin(), rebuild_group_indices.end());
  return rebuild_group_indices;
}

void Reducer::FinalizeBackward() {
  all_group_ready_ = false;
  // Must prevent compute_stream_ from starting until all comm streams have
  // finished
  for (int i = 0; i < nrings_; ++i) {
    PADDLE_ENFORCE_CUDA_SUCCESS(
        cudaEventRecord(comm_events_[i].get(), comm_streams_[i]));
  }
  for (int i = 0; i < nrings_; ++i) {
    PADDLE_ENFORCE_CUDA_SUCCESS(
        cudaStreamWaitEvent(compute_stream_, comm_events_[i].get(), 0));
  }

  if (NeedRebuildGroup()) {
    VLOG(3) << "Start rebuilding the groups";
    auto rebuild_group_indices = RebuildGruops();
    auto rebuild_group_number = rebuild_group_indices.size();
    group_indices_ = std::move(rebuild_group_indices);
    CreateGroupEvents(rebuild_group_number);
    InitializeGroups(group_indices_);
  }

  VLOG(3) << "In the batch, Reducer is finished...";
}

// According to its size, each parameter is allocated to a group. A sparse
// parameter occupies a group exclusively. Dense parameters of the same data
// type are assigned to the same group. When dividing groups, the size of each
// group is limited according to each value in group_size_limits in turn; once
// those values are exhausted, the last value of group_size_limits is used. A
// limit value of 0 means that each parameter will monopolize a group.
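// Illustrative example (values assumed, not from any real model): with
// group_size_limits = {25600, 102400} (bytes) and four FP32 dense parameters
// of 4000, 3000, 20000 and 1000 elements, the first group closes once
// 4000 * 4 + 3000 * 4 = 28000 bytes reaches the first limit, and the last two
// parameters stay under the second limit, so the result is {{0, 1}, {2, 3}}.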
std::vector<std::vector<size_t>> AssignGroupBySize(
    const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
    const std::vector<bool> &is_sparse_gradient,
    const std::vector<size_t> &group_size_limits,
    const std::vector<int64_t> &tensor_indices) {
  PADDLE_ENFORCE_EQ(vars.size(), is_sparse_gradient.size(),
                    platform::errors::PreconditionNotMet(
                        "vars len must be equal to is_sparse_gradient len, but "
                        "[%lu] != [%lu]",
                        vars.size(), is_sparse_gradient.size()));
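  // When tensor_indices is provided, it must be a permutation of [0, len);
  // an empty tensor_indices trivially passes this check.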
  auto check_perm = [](const std::vector<int64_t> &x) -> bool {
    size_t len = x.size();
    std::vector<size_t> cnt(len, 0);
    for (size_t i = 0; i < len; ++i) {
      if (x[i] >= static_cast<int64_t>(len) || x[i] < 0 || cnt[x[i]]) {
        return false;
      }
      cnt[x[i]]++;
    }
    return true;
  };
  PADDLE_ENFORCE_EQ(true, check_perm(tensor_indices),
                    platform::errors::PreconditionNotMet(
                        "tensor_indices must be a permutation from 0 to %lu",
                        tensor_indices.size()));
  // the return vector
  std::vector<std::vector<size_t>> res;

  // Key: the var type
  // Value: should use which index in group_size_limits for group size limit
  std::unordered_map<std::string, size_t> group_limit_index;

  // Key: the var type
  // Value: <the var index in input tensors, total numel in this group>
  std::unordered_map<std::string, std::pair<std::vector<size_t>, size_t>>
      next_group;

  for (size_t i = 0; i < vars.size(); ++i) {
    const auto &var = vars[i];

    size_t tensor_real_index = i;
    if (!tensor_indices.empty()) {
      tensor_real_index = tensor_indices[i];
    }

    if (is_sparse_gradient[tensor_real_index]) {
      // keep each sparse var in its own group
      res.push_back({tensor_real_index});
      continue;
    }

    const auto &var_dtype = var->DataType();
    const auto var_dtype_str = framework::DataTypeToString(var_dtype);
    VLOG(3) << "var[" << var->GradVarName() << "] 's type is "
            << var->DataType();
    auto &group_info = next_group[var_dtype_str];
    int64_t var_size = -1;
    if (var->Var().IsType<framework::LoDTensor>()) {
      var_size = var->Var().Get<framework::LoDTensor>().numel();
    } else {
      VLOG(3) << "var " << var->Name()
              << " is not tensor or selected_rows, so skip it";
      continue;
    }
    group_info.first.push_back(tensor_real_index);
    group_info.second += framework::SizeOfType(var_dtype) * var_size;

    if (group_limit_index.find(var_dtype_str) == group_limit_index.end()) {
      // means it is the first var of var_dtype
      group_limit_index[var_dtype_str] = 0;
    }
    auto &cur_limit_index = group_limit_index[var_dtype_str];
    if (group_info.second >= group_size_limits[cur_limit_index]) {
      // exceed group capacity and create a new group
      res.emplace_back(std::move(group_info.first));
      group_info = std::pair<std::vector<size_t>, size_t>();
      cur_limit_index =
          (std::min)(cur_limit_index + 1, group_size_limits.size() - 1);
    }
  }

  // add the final groups
  for (auto &e : next_group) {
    auto &group_info = e.second;
    if (!group_info.first.empty()) {
      res.emplace_back(std::move(group_info.first));
    }
  }

  for (const auto &group_index : res) {
    PADDLE_ENFORCE_NE(
        group_index.empty(), true,
        platform::errors::PreconditionNotMet(
            "AssignGroupBySize construct empty group, please check."));
  }
  if (tensor_indices.empty()) {
    std::sort(res.begin(), res.end(),
              [](const std::vector<size_t> &x, const std::vector<size_t> &y) {
                return x.front() < y.front();
              });
  }
  return res;
}
#endif

}  // namespace imperative
}  // namespace paddle