// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/reducer.h"

#include <iostream>

#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/parallel_context.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/phi/kernels/funcs/strided_memcpy.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/enforce_xpu.h"
#endif
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/core/dense_tensor.h"

namespace paddle {
namespace imperative {

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) || \
    defined(PADDLE_WITH_XPU_BKCL) || defined(PADDLE_WITH_GLOO)
// Divide the gradient tensor by nranks so that the subsequent
// allreduce(sum) yields the average gradient across all ranks.
void Group::DivNRanks(const platform::DeviceContext &context, int64_t nranks) {
  phi::DenseTensor *tensor =
      is_sparse_
          ? sparse_contents_->GetMutable<phi::SelectedRows>()->mutable_value()
          : dense_contents_.GetMutable<phi::DenseTensor>();

  if (platform::is_gpu_place(tensor->place())) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    DivNRanks(tensor, nranks, context);
#endif
  } else if (platform::is_cpu_place(tensor->place())) {
    VLOG(4) << "before div 2" << *tensor;
    VLOG(4) << "NDiv for cpu devices : rank = " << nranks;
#ifdef PADDLE_WITH_HIP
    if (dtype_ == paddle::framework::proto::VarType_Type_BF16) {
      PADDLE_THROW(paddle::platform::errors::Fatal(
          "Unsupported BF16 in DataParallel for now"));
    }
    framework::VisitDataTypeForHIP(
        dtype_,
        DivNRanksForAllReduce<phi::CPUContext>(tensor, nranks, context));
#else
    framework::VisitDataType(
        dtype_,
        DivNRanksForAllReduce<phi::CPUContext>(tensor, nranks, context));
#endif
    VLOG(4) << "after div 2" << *tensor;
  } else if (platform::is_xpu_place(tensor->place())) {
#ifdef PADDLE_WITH_XPU_BKCL
    PADDLE_THROW(
        platform::errors::Unimplemented("DivNRanks is not supported on XPU / "
                                        "XPU_BKCL, use EagerReducer instead."));
#endif
  }
}

template <typename DeviceContext, typename T>
static void ConcatTensorsForAllReduce(
    const DeviceContext &context,
    const std::vector<phi::DenseTensor> &dense_tensors_,
    framework::Variable *p_dense_contents) {
  operators::math::ConcatFunctor<DeviceContext, T> concat_functor_;
  concat_functor_(context,
                  dense_tensors_,
                  0,
                  p_dense_contents->GetMutable<phi::DenseTensor>());
}

template <typename DeviceContext, typename T>
static void SplitTensorsForAllReduce(
    const DeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<phi::DenseTensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<phi::DenseTensor>();
  std::vector<phi::DenseTensor *> outs;
  std::vector<const phi::DenseTensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  // For a small number of outputs, direct strided copies are often faster
  // than launching the fused split kernel.
  if (p_dense_tensors->size() < 10) {
    phi::funcs::StridedMemcpyWithAxis0<T, DeviceContext>(
        context, *in, shape_refer, &outs);
  } else {
    operators::math::SplitFunctor<DeviceContext, T> split_functor_;
    split_functor_(context, *in, shape_refer, 0, &outs);
  }
}

// context is used to select the stream for concat
template <typename DeviceContext>
static void ConcatTensorsWithType(
    const DeviceContext &context,
    const std::vector<phi::DenseTensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      ConcatTensorsForAllReduce<DeviceContext, platform::float16>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<DeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP64:
      ConcatTensorsForAllReduce<DeviceContext, double>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when concatenating tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <typename DeviceContext>
static void SplitTensorsWithType(const DeviceContext &context,
                                 framework::Variable *p_dense_contents,
                                 std::vector<phi::DenseTensor> *p_dense_tensors,
                                 framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      SplitTensorsForAllReduce<DeviceContext, platform::float16>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<DeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP64:
      SplitTensorsForAllReduce<DeviceContext, double>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when splitting tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

#ifdef PADDLE_WITH_XPU_BKCL
template <>
void SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<phi::DenseTensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<phi::DenseTensor>();
  std::vector<phi::DenseTensor *> outs;
  std::vector<const phi::DenseTensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  operators::math::SplitFunctor<platform::XPUDeviceContext, float>
      split_functor_;
  split_functor_(context, *in, shape_refer, 0, &outs);
}

// context is used to select the stream for concat
template <>
void ConcatTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    const std::vector<phi::DenseTensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when concatenating tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <>
void SplitTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<phi::DenseTensor> *p_dense_tensors,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when splitting tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}
#endif

void Group::ConcatTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    ConcatTensorsWithType(static_cast<const phi::GPUContext &>(context),
                          dense_tensors_,
                          &dense_contents_,
                          dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat grad tensors since it's not compiled with NCCL. "
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    ConcatTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        dense_tensors_,
        &dense_contents_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat xpu grads since it's not compiled with BKCL. "
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    ConcatTensorsWithType(static_cast<const phi::CPUContext &>(context),
                          dense_tensors_,
                          &dense_contents_,
                          dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Concat grad tensor not supported on place (%s)", place));
  }
}

void Group::SplitTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    SplitTensorsWithType(static_cast<const phi::GPUContext &>(context),
                         &dense_contents_,
                         &dense_tensors_,
                         dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split grad tensor since it's not compiled with NCCL. "
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    SplitTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        &dense_contents_,
        &dense_tensors_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split xpu grad since it's not compiled with BKCL. "
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    SplitTensorsWithType(static_cast<const phi::CPUContext &>(context),
                         &dense_contents_,
                         &dense_tensors_,
                         dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Split grad tensor not supported on place (%s)", place));
  }
}

std::ostream &operator<<(std::ostream &out, const Group &group) {
  const auto &vars = group.variable_indices_;
  out << "numel: " << group.all_length_ << " ;is_sparse: " << group.is_sparse_
      << " ;var number: " << vars.size() << "\n";
  auto begin = vars.begin();
  auto end = vars.end();
  out << "[";
  for (int i = 0; begin != end && i < 100; ++i, ++begin) {
    if (i > 0) out << ' ';
    out << *begin;
  }
  if (begin != end) {
    out << " ...";
  }
  out << "]\n";
  return out;
}

Reducer::Reducer(const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
                 const std::vector<std::vector<size_t>> &group_indices,
                 const std::vector<bool> &is_sparse_gradient,
                 std::shared_ptr<imperative::ParallelContext> parallel_ctx,
                 const std::vector<size_t> &group_size_limits,
                 bool find_unused_vars)
    : vars_(vars),
      group_indices_(group_indices),
      is_sparse_gradient_(is_sparse_gradient),
      parallel_ctx_(parallel_ctx),
      group_size_limits_(group_size_limits),
      find_unused_vars_each_step_(find_unused_vars) {
  VLOG(3) << "Start constructing the Reducer ...";
  nrings_ = parallel_ctx->GetNRings();
  nranks_ = parallel_ctx->GetNRanks();
  // initialize groups
  InitializeGroups(group_indices);
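  // Register a hook on each parameter's gradient var; every hook captures
  // its global index by value and reports readiness through AddDistHook.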
  for (size_t global_var_index = 0; global_var_index < vars_.size();
       ++global_var_index) {
    auto var = vars_[global_var_index];
    var->GradVarBase()->AddVoidHook(std::make_shared<std::function<void()>>(
        [=]() { this->AddDistHook(global_var_index); }));
    var_index_map_[var->GradVarBase()->SharedVar().get()] = global_var_index;
  }

  // for checking var is ready once
  vars_marked_ready_.resize(vars_.size(), false);

  // Initialize local used vars
  local_used_vars_.resize(vars_.size(), 0);
}

void Reducer::InitializeDenseGroups(
    const std::vector<size_t> &variable_indices_, Group *p_group) {
  int64_t all_length = 0;
  for (size_t index = 0; index < variable_indices_.size(); ++index) {
    const auto variable_index = variable_indices_[index];
    const auto &var = vars_[variable_index];
    const auto &var_name = var->Name();
    PADDLE_ENFORCE_EQ(is_sparse_gradient_[variable_index],
                      false,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s's GRAD must be LoDTensor, but received "
                          "GRAD is SelectedRows",
                          var_name));

    auto lod_tensor = var->MutableVar()->GetMutable<phi::DenseTensor>();
    PADDLE_ENFORCE_EQ(lod_tensor->IsInitialized(),
                      true,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s is not initialized.", var_name));
    const auto size = lod_tensor->numel();
    PADDLE_ENFORCE_GT(
        size,
        0,
        platform::errors::PreconditionNotMet(
            "The number of tensor %s's elements is 0.", var_name));
    all_length += size;

    p_group->length_.push_back(size);

    // for concat operator
    p_group->dense_tensors_.push_back(phi::DenseTensor());

    // check the dtype and place; they must be the same.
    const auto &dtype = var->DataType();
    const auto &place = var->Place();
    if (index > 0) {
      PADDLE_ENFORCE_EQ(
          dtype,
          p_group->dtype_,
          platform::errors::PreconditionNotMet(
              "Tensor %s has different dtype. Expected dtype is %s, but actual "
              "dtype is %s",
              var_name,
              framework::DataTypeToString(p_group->dtype_),
              framework::DataTypeToString(dtype)));
      PADDLE_ENFORCE_EQ(place,
                        place_,
                        platform::errors::PreconditionNotMet(
                            "Tensor %s has different place. Expected place is "
                            "%s, but actual place is %s",
                            var_name,
                            place_,
                            place));
    } else {
      p_group->dtype_ = dtype;
      place_ = place;
    }
  }
  p_group->all_length_ = all_length;
}

// Each parameter will be initialized according to the group information.
// For the sparse parameter, sparse_contents_ in the group directly points
// to the parameter. For dense parameters, first construct an empty Tensor(),
// then bind the actual memory in MarkVarReady.
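// An illustrative sketch (hypothetical sizes): with three dense vars of
// numel {2, 3, 5} and group_indices = {{0, 1}, {2}}, group 0 is built with
// two placeholder tensors of lengths 2 and 3 (all_length_ = 5) and group 1
// with one of length 5; the placeholders only receive real storage once the
// corresponding gradients arrive.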
void Reducer::InitializeGroups(
    const std::vector<std::vector<size_t>> &group_indices) {
  VLOG(3) << "Start initializing groups ..";
  // clear the group
  groups_.clear();
  groups_.reserve(group_indices.size());
  variable_locators_.clear();
  variable_locators_.resize(vars_.size());

  auto group_nums = group_indices.size();
  for (size_t group_index = 0; group_index < group_nums; ++group_index) {
    const auto &variable_indices_ = group_indices[group_index];
    PADDLE_ENFORCE_GT(
        variable_indices_.size(),
        0,
        platform::errors::PreconditionNotMet(
            "The number of group[%d]'s elements is 0.", group_index));
    Group group;

    // Just used to check whether the group is sparse or dense
    auto first_varbase = vars_[variable_indices_.front()];
    if (variable_indices_.size() == 1 &&
        is_sparse_gradient_[variable_indices_.front()]) {
      // process the sparse gradient. one sparse, one group
      group.dtype_ = first_varbase->DataType();
      group.is_sparse_ = true;
    } else {
      // process the dense gradient.
      InitializeDenseGroups(variable_indices_, &group);
    }

    // map variables to this group by VariableLocator
    size_t inside_group_index = 0;
    for (const auto var_index : variable_indices_) {
      variable_locators_[var_index] = VariableLocator{
          .group_index = group_index,
          .inside_group_index = inside_group_index++,
      };
    }
    group.variable_indices_ = std::move(variable_indices_);
    groups_.emplace_back(std::move(group));
    // Debug Message For Reducer
    VLOG(3) << "The Group[" << group_index << "]:" << groups_.back();
  }
}

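// Counts, for every grad node reachable from init_nodes, how many
// predecessor nodes point to it (node_deps_); these in-degrees later drive
// the topological traversal of the backward graph.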
void Reducer::PrepareDeps(const std::unordered_set<GradOpNode *> &init_nodes) {
  PADDLE_ENFORCE_EQ(
      node_deps_.empty(),
      true,
      platform::errors::AlreadyExists("Op deps must be initialized here"));

  std::queue<GradOpNode *> q;
  std::unordered_set<GradOpNode *> visited;

  for (auto pos = init_nodes.begin(); pos != init_nodes.end(); pos++) {
    q.push(*pos);
    visited.insert(*pos);
  }

  while (!q.empty()) {
    auto *cur_node = q.front();
    q.pop();

    const auto &grad_pending_nodes = cur_node->GradPendingNodes();
    for (auto &grad_pending_node : grad_pending_nodes) {
      PADDLE_ENFORCE_NOT_NULL(
          grad_pending_node,
          platform::errors::NotFound("Grad pending node should not be null"));
      // py_layer is not supported in DataParallel
      auto begin = grad_pending_node->begin();
      auto end = grad_pending_node->end();
      for (auto op_base = begin; op_base != end; op_base++) {
        PADDLE_ENFORCE_EQ(
            op_base->Type() != "py_layer",
            true,
            platform::errors::PreconditionNotMet(
                "Note: Currently PyLayer is not supported in DataParallel. For "
                "using PyLayer in a DataParallel model, you can skip gradient "
                "synchronization among multiple cards by 'no_sync', and "
                "manually implement 'all_reduce' before model optimization. "
                "There is an example showing the specific implementation "
                "in official docs: https://www.paddlepaddle.org.cn/documentation"
                "/docs/api/paddle/DataParallel_cn.html"));
      }
      ++node_deps_[grad_pending_node.get()];
      if (visited.count(grad_pending_node.get()) == 0) {
        visited.insert(grad_pending_node.get());
        q.push(grad_pending_node.get());
      }
    }
  }
}

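// Walks the backward graph once, recording every variable that will receive
// a gradient; any hooked variable that is never visited ends up in
// unused_vars_.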
void Reducer::TraverseBackwardGraph(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  node_deps_.clear();
  std::queue<std::shared_ptr<GradOpNode>> q;
  std::unordered_set<VariableWrapper *> var_visited;
  std::unordered_set<GradOpNode *> init_nodes;

  for (const auto &output : outputs) {
    const auto &grad_node = output->GradVarBase()->GradNode();
    if (grad_node == nullptr || output->OverridedStopGradient()) {
      VLOG(3) << "Skip auto grad since there is no grad op or output is "
                 "stop_gradient=True: "
              << output->Name();
      continue;
    } else {
      init_nodes.insert(grad_node.get());
      var_visited.insert(output->SharedVar().get());
      q.push(grad_node);
    }
  }

  PrepareDeps(init_nodes);
  // Traverse the autograd graph starting at the specified output
  while (!q.empty()) {
    auto cur_node = q.front();
    q.pop();

    for (const auto &cur_op : *cur_node) {
      auto &bwd_outs = cur_op.GetOutsMap();
      for (const auto &pair : bwd_outs) {
        if (!pair.second.IsGrad()) {
          continue;
        }
        for (auto &var : pair.second) {
          if (!var || var->OverridedStopGradient()) {
            continue;
          } else {
            var_visited.insert(var.get());
          }
        }
      }
    }
    for (const auto &grad_pending_node : cur_node->GradPendingNodes()) {
      PADDLE_ENFORCE_NOT_NULL(grad_pending_node,
                              platform::errors::NotFound(
                                  "Grad pending node should not be nullptr"));
      auto iter = node_deps_.find(grad_pending_node.get());
      if (iter == node_deps_.end()) {
        continue;
      }
      if (--(iter->second) == 0) {
        q.push(grad_pending_node);
      }
    }
  }

  for (const auto &it : var_index_map_) {
    if (var_visited.count(it.first) == 0) {
      unused_vars_.push_back(it.second);
      VLOG(3) << "Var[" << it.second << "] [" << it.first->Name()
              << "] is not used";
    }
  }
}

// After each batch is calculated, the counter of each group (group.pending_)
// and the allreduce sequence counter (next_group_) will be reset again.
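// A hedged usage sketch (the real driver lives outside this file; `model`
// and `loss` are illustrative names, not actual APIs):
//   auto outputs = model.Forward(inputs);
//   reducer->PrepareForBackward(outputs);  // reset counters, scan unused vars
//   loss->Backward();  // grad hooks then fire AddDistHook per parameter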
void Reducer::PrepareForBackward(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  VLOG(3) << "after forward, then reset count for backward.";
  grad_need_hooks_ = true;
  next_group_ = 0;
  std::for_each(groups_.begin(), groups_.end(), [](Group &group) {
    group.pending_ = group.variable_indices_.size();
    group.sparse_contents_ = nullptr;
  });

  // reinitialize vars_marked_ready_ for next iteration
  vars_marked_ready_.clear();
  vars_marked_ready_.resize(vars_.size(), false);

  PADDLE_ENFORCE_EQ(
      groups_need_finalize_,
      false,
      platform::errors::PreconditionNotMet(
          "A serious error has occurred here. Please "
          "set find_unused_parameters=True to traverse backward graph "
          "in each step to prepare reduce in advance. If you have "
          "set, there may be several reasons for this error: "
          "1) Please note that all forward outputs derived from the module "
          "parameters must participate in the calculation of losses and "
          "subsequent gradient calculations. If not, the wrapper will hang, "
          "waiting for autograd to generate gradients for these parameters. "
          "You can use detach or stop_gradient to make the unused parameters "
          "detached from the autograd graph. "
          "2) Used multiple forwards and one backward. You may be able to wrap "
          "multiple forwards in a model."));

  // The first var to trigger the unused parameter
  has_marked_unused_vars_ = false;

  if (find_unused_vars_once_ || find_unused_vars_each_step_) {
    unused_vars_.clear();
    TraverseBackwardGraph(outputs);
    // only check once in first step
    find_unused_vars_once_ = false;
  }

  if (find_unused_vars_each_step_ && unused_vars_.empty()) {
    LOG_FIRST_N(WARNING, 1)
        << "All parameters are involved in the backward pass. "
           "It is recommended to set find_unused_parameters to False "
           "to improve performance. However, if unused parameters "
           "appear in subsequent iterative training, then an error "
           "will occur. Please make it clear that in the subsequent "
           "training, there will be no parameters that are not used "
           "in the backward pass, and then set find_unused_parameters=False.";
  }

  if (unused_vars_.size() == vars_.size()) {
    LOG_FIRST_N(WARNING, 1)
        << "There is no parameter in the device involved "
           "in the backward calculation. If there are "
           "parameters on other devices involved in the "
           "backward, then a serious error will occur here.";
  }
}

// Add hook function to each leaf node. When the gradient of a leaf node is
// generated, if it is the sparse parameter, it will directly execute
// allreduce; if it is the dense parameter, it will execute three steps:
// 1, MarkVarReady: find the position of the corresponding group through
// var_index, share the gradient memory with the group's dense_tensors, and
// decrease the group counter by 1. 2, MarkGroupReady: when the group counter
// reaches 0, the allreduce can be emitted, and concat + allreduce + split is
// emitted in turn according to next_group_.
// 3, FinalizeBackward: after the end, synchronize each stream.
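// Hedged call-flow sketch for one dense parameter i in group g:
//   autograd hook -> AddDistHook(i) -> MarkVarReady(i, /*is_used_var=*/true)
//   -> (--groups_[g].pending_ == 0) -> MarkGroupReady(g)
//   -> concat -> DivNRanks -> AllReduceByStream -> split
//   -> (next_group_ == groups_.size()) -> FinalizeBackward()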
void Reducer::AddDistHook(size_t var_index) {
  PADDLE_ENFORCE_LT(var_index,
                    variable_locators_.size(),
                    platform::errors::OutOfRange(
                        "Out of bounds variable index. It must be less "
                        "than %d, but it is %d",
                        variable_locators_.size(),
                        var_index));

  // gradient synchronization is not required when grad_need_hooks_ is false.
  if (!grad_need_hooks_) {
    return;
  }

  VLOG(3) << "Var[" << var_index << "] ["
          << vars_[var_index]->GradVarBase()->Name()
          << "] arrived and triggered disthook";

  local_used_vars_[var_index] = 1;

  // rebuild group when find_unused_vars_each_step_ is false
  if (NeedRebuildGroup()) {
    rebuild_vars_.push_back(vars_[var_index]);
    rebuild_var_indices_.push_back(var_index);
  }

  if (!has_marked_unused_vars_) {
    has_marked_unused_vars_ = true;
    for (const auto &unused_index : unused_vars_) {
      MarkVarReady(unused_index, false);
    }
  }

  MarkVarReady(var_index, true);
}

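// Marks one variable ready inside its group. A used dense var shares its
// gradient storage into the group's fused buffer; an unused one gets a
// zero-filled placeholder. A sparse var hands its SelectedRows gradient to
// the group directly. When the group's pending counter hits zero, the
// group's allreduce is launched.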
void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
  groups_need_finalize_ = true;

  const auto &var_locator = variable_locators_[var_index];
  const auto group_index = var_locator.group_index;
  auto &group = groups_[group_index];

  // it is an error if the var was already marked ready.
  if (vars_marked_ready_[var_index]) {
    auto error_info = string::Sprintf(
        "Error happened: parameter[%d][%s] has been marked ready twice. "
        "Please set find_unused_parameters=True to traverse backward graph "
        "in each step to prepare reduce in advance. If you have set, "
        "there may be several reasons for this error: "
        "1) In multiple reentrant backward phase, some parameters are reused. "
        "2) Using model parameters outside of forward function. Please "
        "make sure that model parameters are not shared in concurrent "
        "forward-backward passes.",
        var_index,
        vars_[var_index]->GradVarBase()->Name());

    PADDLE_ENFORCE_EQ(has_marked_unused_vars_,
                      false,
                      platform::errors::PreconditionNotMet(error_info));

    error_info +=
        "3) Unused parameters retrieval is incorrect. "
        "The return value of forward will be used to retrieve"
        " the unused parameters of the entire model. These "
        "gradients of unused parameters will not be synchronized "
        "between multiple cards. However, if the unused "
        "parameters participate in the backward calculation "
        "again at a later time (e.g. after the forward function, "
        "the loss calculation uses the unused "
        "parameters of the forward and trigger backward), "
        "its gradient will be wrong.";

    PADDLE_ENFORCE_EQ(has_marked_unused_vars_,
                      true,
                      platform::errors::PreconditionNotMet(error_info));
  } else {
    vars_marked_ready_[var_index] = true;
  }

  if (!group.is_sparse_) {
    // process dense group
    const auto inside_group_index = var_locator.inside_group_index;
    const auto length = group.length_[inside_group_index];
    auto &group_tensor = group.dense_tensors_[inside_group_index];

    if (is_used_var) {
      auto var_base = vars_[var_index]->GradVarBase();
      auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
      group_tensor.ShareDataWith(*tensor).Resize(
          {static_cast<int64_t>(length)});
    } else {
      // TODO(shenliang03): maybe save the memory
      // by avoiding tensor construction
      if (!group_tensor.IsInitialized()) {
        group_tensor.Resize({static_cast<int64_t>(length)});
        group_tensor.mutable_data(place_,
                                  framework::TransToPhiDataType(group.dtype_));
      }

#ifdef PADDLE_WITH_XPU_BKCL
      if (platform::is_xpu_place(group_tensor.place())) {
        auto dev_ctx = static_cast<platform::XPUDeviceContext *>(
            platform::DeviceContextPool::Instance().Get(place_));
        if (HasGrad(var_index)) {
          auto var_base = vars_[var_index]->GradVarBase();
          auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
          group_tensor.ShareDataWith(*tensor).Resize(
              {static_cast<int64_t>(length)});
        } else {
          group_tensor.Resize({static_cast<int64_t>(length)});
          int r = xpu::constant(dev_ctx->x_context(),
                                reinterpret_cast<float *>(group_tensor.data()),
                                group_tensor.numel(),
                                0.0f);
          PADDLE_ENFORCE_XDNN_SUCCESS(r, "constant");
          PADDLE_ENFORCE_XPU_SUCCESS(xpu_wait(dev_ctx->stream()));
        }
      }
#else
      auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
      if (HasGrad(var_index)) {
        auto var_base = vars_[var_index]->GradVarBase();
        auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
        group_tensor.ShareDataWith(*tensor).Resize(
            {static_cast<int64_t>(length)});
      } else {
        group_tensor.Resize({static_cast<int64_t>(length)});
        phi::funcs::set_constant(*dev_ctx, &group_tensor, 0.0);
      }
#endif
    }
  } else {
    // process sparse group
    PADDLE_ENFORCE_EQ(
        HasGrad(var_index),
        true,
        platform::errors::PreconditionNotMet(
            "The sparse parameter[%d][%s] should have gradient. "
            "Currently, DataParallel does not support sparse "
            "parameters without generating gradients during training. "
            "For example, if is_sparse=True is used in Embedding, "
            "the current step of this parameter cannot generate gradient "
            "because of stop_gradient/detach, where error will occur.",
            var_index,
            vars_[var_index]->Name()));
    auto var_base = vars_[var_index]->GradVarBase();
    // need to check tensor type
    PADDLE_ENFORCE_EQ(
        var_base->Var().IsType<phi::SelectedRows>(),
        true,
        platform::errors::PreconditionNotMet(
            "The sparse parameter[%d][%s] must have a selectedrows gradient. "
            "Before forward pass, the parameter type is inferred to be "
            "SelectedRows, but after backward pass, its actual type becomes "
            "LoDTensor. It is currently not supported by DataParallel. "
            "For example, if sparse embedding is used, and the weight of "
            "embedding is shared with subsequent dense parameters, then "
            "the parameter gradient of the embedding will be converted "
            "to dense parameters.",
            var_index,
            vars_[var_index]->Name()));

    group.sparse_contents_ = var_base->MutableVar();
  }

  if (--group.pending_ == 0) {
    // can start allreduce
    MarkGroupReady(group_index);
  }

  if (next_group_ == groups_.size()) {
    FinalizeBackward();
  }
}

// TODO(liuyuhui): If BKCL supports non-blocking communication, this should
// be fixed to match multi-GPU card training.
void Reducer::MarkGroupReady(size_t group_index) {
  PADDLE_ENFORCE_GE(
      group_index,
      next_group_,
      platform::errors::PreconditionNotMet(
          "The index of the incoming group must be greater "
          "than or equal to the previously synchronized group index, "
          "expect it to be greater than or equal to %d, but got %d.",
          next_group_,
          group_index));

  if (group_index > next_group_) {
    VLOG(3) << "It will adjust the order of group in next batch automatically";
    return;
  }

  for (; next_group_ < groups_.size() && groups_[next_group_].pending_ == 0;
       ++next_group_) {
    UNUSED auto &group = groups_[next_group_];
    UNUSED const int run_order = next_group_ % nrings_;

    auto *tensor = group.dense_contents_.GetMutable<phi::DenseTensor>();
    tensor->Resize(phi::make_ddim({group.all_length_}))
        .mutable_data(place_, framework::TransToPhiDataType(group.dtype_));

    // For CUDA or XPU, compute_stream --> comm_stream.
    // For CPU, do nothing.
    // NOTE: because concat uses the comm_stream, we expose the
    // WaitCompute() interface and call it here.
    parallel_ctx_->WaitCompute(run_order);
    FusedAllReduceSchedule(run_order, group, next_group_);
  }
}

void Reducer::FusedAllReduceSchedule(const int run_order,
                                     Group &group,
                                     const int curr_group_index) {
  // The overall timeline: concat > div_nranks > allreduce > split
  // dev_context is used to select different stream
  const auto &dev_context = *parallel_ctx_->GetDeviceContext(run_order);
  if (group.is_sparse_) {
    VLOG(3) << "sparse group [" << curr_group_index
            << "] start allreduce in ring[" << run_order << "]";
    group.DivNRanks(dev_context, nranks_);
    parallel_ctx_->AllReduceByStream(
        *group.sparse_contents_, group.sparse_contents_, run_order, false);
  } else {
    VLOG(3) << "dense group [" << curr_group_index
            << "] start allreduce in ring[" << run_order << "]";
    // Select common comm stream to concat tensors
    // group.dense_tensors ---> group.dense_contents_
    group.ConcatTensors(dev_context);

    group.DivNRanks(dev_context, nranks_);
    // Start allreduce
    parallel_ctx_->AllReduceByStream(
        group.dense_contents_, &(group.dense_contents_), run_order, false);

    // Select communication stream to split tensors
    // group.dense_contents_ ---> group.dense_tensors
    group.SplitTensors(dev_context);
  }
}

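// Rebuilds the groups using the order in which gradients actually arrived
// during the backward pass, so that the allreduce launch order matches
// gradient production order.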
std::vector<std::vector<size_t>> Reducer::RebuildGruops() {
  VLOG(3) << "The order of parameter arrival: "
          << string::join_strings(rebuild_var_indices_, ',');

  PADDLE_ENFORCE_EQ(
      rebuild_vars_.size(),
      vars_.size(),
      platform::errors::PreconditionNotMet(
          "Rebuilt vars' number should be equal to the original vars' number, "
          "expect it to be %d, but got %d.",
          vars_.size(),
          rebuild_vars_.size()));
  std::reverse(rebuild_vars_.begin(), rebuild_vars_.end());
  std::reverse(rebuild_var_indices_.begin(), rebuild_var_indices_.end());
  auto rebuild_group_indices = AssignGroupBySize(rebuild_vars_,
                                                 is_sparse_gradient_,
                                                 group_size_limits_,
                                                 rebuild_var_indices_);
  has_rebuilt_group_ = true;
  rebuild_vars_.clear();
  rebuild_var_indices_.clear();
  std::reverse(rebuild_group_indices.begin(), rebuild_group_indices.end());
  return rebuild_group_indices;
}

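// Synchronizes the used/unused state of every parameter across ranks, then
// for each var that some other rank used but this one did not, copies the
// reduced slice of the group buffer into the local gradient so optimizer
// updates stay consistent on all ranks.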
void Reducer::ProcessUnusedDenseVars() {
  // The calculation stream must be used here to
  // avoid conflicts with communication.
  VLOG(3) << "Local used vars : "
          << string::join_strings(local_used_vars_, ',');
  const auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
  // H2D is to allreduce the local_used_vars_
  auto *global_used_tensor = global_used_vars_.GetMutable<phi::DenseTensor>();
  framework::TensorFromVector<int>(
      local_used_vars_, *dev_ctx, global_used_tensor);
  parallel_ctx_->AllReduceByStream(
      global_used_vars_, &global_used_vars_, 0, true);
  framework::TensorToVector<int>(
      *global_used_tensor, *dev_ctx, &local_used_vars_);

  // sync compute stream to get global used var message,
  // but maybe affect speed performance
  parallel_ctx_->SynchronizeCompute();
  VLOG(3) << "Global used vars : "
          << string::join_strings(local_used_vars_, ',');

  for (const auto var_index : unused_vars_) {
    const bool global_unused = (local_used_vars_[var_index] == 0);

    // global used but local unused, set grad
    VLOG(3) << "Var [" << var_index << "] [" << vars_[var_index]->Name()
            << "] global_unused:" << global_unused
            << "  has grad: " << HasGrad(var_index);

    if (!global_unused) {
      VLOG(3) << "Start process unused Var";
      // 1. source var base
      const auto &var_locator = variable_locators_[var_index];
      const auto group_index = var_locator.group_index;
      const auto &group = groups_[group_index];
      const auto inside_group_index = var_locator.inside_group_index;
      const auto &src_tensor = group.dense_tensors_[inside_group_index];
      // sparse no need to check and no support find_unused_parameters
      if (group.is_sparse_) {
        continue;
      }
      // 2. destination var base
      auto dest_var_base = vars_[var_index];
      auto *dest_tensor =
          dest_var_base->MutableVar()->GetMutable<phi::DenseTensor>();
      const auto &dest_dims = dest_tensor->dims();

      // 3. create grad var base or get grad var base
      auto grad_var_base_tmp = dest_var_base->MutableGradVarBase();
      // NOTE(haohongxiang): Calling SetIsEmpty here is to make sure that
      // gradient accumulation can continue normally after clear_gradients()
      // especially in cases including complex control flow.
      grad_var_base_tmp->SharedVar()->SetIsEmpty(false);

      // 4. set grad tensor
      auto *dest_grad_tensor =
          grad_var_base_tmp->MutableVar()->GetMutable<phi::DenseTensor>();
      const auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
      paddle::framework::TensorCopy(
          src_tensor, place_, *dev_ctx, dest_grad_tensor);
      dest_grad_tensor->Resize(dest_dims);
    }
  }
}

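// A gradient exists only if the grad var holds an initialized DenseTensor
// or SelectedRows value.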
bool Reducer::HasGrad(size_t var_index) {
  const auto grad_var = vars_[var_index]->GradVarBase();
  if (!grad_var || !grad_var->Var().IsInitialized()) {
    return false;
  }

  const auto &var = grad_var->Var();
  if (var.IsType<phi::DenseTensor>()) {
    if (var.Get<phi::DenseTensor>().IsInitialized()) {
      return true;
    }
  } else if (var.IsType<phi::SelectedRows>()) {
    if (var.Get<phi::SelectedRows>().value().IsInitialized()) {
      return true;
    }
  } else {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Only support LoDTensor and SelectedRows for gradient var"));
  }
  return false;
}

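// Runs once per backward pass after the last group is reduced: waits on all
// comm rings, releases the fused buffers, optionally rebuilds groups from
// the observed gradient arrival order, and reconciles unused variables.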
void Reducer::FinalizeBackward() {
  groups_need_finalize_ = false;
  grad_need_hooks_ = false;

  // Must prevent compute_stream_ starting until all comm streams have finished
  for (int i = 0; i < nrings_; ++i) {
    parallel_ctx_->WaitComm(i);
  }

  for (auto &group : groups_) {
    if (!group.is_sparse_) {
      group.dense_contents_.Clear();
    }
  }

  if (NeedRebuildGroup()) {
    VLOG(3) << "Start rebuilding the groups";
    auto rebuild_group_indices = RebuildGruops();
    group_indices_ = std::move(rebuild_group_indices);
    InitializeGroups(group_indices_);
  }

  if (find_unused_vars_each_step_) {
// TODO(liuyuhui) support xpu about Tensorcopy/TensorFromVector/TensorToVector
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) || \
    defined(PADDLE_WITH_GLOO)
    ProcessUnusedDenseVars();
#endif
    // Initialize local used vars
    local_used_vars_.clear();
    local_used_vars_.resize(vars_.size(), 0);
    VLOG(3) << "ProcessUnusedDenseVars is finished.";
  }

  VLOG(3) << "In the batch, Reducer is finished.";
}

// According to the size of each parameter, it is allocated to different
// groups. The sparse parameter occupies a group exclusively. The dense
// parameters of the same data type are assigned to the same group. When
// dividing groups, the size of each group is limited according to each value
// in group_size_limits in turn; once those values run out, the last value of
// group_size_limits is reused. A limit value of 0 means that the parameter
// will monopolize the group.
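// Worked example (hypothetical sizes): given four 1 MB FP32 dense vars and
// one sparse var, with group_size_limits = {2 * 1024 * 1024}, the sparse var
// monopolizes one group and the dense vars are packed two per group, since
// two 1 MB tensors reach the 2 MB byte limit.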
std::vector<std::vector<size_t>> AssignGroupBySize(
    const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
    const std::vector<bool> &is_sparse_gradient,
    const std::vector<size_t> &group_size_limits,
    const std::vector<int64_t> &tensor_indices) {
  PADDLE_ENFORCE_EQ(vars.size(),
                    is_sparse_gradient.size(),
                    platform::errors::PreconditionNotMet(
                        "vars len must be equal to is_sparse_gradient len, but "
                        "[%lu] != [%lu]",
                        vars.size(),
                        is_sparse_gradient.size()));
  auto check_perm = [](const std::vector<int64_t> &x) -> bool {
    size_t len = x.size();
    std::vector<size_t> cnt(len, 0);
    for (size_t i = 0; i < len; ++i) {
      if (x[i] >= static_cast<int64_t>(len) || x[i] < 0 || cnt[x[i]]) {
        return false;
      }
      cnt[x[i]]++;
    }
    return true;
  };
  PADDLE_ENFORCE_EQ(true,
                    check_perm(tensor_indices),
                    platform::errors::PreconditionNotMet(
                        "tensor_indices must be a permutation from 0 to %lu",
                        tensor_indices.size()));
  // the return vector
  std::vector<std::vector<size_t>> res;

  // Key: the var type
  // Value: should use which index in group_size_limits for group size limit
  std::unordered_map<std::string, size_t> group_limit_index;

  // Key: the var type
  // Value: <the var index in input tensors, total numel in this group>
  std::unordered_map<std::string, std::pair<std::vector<size_t>, size_t>>
      next_group;

  for (size_t i = 0; i < vars.size(); ++i) {
    const auto &var = vars[i];

    size_t tensor_real_index = i;
    if (!tensor_indices.empty()) {
      tensor_real_index = tensor_indices[i];
    }

    if (is_sparse_gradient[tensor_real_index]) {
      // we keep each sparse var in its own group
      res.push_back({tensor_real_index});
      continue;
    }

    const auto &var_dtype = var->DataType();
    const auto var_dtype_str = framework::DataTypeToString(var_dtype);
    VLOG(3) << "var[" << var->GradVarName() << "] 's type is "
            << var->DataType();
    auto &group_info = next_group[var_dtype_str];
    int64_t var_size = -1;
    if (var->Var().IsType<phi::DenseTensor>()) {
      var_size = var->Var().Get<phi::DenseTensor>().numel();
    } else {
      VLOG(3) << "var " << var->Name()
              << " is not tensor or selected_rows, so skip it";
      continue;
    }
    group_info.first.push_back(tensor_real_index);
    group_info.second += framework::SizeOfType(var_dtype) * var_size;

    if (group_limit_index.find(var_dtype_str) == group_limit_index.end()) {
      // means it is the first var of var_dtype
      group_limit_index[var_dtype_str] = 0;
    }
    auto &cur_limit_index = group_limit_index[var_dtype_str];
    if (group_info.second >= group_size_limits[cur_limit_index]) {
      // exceed group capacity and create a new group
      res.emplace_back(std::move(group_info.first));
      group_info = std::pair<std::vector<size_t>, size_t>();
      cur_limit_index =
          (std::min)(cur_limit_index + 1, group_size_limits.size() - 1);
    }
  }

  // add the final groups
  for (auto &e : next_group) {
    auto &group_info = e.second;
    if (!group_info.first.empty()) {
      res.emplace_back(std::move(group_info.first));
    }
  }

  for (const auto &group_index : res) {
    PADDLE_ENFORCE_NE(
        group_index.empty(),
        true,
        platform::errors::PreconditionNotMet(
            "AssignGroupBySize construct empty group, please check."));
  }
  if (tensor_indices.empty()) {
    std::sort(res.begin(),
              res.end(),
              [](const std::vector<size_t> &x, const std::vector<size_t> &y) {
                return x.front() < y.front();
              });
  }
  return res;
}
#endif

}  // namespace imperative
}  // namespace paddle