// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/reducer.h"

#include <iostream>

#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/parallel_context.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/phi/kernels/funcs/strided_memcpy.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/enforce_xpu.h"
#endif
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/core/dense_tensor.h"
namespace paddle {
namespace imperative {

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) || \
    defined(PADDLE_WITH_XPU_BKCL) || defined(PADDLE_WITH_GLOO)
// Divide the gradient tensor by nranks so that the subsequent allreduce
// produces an average.
void Group::DivNRanks(const platform::DeviceContext &context, int64_t nranks) {
  phi::DenseTensor *tensor =
      is_sparse_
          ? sparse_contents_->GetMutable<phi::SelectedRows>()->mutable_value()
          : dense_contents_.GetMutable<phi::DenseTensor>();

  if (platform::is_gpu_place(tensor->place())) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    DivNRanks(tensor, nranks, context);
#endif
  } else if (platform::is_cpu_place(tensor->place())) {
    VLOG(4) << "before div 2" << *tensor;
    VLOG(4) << "NDiv for cpu devices : rank = " << nranks;
#ifdef PADDLE_WITH_HIP
    if (dtype_ == paddle::framework::proto::VarType_Type_BF16) {
      PADDLE_THROW(paddle::platform::errors::Fatal(
          "Unsupported BF16 in DataParallel for now"));
    }
    framework::VisitDataTypeForHIP(
        dtype_,
        DivNRanksForAllReduce<phi::CPUContext>(tensor, nranks, context));
#else
    framework::VisitDataType(
        dtype_,
        DivNRanksForAllReduce<phi::CPUContext>(tensor, nranks, context));
#endif
    VLOG(4) << "after div 2" << *tensor;
  } else if (platform::is_xpu_place(tensor->place())) {
#ifdef PADDLE_WITH_XPU_BKCL
// TODO(liuyuhui): support dividing by nranks on XPU in the future
#endif
  }
}

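// Helpers that fuse/unfuse the per-parameter gradient tensors of a group:
// ConcatTensorsForAllReduce packs dense_tensors_ into one contiguous buffer
// (dense_contents_) before allreduce, and SplitTensorsForAllReduce scatters
// the reduced buffer back into the individual gradient tensors.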
template <typename DeviceContext, typename T>
static void ConcatTensorsForAllReduce(
    const DeviceContext &context,
    const std::vector<phi::DenseTensor> &dense_tensors_,
    framework::Variable *p_dense_contents) {
  operators::math::ConcatFunctor<DeviceContext, T> concat_functor_;
  concat_functor_(context,
                  dense_tensors_,
                  0,
                  p_dense_contents->GetMutable<phi::DenseTensor>());
}

template <typename DeviceContext, typename T>
static void SplitTensorsForAllReduce(
    const DeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<phi::DenseTensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<phi::DenseTensor>();
  std::vector<phi::DenseTensor *> outs;
  std::vector<const phi::DenseTensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  // Sometimes direct copies will be faster
  if (p_dense_tensors->size() < 10) {
    phi::funcs::StridedMemcpyWithAxis0<T, DeviceContext>(
        context, *in, shape_refer, &outs);
  } else {
    operators::math::SplitFunctor<DeviceContext, T> split_functor_;
    split_functor_(context, *in, shape_refer, 0, &outs);
  }
}

// context is used to select the stream for concat
template <typename DeviceContext>
static void ConcatTensorsWithType(
    const DeviceContext &context,
    const std::vector<phi::DenseTensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      ConcatTensorsForAllReduce<DeviceContext, platform::float16>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<DeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    case framework::proto::VarType::FP64:
      ConcatTensorsForAllReduce<DeviceContext, double>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when concatenating tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <typename DeviceContext>
static void SplitTensorsWithType(const DeviceContext &context,
                                 framework::Variable *p_dense_contents,
                                 std::vector<phi::DenseTensor> *p_dense_tensors,
                                 framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP16:
      SplitTensorsForAllReduce<DeviceContext, platform::float16>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<DeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    case framework::proto::VarType::FP64:
      SplitTensorsForAllReduce<DeviceContext, double>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when splitting tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

#ifdef PADDLE_WITH_XPU_BKCL
template <>
void SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<phi::DenseTensor> *p_dense_tensors) {
  auto *in = p_dense_contents->GetMutable<phi::DenseTensor>();
  std::vector<phi::DenseTensor *> outs;
  std::vector<const phi::DenseTensor *> shape_refer;

  outs.reserve(p_dense_tensors->size());
  shape_refer.reserve(p_dense_tensors->size());

  for (auto &tensor : *p_dense_tensors) {
    outs.emplace_back(&tensor);
    shape_refer.emplace_back(&tensor);
  }
  operators::math::SplitFunctor<platform::XPUDeviceContext, float>
      split_functor_;
  split_functor_(context, *in, shape_refer, 0, &outs);
}

// context is used to select the stream for concat
template <>
void ConcatTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    const std::vector<phi::DenseTensor> &dense_tensors_,
    framework::Variable *p_dense_contents,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      ConcatTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, dense_tensors_, p_dense_contents);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when concatenating tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}

// context is used to select the stream for split
template <>
void SplitTensorsWithType<platform::XPUDeviceContext>(
    const platform::XPUDeviceContext &context,
    framework::Variable *p_dense_contents,
    std::vector<phi::DenseTensor> *p_dense_tensors,
    framework::proto::VarType::Type type) {
  switch (type) {
    case framework::proto::VarType::FP32:
      SplitTensorsForAllReduce<platform::XPUDeviceContext, float>(
          context, p_dense_contents, p_dense_tensors);
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when splitting tensors for "
          "allreduce.",
          framework::DataTypeToString(type)));
  }
}
#endif

void Group::ConcatTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    ConcatTensorsWithType(static_cast<const phi::GPUContext &>(context),
                          dense_tensors_,
                          &dense_contents_,
                          dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat grad tensors since it's not compiled with NCCL. "
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    ConcatTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        dense_tensors_,
        &dense_contents_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't concat xpu grads since it's not compiled with BKCL. "
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    ConcatTensorsWithType(static_cast<const phi::CPUContext &>(context),
                          dense_tensors_,
                          &dense_contents_,
                          dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Concat grad tensor not supported on place (%s)", place));
  }
}

void Group::SplitTensors(const platform::DeviceContext &context) {
  auto place = context.GetPlace();
  if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    SplitTensorsWithType(static_cast<const phi::GPUContext &>(context),
                         &dense_contents_,
                         &dense_tensors_,
                         dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split grad tensor since it's not compiled with NCCL. "
        "Please recompile or reinstall Paddle with NCCL support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU_BKCL
    SplitTensorsWithType(
        static_cast<const platform::XPUDeviceContext &>(context),
        &dense_contents_,
        &dense_tensors_,
        dtype_);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't split xpu grad since it's not compiled with BKCL. "
        "Please recompile or reinstall Paddle with BKCL support."));
#endif
  } else if (platform::is_cpu_place(place)) {
    SplitTensorsWithType(static_cast<const phi::CPUContext &>(context),
                         &dense_contents_,
                         &dense_tensors_,
                         dtype_);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Split grad tensor not supported on place (%s)", place));
  }
}

std::ostream &operator<<(std::ostream &out, const Group &group) {
  const auto &vars = group.variable_indices_;
  out << "numel: " << group.all_length_ << " ;is_sparse: " << group.is_sparse_
      << " ;var number: " << vars.size() << "\n";
  auto begin = vars.begin();
  auto end = vars.end();
  out << "[";
  for (int i = 0; begin != end && i < 100; ++i, ++begin) {
    if (i > 0) out << ' ';
    out << *begin;
  }
  if (begin != end) {
    out << " ...";
  }
  out << "]\n";
  return out;
}

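// The Reducer owns the bookkeeping for gradient fusion: it builds the initial
// groups from group_indices, registers a hook on every parameter's gradient
// VarBase so that AddDistHook fires when that gradient is produced, and
// allocates the per-variable readiness/usage vectors.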
Reducer::Reducer(const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
                 const std::vector<std::vector<size_t>> &group_indices,
                 const std::vector<bool> &is_sparse_gradient,
                 std::shared_ptr<imperative::ParallelContext> parallel_ctx,
                 const std::vector<size_t> &group_size_limits,
                 bool find_unused_vars)
    : vars_(vars),
      group_indices_(group_indices),
      is_sparse_gradient_(is_sparse_gradient),
      parallel_ctx_(parallel_ctx),
      group_size_limits_(group_size_limits),
      find_unused_vars_each_step_(find_unused_vars) {
  VLOG(3) << "Start constructing the Reducer ...";
  nrings_ = parallel_ctx->GetNRings();
  nranks_ = parallel_ctx->GetNRanks();
  // initialize groups
  InitializeGroups(group_indices);
  for (size_t global_var_index = 0; global_var_index < vars_.size();
       ++global_var_index) {
    auto var = vars_[global_var_index];
    var->GradVarBase()->AddVoidHook(std::make_shared<std::function<void()>>(
        [=]() { this->AddDistHook(global_var_index); }));
    var_index_map_[var->GradVarBase()->SharedVar().get()] = global_var_index;
  }

  // for checking var is ready once
  vars_marked_ready_.resize(vars_.size(), false);

  // Initialize local used vars
  local_used_vars_.resize(vars_.size(), 0);
}

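// Collect the dense variables of one group: verify that every member has a
// dense (LoDTensor) gradient, an initialized value, and a consistent dtype
// and place, then record each tensor's length and the group's total length.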
void Reducer::InitializeDenseGroups(
    const std::vector<size_t> &variable_indices_, Group *p_group) {
  int64_t all_length = 0;
  for (size_t index = 0; index < variable_indices_.size(); ++index) {
    const auto variable_index = variable_indices_[index];
    const auto &var = vars_[variable_index];
    const auto &var_name = var->Name();
    PADDLE_ENFORCE_EQ(is_sparse_gradient_[variable_index],
                      false,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s's GRAD must be LoDTensor, but received "
                          "GRAD is SelectedRows",
                          var_name));

    auto lod_tensor = var->MutableVar()->GetMutable<phi::DenseTensor>();
    PADDLE_ENFORCE_EQ(lod_tensor->IsInitialized(),
                      true,
                      platform::errors::PreconditionNotMet(
                          "Tensor %s is not initialized.", var_name));
    const auto size = lod_tensor->numel();
    PADDLE_ENFORCE_GT(
        size,
        0,
        platform::errors::PreconditionNotMet(
            "The number of tensor %s's elements is 0.", var_name));
    all_length += size;

    p_group->length_.push_back(size);

    // for concat operator
    p_group->dense_tensors_.push_back(phi::DenseTensor());

    // check the dtype and place; they must be the same.
    const auto &dtype = var->DataType();
    const auto &place = var->Place();
    if (index > 0) {
      PADDLE_ENFORCE_EQ(
          dtype,
          p_group->dtype_,
          platform::errors::PreconditionNotMet(
              "Tensor %s has a different dtype. Expected dtype is %s, but "
              "actual dtype is %s",
              var_name,
              framework::DataTypeToString(p_group->dtype_),
              framework::DataTypeToString(dtype)));
      PADDLE_ENFORCE_EQ(place,
                        place_,
                        platform::errors::PreconditionNotMet(
                            "Tensor %s has a different place. Expected place "
                            "is %s, but actual place is %s",
                            var_name,
                            place_,
                            place));
    } else {
      p_group->dtype_ = dtype;
      place_ = place;
    }
  }
  p_group->all_length_ = all_length;
}

// Each parameter will be initialized according to the group information.
// For a sparse parameter, sparse_contents_ in the group directly points
// to the parameter. For dense parameters, first construct an empty Tensor(),
// then specify the actual memory in MarkVarReady.
void Reducer::InitializeGroups(
    const std::vector<std::vector<size_t>> &group_indices) {
  VLOG(3) << "Start initializing groups ..";
  // clear the group
  groups_.clear();
  groups_.reserve(group_indices.size());
  variable_locators_.clear();
  variable_locators_.resize(vars_.size());

  auto group_nums = group_indices.size();
  for (size_t group_index = 0; group_index < group_nums; ++group_index) {
    const auto &variable_indices_ = group_indices[group_index];
    PADDLE_ENFORCE_GT(
        variable_indices_.size(),
        0,
        platform::errors::PreconditionNotMet(
            "The number of group[%d]'s elements is 0.", group_index));
    Group group;

    // It's just used to check whether the group is sparse or dense
    auto first_varbase = vars_[variable_indices_.front()];
    if (variable_indices_.size() == 1 &&
        is_sparse_gradient_[variable_indices_.front()]) {
      // process the sparse gradient. one sparse, one group
      group.dtype_ = first_varbase->DataType();
      group.is_sparse_ = true;
    } else {
      // process the dense gradient.
      InitializeDenseGroups(variable_indices_, &group);
    }

    // map variables to this group by VariableLocator
    size_t inside_group_index = 0;
    for (const auto var_index : variable_indices_) {
      variable_locators_[var_index] = VariableLocator{
          .group_index = group_index,
          .inside_group_index = inside_group_index++,
      };
    }
    group.variable_indices_ = std::move(variable_indices_);
    groups_.emplace_back(std::move(group));
    // Debug Message For Reducer
    VLOG(3) << "The Group[" << group_index << "]:" << groups_.back();
  }
}

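// Count, for every grad op node reachable from init_nodes, how many pending
// predecessors it has (node_deps_), so that TraverseBackwardGraph can later
// visit a node only after all of its dependencies have been released.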
void Reducer::PrepareDeps(const std::unordered_set<GradOpNode *> &init_nodes) {
  PADDLE_ENFORCE_EQ(
      node_deps_.empty(),
      true,
      platform::errors::AlreadyExists("Op deps must be initialized here"));

  std::queue<GradOpNode *> q;
  std::unordered_set<GradOpNode *> visited;

  for (auto pos = init_nodes.begin(); pos != init_nodes.end(); pos++) {
    q.push(*pos);
    visited.insert(*pos);
  }

  while (!q.empty()) {
    auto *cur_node = q.front();
    q.pop();

    const auto &grad_pending_nodes = cur_node->GradPendingNodes();
    for (auto &grad_pending_node : grad_pending_nodes) {
      PADDLE_ENFORCE_NOT_NULL(
          grad_pending_node,
          platform::errors::NotFound("Grad pending node should not be null"));
      // py_layer is not supported in DataParallel
      auto begin = grad_pending_node->begin();
      auto end = grad_pending_node->end();
      for (auto op_base = begin; op_base != end; op_base++) {
        PADDLE_ENFORCE_EQ(
            op_base->Type() != "py_layer",
            true,
            platform::errors::PreconditionNotMet(
                "Note: Currently PyLayer is not supported in DataParallel. For "
                "using PyLayer in a DataParallel model, you can skip gradient "
                "synchronization among multiple cards by 'no_sync', and "
                "manually implement 'all_reduce' before model optimization. "
                "There is an example showing the specific implementation in "
                "the official docs: https://www.paddlepaddle.org.cn/documentation"
                "/docs/api/paddle/DataParallel_cn.html"));
      }
      ++node_deps_[grad_pending_node.get()];
      if (visited.count(grad_pending_node.get()) == 0) {
        visited.insert(grad_pending_node.get());
        q.push(grad_pending_node.get());
      }
    }
  }
}

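// Walk the autograd graph that backward() will execute for the given outputs
// and record every gradient variable it can produce; parameters whose
// gradients are never visited are collected into unused_vars_.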
void Reducer::TraverseBackwardGraph(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  node_deps_.clear();
  std::queue<std::shared_ptr<GradOpNode>> q;
  std::unordered_set<VariableWrapper *> var_visited;
  std::unordered_set<GradOpNode *> init_nodes;

  for (const auto &output : outputs) {
    const auto &grad_node = output->GradVarBase()->GradNode();
    if (grad_node == nullptr || output->OverridedStopGradient()) {
      VLOG(3) << "Skip auto grad since there is no grad op or output is "
                 "stop_gradient=True: "
              << output->Name();
      continue;
    } else {
      init_nodes.insert(grad_node.get());
      var_visited.insert(output->SharedVar().get());
      q.push(grad_node);
    }
  }

  PrepareDeps(init_nodes);
  // Traverse the autograd graph starting at the specified output
  while (!q.empty()) {
    auto cur_node = q.front();
    q.pop();

    for (const auto &cur_op : *cur_node) {
      auto &bwd_outs = cur_op.GetOutsMap();
      for (const auto &pair : bwd_outs) {
        if (!pair.second.IsGrad()) {
          continue;
        }
        for (auto &var : pair.second) {
          if (!var || var->OverridedStopGradient()) {
            continue;
          } else {
            var_visited.insert(var.get());
          }
        }
      }
    }
    for (const auto &grad_pending_node : cur_node->GradPendingNodes()) {
      PADDLE_ENFORCE_NOT_NULL(grad_pending_node,
                              platform::errors::NotFound(
                                  "Grad pending node should not be nullptr"));
      auto iter = node_deps_.find(grad_pending_node.get());
      if (iter == node_deps_.end()) {
        continue;
      }
      if (--(iter->second) == 0) {
        q.push(grad_pending_node);
      }
    }
  }

  for (const auto &it : var_index_map_) {
    if (var_visited.count(it.first) == 0) {
      unused_vars_.push_back(it.second);
      VLOG(3) << "Var[" << it.second << "] [" << it.first->Name()
              << "] is not used";
    }
  }
}

// After each batch is calculated, the counter of each group (group.pending_)
// and the allreduce sequence counter (next_group_) will be reset.
void Reducer::PrepareForBackward(
    const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
  VLOG(3) << "after forward, then reset count for backward.";
  grad_need_hooks_ = true;
  next_group_ = 0;
  std::for_each(groups_.begin(), groups_.end(), [](Group &group) {
    group.pending_ = group.variable_indices_.size();
    group.sparse_contents_ = nullptr;
  });

  // reinitialize vars_marked_ready_ for next iteration
  vars_marked_ready_.clear();
  vars_marked_ready_.resize(vars_.size(), false);

  PADDLE_ENFORCE_EQ(
      groups_need_finalize_,
      false,
      platform::errors::PreconditionNotMet(
          "A serious error has occurred here. Please "
          "set find_unused_parameters=True to traverse the backward graph "
          "in each step and prepare the reduce in advance. If you have "
          "already set it, there may be several reasons for this error: "
          "1) Please note that all forward outputs derived from the module "
          "parameters must participate in the calculation of losses and "
          "subsequent gradient calculations. If not, the wrapper will hang, "
          "waiting for autograd to generate gradients for these parameters. "
          "You can use detach or stop_gradient to make the unused parameters "
          "detached from the autograd graph. "
          "2) Used multiple forwards and one backward. You may be able to wrap "
          "multiple forwards in a model."));

  // The first var to trigger the unused parameter
  has_marked_unused_vars_ = false;

  if (find_unused_vars_once_ || find_unused_vars_each_step_) {
    unused_vars_.clear();
    TraverseBackwardGraph(outputs);
    // only check once in first step
    find_unused_vars_once_ = false;
  }

  if (find_unused_vars_each_step_ && unused_vars_.empty()) {
    LOG_FIRST_N(WARNING, 1)
        << "All parameters are involved in the backward pass. "
           "It is recommended to set find_unused_parameters to False "
           "to improve performance. However, if unused parameters "
           "appear in subsequent iterative training, then an error "
           "will occur. Please make sure that in the subsequent "
           "training, there will be no parameters that are not used "
           "in the backward pass, and only then set find_unused_parameters "
           "to False.";
  }

  if (unused_vars_.size() == vars_.size()) {
    LOG_FIRST_N(WARNING, 1)
        << "There is no parameter in the device involved "
           "in the backward calculation. If there are "
           "parameters on other devices involved in the "
           "backward, then a serious error will occur here.";
  }
}

// Add hook function to each leaf node. When the gradient of a leaf node is
// generated, a sparse parameter will directly execute allreduce, while a
// dense parameter goes through three steps: 1, MarkVarReady: find the
// position of the corresponding group through var_index, share the gradient
// memory with the group's dense_tensors_, and decrease the group counter by
// 1. 2, MarkGroupReady: when the group counter reaches 0, allreduce can be
// emitted, and concat + allreduce + split is emitted in turn according to
// next_group_. 3, FinalizeBackward: after the end, synchronize each stream.
void Reducer::AddDistHook(size_t var_index) {
  PADDLE_ENFORCE_LT(var_index,
                    variable_locators_.size(),
                    platform::errors::OutOfRange(
                        "Out of bounds variable index. It must be less "
                        "than %d, but it is %d",
                        variable_locators_.size(),
                        var_index));

  // gradient synchronization is not required when grad_need_hooks_ is false.
  if (!grad_need_hooks_) {
    return;
  }

  VLOG(3) << "Var[" << var_index << "] ["
          << vars_[var_index]->GradVarBase()->Name()
          << "] arrived and triggered disthook";

  local_used_vars_[var_index] = 1;

  // rebuild group when find_unused_vars_each_step_ is false
  if (NeedRebuildGroup()) {
    rebuild_vars_.push_back(vars_[var_index]);
    rebuild_var_indices_.push_back(var_index);
  }

  if (!has_marked_unused_vars_) {
    has_marked_unused_vars_ = true;
    for (const auto &unused_index : unused_vars_) {
      MarkVarReady(unused_index, false);
    }
  }

  MarkVarReady(var_index, true);
}

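// Mark one parameter's gradient as ready. A dense gradient shares its storage
// into the group's dense_tensors_ slot (or is zero-filled when the parameter
// is unused); a sparse gradient is recorded as the group's sparse_contents_.
// When the group's pending counter drops to zero, MarkGroupReady launches the
// fused allreduce, and FinalizeBackward runs once every group is scheduled.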
void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
  groups_need_finalize_ = true;

  const auto &var_locator = variable_locators_[var_index];
  const auto group_index = var_locator.group_index;
  auto &group = groups_[group_index];

  // it is an error if the var has been marked ready before.
  if (vars_marked_ready_[var_index]) {
    auto error_info = string::Sprintf(
        "Error happened: parameter[%d][%s] has been marked ready before. "
        "Please set find_unused_parameters=True to traverse the backward "
        "graph in each step and prepare the reduce in advance. If you have "
        "set it, there may be several reasons for this error: "
        "1) In a multiple reentrant backward phase, some parameters are "
        "reused. "
        "2) Using model parameters outside of the forward function. Please "
        "make sure that model parameters are not shared in concurrent "
        "forward-backward passes.",
        var_index,
        vars_[var_index]->GradVarBase()->Name());

    PADDLE_ENFORCE_EQ(has_marked_unused_vars_,
                      false,
                      platform::errors::PreconditionNotMet(error_info));

    error_info +=
        "3) Unused parameters retrieval is incorrect. "
        "The return value of forward will be used to retrieve"
        " the unused parameters of the entire model. These "
        "gradients of unused parameters will not be synchronized "
        "between multiple cards. However, if the unused "
        "parameters participate in the backward calculation "
        "again at a later time (e.g. after the forward function, "
        "the loss calculation uses the unused "
        "parameters of the forward and triggers backward), "
        "their gradients will be wrong.";

    PADDLE_ENFORCE_EQ(has_marked_unused_vars_,
                      true,
                      platform::errors::PreconditionNotMet(error_info));
  } else {
    vars_marked_ready_[var_index] = true;
  }

  if (!group.is_sparse_) {
    // process dense group
    const auto inside_group_index = var_locator.inside_group_index;
    const auto length = group.length_[inside_group_index];
    auto &group_tensor = group.dense_tensors_[inside_group_index];

    if (is_used_var) {
      auto var_base = vars_[var_index]->GradVarBase();
      auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
      group_tensor.ShareDataWith(*tensor).Resize(
          {static_cast<int64_t>(length)});
    } else {
      // TODO(shenliang03): maybe save the memory
      // by avoiding tensor construction
      if (!group_tensor.IsInitialized()) {
        group_tensor.Resize({static_cast<int64_t>(length)});
        group_tensor.mutable_data(place_,
                                  framework::TransToPhiDataType(group.dtype_));
      }

#ifdef PADDLE_WITH_XPU_BKCL
      if (platform::is_xpu_place(group_tensor.place())) {
        auto dev_ctx = static_cast<platform::XPUDeviceContext *>(
            platform::DeviceContextPool::Instance().Get(place_));
        if (HasGrad(var_index)) {
          auto var_base = vars_[var_index]->GradVarBase();
          auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
          group_tensor.ShareDataWith(*tensor).Resize(
              {static_cast<int64_t>(length)});
        } else {
          group_tensor.Resize({static_cast<int64_t>(length)});
          int r = xpu::constant(dev_ctx->x_context(),
                                reinterpret_cast<float *>(group_tensor.data()),
                                group_tensor.numel(),
                                0.0f);
          PADDLE_ENFORCE_XDNN_SUCCESS(r, "constant");
          PADDLE_ENFORCE_XPU_SUCCESS(xpu_wait(dev_ctx->stream()));
        }
      }
#else
      auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
      if (HasGrad(var_index)) {
        auto var_base = vars_[var_index]->GradVarBase();
        auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
        group_tensor.ShareDataWith(*tensor).Resize(
            {static_cast<int64_t>(length)});
      } else {
        group_tensor.Resize({static_cast<int64_t>(length)});
        phi::funcs::set_constant(*dev_ctx, &group_tensor, 0.0);
      }
#endif
    }
  } else {
    // process sparse group
    PADDLE_ENFORCE_EQ(
        HasGrad(var_index),
        true,
        platform::errors::PreconditionNotMet(
            "The sparse parameter[%d][%s] should have gradient. "
            "Currently, DataParallel does not support sparse "
            "parameters without generating gradients during training. "
            "For example, if is_sparse=True is used in Embedding, and "
            "the current step of this parameter cannot generate gradient "
            "because of stop_gradient/detach, an error will occur.",
            var_index,
            vars_[var_index]->Name()));
    auto var_base = vars_[var_index]->GradVarBase();
    // need to check tensor type
    PADDLE_ENFORCE_EQ(
        var_base->Var().IsType<phi::SelectedRows>(),
        true,
        platform::errors::PreconditionNotMet(
            "The sparse parameter[%d][%s] must have a SelectedRows gradient. "
            "Before the forward pass, the parameter type is inferred to be "
            "SelectedRows, but after the backward pass, its actual type "
            "becomes LoDTensor. This is currently not supported by "
            "DataParallel. For example, if a sparse embedding is used, and "
            "the weight of the embedding is shared with subsequent dense "
            "parameters, then the parameter gradient of the embedding will "
            "be converted to dense parameters.",
            var_index,
            vars_[var_index]->Name()));

    group.sparse_contents_ = var_base->MutableVar();
  }

  if (--group.pending_ == 0) {
    // can start allreduce
    MarkGroupReady(group_index);
  }

  if (next_group_ == groups_.size()) {
    FinalizeBackward();
  }
}

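// Launch the fused allreduce for the given group and for any consecutive
// groups that are already complete, always in ascending order of next_group_;
// a group that becomes ready out of order is deferred until its turn.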
// TODO(liuyuhui): if BKCL supports non-blocking communication, this should be
// fixed to match multi-GPU card training.
void Reducer::MarkGroupReady(size_t group_index) {
  PADDLE_ENFORCE_GE(
      group_index,
      next_group_,
      platform::errors::PreconditionNotMet(
          "The index of the incoming group must be greater "
          "than or equal to the previously synchronized group index, "
          "expect it to be greater than or equal to %d, but got %d.",
          next_group_,
          group_index));

  if (group_index > next_group_) {
    VLOG(3) << "It will adjust the order of group in next batch automatically";
    return;
  }

  for (; next_group_ < groups_.size() && groups_[next_group_].pending_ == 0;
       ++next_group_) {
    UNUSED auto &group = groups_[next_group_];
    UNUSED const int run_order = next_group_ % nrings_;

    auto *tensor = group.dense_contents_.GetMutable<phi::DenseTensor>();
    tensor->Resize(phi::make_ddim({group.all_length_}))
        .mutable_data(place_, framework::TransToPhiDataType(group.dtype_));

    // For CUDA or XPU, compute_stream --> comm_stream.
    // For CPU, do nothing.
    // NOTE. Because concat uses the comm_stream,
    // so we expose WaitCompute() interface and call
    // it here.
    parallel_ctx_->WaitCompute(run_order);
    FusedAllReduceSchedule(run_order, group, next_group_);
  }
}

void Reducer::FusedAllReduceSchedule(const int run_order,
                                     Group &group,
                                     const int curr_group_index) {
  // The overall timeline: concat > div_nranks > allreduce > split
  // dev_context is used to select different stream
  const auto &dev_context = *parallel_ctx_->GetDeviceContext(run_order);
  if (group.is_sparse_) {
    VLOG(3) << "sparse group [" << curr_group_index
            << "] start allreduce in ring[" << run_order << "]";
    group.DivNRanks(dev_context, nranks_);
    parallel_ctx_->AllReduceByStream(
        *group.sparse_contents_, group.sparse_contents_, run_order, false);
  } else {
    VLOG(3) << "dense group [" << curr_group_index
            << "] start allreduce in ring[" << run_order << "]";
    // Select the communication stream to concat tensors
    // group.dense_tensors ---> group.dense_contents_
    group.ConcatTensors(dev_context);

    group.DivNRanks(dev_context, nranks_);
    // Start allreduce
    parallel_ctx_->AllReduceByStream(
        group.dense_contents_, &(group.dense_contents_), run_order, false);

    // Select the communication stream to split tensors
    // group.dense_contents_ ---> group.dense_tensors
    group.SplitTensors(dev_context);
  }
}

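// Regroup the parameters according to the order in which their gradients
// actually arrived during the first backward pass, so that later iterations
// fuse tensors that become ready close together in time.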
std::vector<std::vector<size_t>> Reducer::RebuildGruops() {
  VLOG(3) << "The order of parameter arrival: "
          << string::join_strings(rebuild_var_indices_, ',');

  PADDLE_ENFORCE_EQ(
      rebuild_vars_.size(),
      vars_.size(),
      platform::errors::PreconditionNotMet(
          "The number of rebuilt vars should be equal to the number of "
          "original vars, expect it to be %d, but got %d.",
          vars_.size(),
          rebuild_vars_.size()));
  std::reverse(rebuild_vars_.begin(), rebuild_vars_.end());
  std::reverse(rebuild_var_indices_.begin(), rebuild_var_indices_.end());
  auto rebuild_group_indices = AssignGroupBySize(rebuild_vars_,
                                                 is_sparse_gradient_,
                                                 group_size_limits_,
                                                 rebuild_var_indices_);
  has_rebuilt_group_ = true;
  rebuild_vars_.clear();
  rebuild_var_indices_.clear();
  std::reverse(rebuild_group_indices.begin(), rebuild_group_indices.end());
  return rebuild_group_indices;
}

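// Allreduce the local usage bitmap so every rank learns which parameters were
// used on at least one rank; for a parameter that is globally used but locally
// unused, copy the already-reduced group slice back into its grad var so the
// optimizer still sees a synchronized gradient.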
void Reducer::ProcessUnusedDenseVars() {
  // The calculation stream must be used here to
  // avoid conflicts with communication.
  VLOG(3) << "Local used vars : "
          << string::join_strings(local_used_vars_, ',');
  const auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
  // H2D is to allreduce the local_used_vars_
  auto *global_used_tensor = global_used_vars_.GetMutable<phi::DenseTensor>();
  framework::TensorFromVector<int>(
      local_used_vars_, *dev_ctx, global_used_tensor);
  parallel_ctx_->AllReduceByStream(
      global_used_vars_, &global_used_vars_, 0, true);
  framework::TensorToVector<int>(
      *global_used_tensor, *dev_ctx, &local_used_vars_);

  // sync compute stream to get global used var message,
  // but maybe affect speed performance
  parallel_ctx_->SynchronizeCompute();
  VLOG(3) << "Global used vars : "
          << string::join_strings(local_used_vars_, ',');

  for (const auto var_index : unused_vars_) {
    const bool global_unused = (local_used_vars_[var_index] == 0);

    // globally used but locally unused, set grad
    VLOG(3) << "Var [" << var_index << "] [" << vars_[var_index]->Name()
            << "] global_unused:" << global_unused
            << "  has grad: " << HasGrad(var_index);

    if (!global_unused) {
      VLOG(3) << "Start process unused Var";
      // 1. source var base
      const auto &var_locator = variable_locators_[var_index];
      const auto group_index = var_locator.group_index;
      const auto &group = groups_[group_index];
      const auto inside_group_index = var_locator.inside_group_index;
      const auto &src_tensor = group.dense_tensors_[inside_group_index];
      // sparse vars need no check and do not support find_unused_parameters
      if (group.is_sparse_) {
        continue;
      }
      // 2. destination var base
      auto dest_var_base = vars_[var_index];
      auto *dest_tensor =
          dest_var_base->MutableVar()->GetMutable<phi::DenseTensor>();
      const auto &dest_dims = dest_tensor->dims();

      // 3. create grad var base or get grad var base
      auto grad_var_base_tmp = dest_var_base->MutableGradVarBase();
      // NOTE(haohongxiang): Calling SetIsEmpty here is to make sure that
      // gradient accumulation can continue normally after clear_gradients()
      // especially in cases involving complex control flow.
      grad_var_base_tmp->SharedVar()->SetIsEmpty(false);

      // 4. set grad tensor
      auto *dest_grad_tensor =
          grad_var_base_tmp->MutableVar()->GetMutable<phi::DenseTensor>();
      const auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place_);
      paddle::framework::TensorCopy(
          src_tensor, place_, *dev_ctx, dest_grad_tensor);
      dest_grad_tensor->Resize(dest_dims);
    }
  }
}

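// Whether the gradient variable of vars_[var_index] currently holds
// initialized data (either a dense tensor or the value of a SelectedRows).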
bool Reducer::HasGrad(size_t var_index) {
  const auto grad_var = vars_[var_index]->GradVarBase();
  if (!grad_var || !grad_var->Var().IsInitialized()) {
    return false;
  }

  const auto &var = grad_var->Var();
  if (var.IsType<phi::DenseTensor>()) {
    if (var.Get<phi::DenseTensor>().IsInitialized()) {
      return true;
    }
  } else if (var.IsType<phi::SelectedRows>()) {
    if (var.Get<phi::SelectedRows>().value().IsInitialized()) {
      return true;
    }
  } else {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Only support LoDTensor and SelectedRows for gradient var"));
  }
  return false;
}

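// Called after the last group has been scheduled: wait for all communication
// rings, release the fused buffers, optionally rebuild the groups from the
// recorded arrival order, and reconcile unused-parameter gradients.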
void Reducer::FinalizeBackward() {
  groups_need_finalize_ = false;
  grad_need_hooks_ = false;

  // Must prevent compute_stream_ starting until all comm streams have finished
  for (int i = 0; i < nrings_; ++i) {
    parallel_ctx_->WaitComm(i);
  }

  for (auto &group : groups_) {
    if (!group.is_sparse_) {
      group.dense_contents_.Clear();
    }
  }

  if (NeedRebuildGroup()) {
    VLOG(3) << "Start rebuilding the groups";
    auto rebuild_group_indices = RebuildGruops();
    group_indices_ = std::move(rebuild_group_indices);
    InitializeGroups(group_indices_);
  }

  if (find_unused_vars_each_step_) {
// TODO(liuyuhui): support TensorCopy/TensorFromVector/TensorToVector on XPU
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) || \
    defined(PADDLE_WITH_GLOO)
    ProcessUnusedDenseVars();
#endif
    // Initialize local used vars
    local_used_vars_.clear();
    local_used_vars_.resize(vars_.size(), 0);
    VLOG(3) << "ProcessUnusedDenseVars is finished.";
  }

  VLOG(3) << "In the batch, Reducer is finished.";
}

// According to the size of each parameter, it is allocated to a group.
// A sparse parameter occupies a group exclusively. Dense parameters of
// the same data type are assigned to the same group. When dividing groups,
// the size of each group is limited by the corresponding value in
// group_size_limits in turn; once those values run out, the last value of
// group_size_limits is used for the remaining groups. A limit value of 0
// means that the parameter will monopolize its group.
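// For example (illustrative sizes, not taken from real defaults): with four
// FP32 dense parameters of 1 MB, 2 MB, 3 MB and 5 MB and
// group_size_limits = {4 MB}, the running total reaches 4 MB only after the
// third parameter, so the first group is {0, 1, 2}; the fourth parameter then
// exceeds the limit by itself and forms {3}. Any sparse parameter would get
// its own single-element group regardless of size.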
std::vector<std::vector<size_t>> AssignGroupBySize(
    const std::vector<std::shared_ptr<imperative::VarBase>> &vars,
    const std::vector<bool> &is_sparse_gradient,
    const std::vector<size_t> &group_size_limits,
    const std::vector<int64_t> &tensor_indices) {
  PADDLE_ENFORCE_EQ(vars.size(),
                    is_sparse_gradient.size(),
                    platform::errors::PreconditionNotMet(
                        "vars len must be equal to is_sparse_gradient len, but "
                        "[%lu] != [%lu]",
                        vars.size(),
                        is_sparse_gradient.size()));
  auto check_perm = [](const std::vector<int64_t> &x) -> bool {
    size_t len = x.size();
    std::vector<size_t> cnt(len, 0);
    for (size_t i = 0; i < len; ++i) {
      if (x[i] >= static_cast<int64_t>(len) || x[i] < 0 || cnt[x[i]]) {
        return false;
      }
      cnt[x[i]]++;
    }
    return true;
  };
  PADDLE_ENFORCE_EQ(true,
                    check_perm(tensor_indices),
                    platform::errors::PreconditionNotMet(
                        "tensor_indices must be a permutation from 0 to %lu",
                        tensor_indices.size()));
  // the return vector
  std::vector<std::vector<size_t>> res;

  // Key: the var type
  // Value: should use which index in group_size_limits for group size limit
  std::unordered_map<std::string, size_t> group_limit_index;

  // Key: the var type
  // Value: <the var indices in input tensors, total size in bytes of this group>
  std::unordered_map<std::string, std::pair<std::vector<size_t>, size_t>>
      next_group;

  for (size_t i = 0; i < vars.size(); ++i) {
    const auto &var = vars[i];

    size_t tensor_real_index = i;
    if (!tensor_indices.empty()) {
      tensor_real_index = tensor_indices[i];
    }

    if (is_sparse_gradient[tensor_real_index]) {
      // we keep each sparse var in its own group
      res.push_back({tensor_real_index});
      continue;
    }

    const auto &var_dtype = var->DataType();
    const auto var_dtype_str = framework::DataTypeToString(var_dtype);
    VLOG(3) << "var[" << var->GradVarName() << "] 's type is "
            << var->DataType();
    auto &group_info = next_group[var_dtype_str];
    int64_t var_size = -1;
    if (var->Var().IsType<phi::DenseTensor>()) {
      var_size = var->Var().Get<phi::DenseTensor>().numel();
    } else {
      VLOG(3) << "var " << var->Name()
              << " is not tensor or selected_rows, so skip it";
      continue;
    }
    group_info.first.push_back(tensor_real_index);
    group_info.second += framework::SizeOfType(var_dtype) * var_size;

    if (group_limit_index.find(var_dtype_str) == group_limit_index.end()) {
      // means it is the first var of var_dtype
      group_limit_index[var_dtype_str] = 0;
    }
    auto &cur_limit_index = group_limit_index[var_dtype_str];
    if (group_info.second >= group_size_limits[cur_limit_index]) {
      // exceed group capacity and create a new group
      res.emplace_back(std::move(group_info.first));
      group_info = std::pair<std::vector<size_t>, size_t>();
      cur_limit_index =
          (std::min)(cur_limit_index + 1, group_size_limits.size() - 1);
    }
  }

  // add the final groups
  for (auto &e : next_group) {
    auto &group_info = e.second;
    if (!group_info.first.empty()) {
      res.emplace_back(std::move(group_info.first));
    }
  }

  for (const auto &group_index : res) {
    PADDLE_ENFORCE_NE(
        group_index.empty(),
        true,
        platform::errors::PreconditionNotMet(
            "AssignGroupBySize construct empty group, please check."));
  }
  if (tensor_indices.empty()) {
    std::sort(res.begin(),
              res.end(),
              [](const std::vector<size_t> &x, const std::vector<size_t> &y) {
                return x.front() < y.front();
              });
  }
  return res;
}
#endif

}  // namespace imperative
}  // namespace paddle