// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/gradient_accumulator.h"
16

J
Jiabin Yang 已提交
17 18 19
#include <algorithm>
#include <memory>
#include <utility>
20

J
Jiabin Yang 已提交
21 22 23 24 25
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
26
#include "paddle/fluid/operators/math/selected_rows_functor.h"
27 28
#include "paddle/fluid/platform/complex128.h"
#include "paddle/fluid/platform/complex64.h"
J
Jiabin Yang 已提交
29
#include "paddle/fluid/platform/device_context.h"
30
#include "paddle/fluid/platform/float16.h"
J
Jiabin Yang 已提交
31
#include "paddle/fluid/platform/profiler.h"
H
hong 已提交
32 33 34
#ifdef PADDLE_WITH_XPU
#include "xpu/refactor/math.h"
#endif
J
Jiabin Yang 已提交
35 36 37 38

namespace paddle {
namespace imperative {

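// Move the gradient held by `src` into `dst` when a copy is not forced;
// otherwise deep-copy the underlying LoDTensor or SelectedRows so that `src`
// is left unchanged for other consumers.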
static void MoveOrCopyVar(framework::Variable* dst, framework::Variable* src,
                          bool force_copy) {
  if (!force_copy) {
    VLOG(6) << "Just Move Variable when sum gradients within this graph";
    *dst = std::move(*src);
    return;
  }

  VLOG(6) << "Copy occurs when sum gradients within this graph";
  if (src->IsType<framework::LoDTensor>()) {
    auto& src_tensor = src->Get<framework::LoDTensor>();
    if (!dst->IsType<framework::LoDTensor>()) {
      dst->Clear();
    }
    auto* dst_tensor = dst->GetMutable<framework::LoDTensor>();
    framework::TensorCopy(src_tensor, src_tensor.place(), dst_tensor);
    dst_tensor->set_lod(src_tensor.lod());
  } else if (src->IsType<framework::SelectedRows>()) {
    auto& src_selected_rows = src->Get<framework::SelectedRows>();
    if (!dst->IsType<framework::SelectedRows>()) {
      dst->Clear();
    }
    auto* dst_selected_rows = dst->GetMutable<framework::SelectedRows>();
    framework::TensorCopy(src_selected_rows.value(),
                          src_selected_rows.value().place(),
                          dst_selected_rows->mutable_value());
    dst_selected_rows->set_rows(src_selected_rows.rows());
    dst_selected_rows->set_height(src_selected_rows.height());
  } else {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Only support LoDTensor and SelectedRows for sum gradient"));
  }
}

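// Place-dispatched visitor that computes y = x + y over two raw buffers of
// `numel` elements. CPU, XPU and CUDA places are implemented (when the
// corresponding build flags are enabled); all other places throw
// PermissionDenied.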
template <typename T>
class TensorAddFunctor : public boost::static_visitor<> {
 public:
  TensorAddFunctor(int64_t numel, const T* x, T* y)
      : numel_(numel), x_(x), y_(y) {}

  void operator()(const platform::CPUPlace& place) {
    platform::CPUDeviceContext* ctx = dynamic_cast<platform::CPUDeviceContext*>(
        platform::DeviceContextPool::Instance().Get(place));
    auto blas = operators::math::GetBlas<platform::CPUDeviceContext, T>(*ctx);
    blas.AXPY(numel_, 1., x_, y_);
  }

#ifdef PADDLE_WITH_XPU
  void operator()(const platform::XPUPlace& place) {
    platform::XPUDeviceContext* ctx = dynamic_cast<platform::XPUDeviceContext*>(
        platform::DeviceContextPool::Instance().Get(place));
    xpu::add<T>(ctx->x_context(), x_, y_, y_, static_cast<int>(numel_));
  }
#else
  void operator()(const platform::XPUPlace& place) {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Gradient accumulation on place (%s) "
        "is not supported in imperative mode",
        place));
  }
#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  void operator()(const platform::CUDAPlace& place) {
    platform::CUDADeviceContext* ctx =
        dynamic_cast<platform::CUDADeviceContext*>(
            platform::DeviceContextPool::Instance().Get(place));
    auto blas = operators::math::GetBlas<platform::CUDADeviceContext, T>(*ctx);
    blas.AXPY(numel_, 1., x_, y_);
  }
#else
  void operator()(const platform::CUDAPlace& place) {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Gradient accumulation on place (%s) "
        "is not supported in imperative mode",
        place));
  }
#endif

#ifdef PADDLE_WITH_ASCEND_CL
  void operator()(const platform::NPUPlace& place) {
    // TODO(zhiqiu): SUPPORT it
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Gradient accumulation on place (%s) "
        "is not supported in imperative mode",
        place));
  }
#else
  void operator()(const platform::NPUPlace& place) {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Gradient accumulation on place (%s) "
        "is not supported in imperative mode",
        place));
  }
#endif

  void operator()(const platform::NPUPinnedPlace& place) {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Gradient accumulation on place (%s) "
        "is not supported in imperative mode",
        place));
  }
  // there is NO blas in CUDAPinnedPlace
  void operator()(const platform::CUDAPinnedPlace& place) {
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Gradient accumulation on place (%s) "
        "is not supported in imperative mode",
        place));
  }

 private:
  int64_t numel_;
  const T* x_;
  T* y_;
};

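// Element-wise addition of `src` into `dst` via ElementwiseAddTo, used below
// for data types (such as float16) that TensorAddFunctor does not handle.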
template <typename DeviceContext, typename T>
void TensorAddImpl(const framework::Tensor& src, framework::Tensor* dst,
                   const platform::Place& place) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  paddle::platform::DeviceContext* ctx = pool.Get(place);
  auto dev_ctx = dynamic_cast<DeviceContext*>(ctx);
  operators::math::ElementwiseAddTo<DeviceContext, T> func;
  func(dev_ctx, src, dst);
}

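// Accumulate the LoDTensor in `src` into the LoDTensor in `dst` (dst += src),
// dispatching on data type: float/double/complex go through TensorAddFunctor,
// float16 goes through TensorAddImpl, and other types throw Unimplemented.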
void TensorAdd(const framework::Variable& src, framework::Variable* dst) {
  auto* dst_tensor = dst->GetMutable<framework::LoDTensor>();
  auto& src_tensor = src.Get<framework::LoDTensor>();

  auto numel = src_tensor.numel();

  // FIXME(minqiyang): loss_grad op will pass a zero grad of label;
  // this early return is an ugly fix for it
  if (numel == 0) {
    return;
  }

  PADDLE_ENFORCE_EQ(
      dst_tensor->numel(), numel,
      platform::errors::PreconditionNotMet(
          "The number of elements of source tensor and destination tensor "
          "should be equal, but got the number of elements of source tensor is "
          "%zu and the number of elements of destination tensor is %zu.",
          numel, dst_tensor->numel()));

  auto data_type = src_tensor.type();
  auto place = src_tensor.place();

#define PADDLE_TENSOR_ADD(cpp_type)                                  \
  if (data_type == framework::DataTypeTrait<cpp_type>::DataType()) { \
    TensorAddFunctor<cpp_type> func(                                 \
        numel, src_tensor.data<cpp_type>(),                          \
        dst_tensor->mutable_data<cpp_type>(place));                  \
    boost::apply_visitor(func, place);                               \
    return;                                                          \
  }

  PADDLE_TENSOR_ADD(float);
#ifndef PADDLE_WITH_XPU
  // NOTE(phlrain): xpu only supports float
  PADDLE_TENSOR_ADD(double);
  // NOTE(chenweihang): complex gradients are only supported when accumulating
  // dense tensors; add SelectedRows support if needed in the future
  PADDLE_TENSOR_ADD(platform::complex64);
  PADDLE_TENSOR_ADD(platform::complex128);
#endif

#undef PADDLE_TENSOR_ADD

  if (data_type == framework::proto::VarType::FP16) {
    if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      return TensorAddImpl<platform::CUDADeviceContext, platform::float16>(
          src_tensor, dst_tensor, place);
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "Gradient accumulation of data type (%s) on place (%s) is not "
          "supported in imperative mode",
          framework::DataTypeToString(data_type), place));
#endif
    } else if (platform::is_cpu_place(place)) {
      return TensorAddImpl<platform::CPUDeviceContext, platform::float16>(
          src_tensor, dst_tensor, place);
    }
  }
  PADDLE_THROW(platform::errors::Unimplemented(
      "Gradient accumulation of data type (%s) on place (%s) is not "
      "supported in imperative mode",
      framework::DataTypeToString(data_type), place));
}

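// Accumulate the SelectedRows gradient in `src` into the dense LoDTensor held
// by `dst` in place. Only float and double element types are supported.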
void SelectedRowsAddToTensor(const framework::Variable& src,
                             framework::Variable* dst) {
  auto* dst_tensor = dst->GetMutable<framework::LoDTensor>();
  auto& src_selected_rows = src.Get<framework::SelectedRows>();
  auto place = dst_tensor->place();
  auto data_type = src_selected_rows.value().type();
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();

#define PADDLE_SELECTED_ROWS_ADD_TO_TENSOR(dev_ctx_type, cpp_type)           \
  if (data_type == framework::DataTypeTrait<cpp_type>::DataType()) {         \
    paddle::platform::DeviceContext* dev_ctx = pool.Get(place);              \
    paddle::operators::math::SelectedRowsAddToTensor<dev_ctx_type, cpp_type> \
        functor;                                                             \
    functor(*(dynamic_cast<dev_ctx_type*>(dev_ctx)), src_selected_rows,      \
            dst_tensor);                                                     \
    return;                                                                  \
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (paddle::platform::is_gpu_place(place)) {
    PADDLE_SELECTED_ROWS_ADD_TO_TENSOR(platform::CUDADeviceContext, float);
    PADDLE_SELECTED_ROWS_ADD_TO_TENSOR(platform::CUDADeviceContext, double);
  } else {
#endif
    PADDLE_SELECTED_ROWS_ADD_TO_TENSOR(platform::CPUDeviceContext, float);
    PADDLE_SELECTED_ROWS_ADD_TO_TENSOR(platform::CPUDeviceContext, double);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  }
#endif

#undef PADDLE_SELECTED_ROWS_ADD_TO_TENSOR

  PADDLE_THROW(platform::errors::InvalidArgument(
      "Not supported data type %s for SelectedRowsAddToTensor",
      framework::DataTypeToString(data_type)));
}

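// Compute dst = src_selected_rows + src_tensor without modifying either input;
// the result is written to a freshly resized LoDTensor in `dst_tensor_var`.
// Only float and double element types are supported.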
static void SelectedRowsAddTensor(
    const framework::Variable& src_selected_rows_var,
    const framework::Variable& src_tensor_var,
    framework::Variable* dst_tensor_var) {
  const auto& src_selected_rows =
      src_selected_rows_var.Get<framework::SelectedRows>();
  const auto& src_tensor = src_tensor_var.Get<framework::LoDTensor>();
  const auto& place = src_tensor.place();
  auto data_type = src_tensor.type();
  auto* dev_ctx = platform::DeviceContextPool::Instance().Get(place);

  auto* dst_tensor = dst_tensor_var->GetMutable<framework::LoDTensor>();
  dst_tensor->Resize(src_tensor.dims());
  dst_tensor->mutable_data(place, data_type);

#define PADDLE_SELECTED_ROWS_ADD_TENSOR(dev_ctx_type, cpp_type)            \
  if (data_type == framework::DataTypeTrait<cpp_type>::DataType()) {       \
    paddle::operators::math::SelectedRowsAddTensor<dev_ctx_type, cpp_type> \
        functor;                                                           \
    functor(*(dynamic_cast<dev_ctx_type*>(dev_ctx)), src_selected_rows,    \
            src_tensor, dst_tensor);                                       \
    return;                                                                \
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::is_gpu_place(place)) {
    PADDLE_SELECTED_ROWS_ADD_TENSOR(platform::CUDADeviceContext, float);
    PADDLE_SELECTED_ROWS_ADD_TENSOR(platform::CUDADeviceContext, double);
  } else {
#endif
    PADDLE_SELECTED_ROWS_ADD_TENSOR(platform::CPUDeviceContext, float);
    PADDLE_SELECTED_ROWS_ADD_TENSOR(platform::CPUDeviceContext, double);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  }
#endif

  PADDLE_THROW(platform::errors::InvalidArgument(
      "Not supported data type %s for SelectedRowsAddToTensor",
      framework::DataTypeToString(data_type)));

#undef PADDLE_SELECTED_ROWS_ADD_TENSOR
}

// Note(chenweihang): when two SelectedRows need to be added, adding one to the
//   other in place is not the same as merging both into an empty SelectedRows;
//   the latter (merging) is the correct behavior.
std::shared_ptr<VariableWrapper> SelectedRowsMerge(
    const framework::Variable& src1, const framework::Variable& src2) {
  auto& src_selected_rows1 = src1.Get<framework::SelectedRows>();
  auto& src_selected_rows2 = src2.Get<framework::SelectedRows>();
  auto place = src_selected_rows1.value().place();
  auto data_type = src_selected_rows1.value().type();
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();

  std::vector<const framework::SelectedRows*> src_selected_rows;
  src_selected_rows.emplace_back(&src_selected_rows1);
  src_selected_rows.emplace_back(&src_selected_rows2);
  auto dst_var = std::make_shared<VariableWrapper>("Temp");
  auto* dst_selected_rows =
      dst_var->MutableVar()->GetMutable<framework::SelectedRows>();

#define PADDLE_SELECTED_ROWS_ADD(dev_ctx_type, cpp_type)                  \
  if (data_type == framework::DataTypeTrait<cpp_type>::DataType()) {      \
    paddle::platform::DeviceContext* dev_ctx = pool.Get(place);           \
    paddle::operators::math::scatter::MergeAdd<dev_ctx_type, cpp_type>    \
        merge_add;                                                        \
    merge_add(*(dynamic_cast<dev_ctx_type*>(dev_ctx)), src_selected_rows, \
              dst_selected_rows);                                         \
    return dst_var;                                                       \
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (paddle::platform::is_gpu_place(place)) {
    PADDLE_SELECTED_ROWS_ADD(platform::CUDADeviceContext, float);
    PADDLE_SELECTED_ROWS_ADD(platform::CUDADeviceContext, double);
  } else {
#endif
    PADDLE_SELECTED_ROWS_ADD(platform::CPUDeviceContext, float);
    PADDLE_SELECTED_ROWS_ADD(platform::CPUDeviceContext, double);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  }
#endif

#undef PADDLE_SELECTED_ROWS_ADD

  PADDLE_THROW(platform::errors::InvalidArgument(
      "Not supported data type %s for SelectedRowsMerge",
      framework::DataTypeToString(data_type)));
}

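// Add the gradient held by `var` into `dst_var`, dispatching on the
// LoDTensor / SelectedRows combination of the two operands. When
// `unchange_input` is true, `var` must stay intact, so a temporary variable is
// used wherever the add would otherwise modify it in place.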
void VariableWrapperAdd(std::shared_ptr<VariableWrapper> var,
                        VariableWrapper* dst_var, bool unchange_input) {
  auto& src = var->Var();
  auto* dst = dst_var->MutableVar();
  if (dst->IsType<framework::LoDTensor>()) {
    if (src.IsType<framework::LoDTensor>()) {
      TensorAdd(src, dst);
    } else if (src.IsType<framework::SelectedRows>()) {
      SelectedRowsAddToTensor(src, dst);
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Unexpected branch, output variable type is %s",
          framework::ToTypeName(dst->Type())));
    }
  } else {
    if (src.IsType<framework::LoDTensor>()) {
      if (unchange_input) {
        framework::Variable new_dst;
        SelectedRowsAddTensor(*dst, src, &new_dst);
        *dst = std::move(new_dst);
      } else {
        auto* src_mutable = var->MutableVar();
        SelectedRowsAddToTensor(*dst, src_mutable);
        *dst = std::move(*(var->MutableVar()));
      }
    } else if (src.IsType<framework::SelectedRows>()) {
      auto temp = SelectedRowsMerge(src, *dst);
      *dst = std::move(*(temp->MutableVar()));
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Unexpected branch, output variable type is %s",
          framework::ToTypeName(dst->Type())));
    }
  }
}

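// Return the place of the LoDTensor or SelectedRows wrapped by `var`.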
static platform::Place GetPlaceOfVar(
    const std::shared_ptr<VariableWrapper>& var) {
  platform::Place place;
  if (var->Var().IsType<framework::LoDTensor>()) {
    place = var->Var().Get<framework::LoDTensor>().place();
  } else if (var->Var().IsType<framework::SelectedRows>()) {
    place = var->Var().Get<framework::SelectedRows>().place();
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "only support LoDTensor and SelectedRows in dygraph"));
  }
  return place;
}

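// Fold the gradient summed in inner_var_ (for the current graph) into var_,
// the persisted leaf gradient: add when var_ already holds a value from a
// previous graph, otherwise simply move.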
void GradientAccumulator::AccumulateGrad() {
  /**
   * If the leaf gradient has been fully calculated, the inner_var_
   * should be added to the var_.
   */
  if (!var_->IsLeafGrad() || !SumGradCompleted() || !HasInnerVar()) {
    return;
  }
  PADDLE_ENFORCE_EQ(HasInnerVar(), true,
                    platform::errors::InvalidArgument(
                        "Leaf tensor should have inner var to store results of "
                        "this auto-grad"));
  PADDLE_ENFORCE_EQ(inner_var_->Var().IsInitialized(), true,
                    platform::errors::InvalidArgument(
                        "Interior var of Leaf tensor should be initialized."));
  auto* src = inner_var_->MutableVar();
  auto* dst = var_->MutableVar();
  if (!var_->IsEmpty()) {
    VLOG(6) << "Leaf Gradient Var(" << var_->Name()
            << ") has been calculated by previous graph, will accumulate on "
               "previous graph.";
    if (dst->IsType<framework::LoDTensor>()) {
      if (src->IsType<framework::LoDTensor>()) {
        TensorAdd(*src, dst);
      } else if (src->IsType<framework::SelectedRows>()) {
        SelectedRowsAddToTensor(*src, dst);
      }
    } else if (dst->IsType<framework::SelectedRows>()) {
      if (src->IsType<framework::LoDTensor>()) {
        SelectedRowsAddToTensor(*dst, src);
        *dst = std::move(*src);
      } else if (src->IsType<framework::SelectedRows>()) {
        auto temp = SelectedRowsMerge(*src, *dst);
        *dst = std::move(*(temp->MutableVar()));
      }
    } else {
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Only support LoDTensor and SelectedRows for gradient var"));
    }
  } else {
    VLOG(6) << "Leaf Gradient Var(" << var_->Name()
            << ") has not been initialized, not accumulate. Just move";
    *(dst) = std::move(*src);
    var_->SetType(inner_var_->Type());
    var_->SetDataType(inner_var_->DataType());
    var_->SetIsEmpty(false);
  }
  inner_var_.reset();
}

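// Run the registered VariableWrapper hooks on the leaf gradient's inner var
// once the in-graph summation has finished; each hook may return a
// replacement gradient, which is chained into the next hook.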
void GradientAccumulator::CallGradientHooks() {
  PADDLE_ENFORCE_EQ(var_->IsLeafGrad(), true,
                    platform::errors::Unavailable(
                        "Only leaf gradient Tensor can deal with by gradient "
                        "hook in gradient accumulator."));
  PADDLE_ENFORCE_EQ(
      SumGradCompleted(), true,
      platform::errors::PreconditionNotMet(
          "Only can call gradient hooks after sum gradient completed."));
  PADDLE_ENFORCE_EQ(
      HasInnerVar(), true,
      platform::errors::PreconditionNotMet(
          "Leaf Tensor's inner var is nullptr when call gradient hook."));
  PADDLE_ENFORCE_EQ(
      inner_var_->Var().IsInitialized(), true,
      platform::errors::PreconditionNotMet("Leaf Tensor's inner var "
                                           "is not initialized when "
                                           "call gradient hook."));
  if (var_->HasVariableWrapperHook()) {
    VLOG(3) << "Call " << var_->GetVariableWrapperHooks().size()
            << " hooks of leaf gradient accumulator's inner var `"
            << var_->Name() << "`.";
    auto tmp_var = inner_var_;
    VLOG(3) << "Input var " << var_->Name() << "'s hook size - "
            << var_->GetVariableWrapperHooks().size();
    for (const auto& hook_pair : var_->GetVariableWrapperHooks()) {
      tmp_var = (*hook_pair.second)(tmp_var);
    }
    inner_var_ = tmp_var;
  }
}

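// Run the registered void (reduce) hooks once accumulation has fully
// finished, i.e. after inner_var_ has already been folded into var_.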
void GradientAccumulator::CallReduceHooks() {
  PADDLE_ENFORCE_EQ(
      var_->IsLeafGrad(), true,
      platform::errors::Unavailable("Only leaf gradient Tensor can deal with "
                                    "by reduce hook in gradient accumulator."));
  PADDLE_ENFORCE_EQ(SumGradCompleted(), true,
                    platform::errors::PreconditionNotMet(
                        "Only can call reduce hooks after the gradient "
                        "summation is completed in current batch."));
  PADDLE_ENFORCE_EQ(HasInnerVar(), false,
                    platform::errors::PreconditionNotMet(
                        "Only can call reduce hooks after the "
                        "gradient accumulation is completed in "
                        "current batch or across batchs."));
  if (var_->HasVoidHook()) {
    for (const auto& hook : var_->GetVoidHooks()) {
      VLOG(3) << "call gradient accumulator backward hooks.";
      (*hook)();
    }
  }
}

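// Eagerly accumulate one incoming gradient `var` into the leaf variable:
// move/copy on the first contribution, add on the following ones. If the
// variable has stop_gradient set, an uninitialized gradient tensor is
// zero-filled instead.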
void EagerGradientAccumulator::SumGrad(std::shared_ptr<VariableWrapper> var,
                                       size_t trace_id, bool unchange_input) {
  /**
   * If var has grad node, it indicates that this var would be an input
   * of a grad op. Therefore, it should not be changed.
   */
  if (var->HasGradNode()) {
    unchange_input = true;
  }

  auto* dst_var = Var();
  platform::Place place = GetPlaceOfVar(var);
  if (!dst_var->OverridedStopGradient()) {
    if (CurCnt() == 0) {
      MoveOrCopyVar(dst_var->MutableVar(), var->MutableVar(), unchange_input);
    } else {
      VLOG(6) << "Sum Gradient for: " << dst_var->Name()
              << " within this graph.";
      VariableWrapperAdd(var, dst_var, unchange_input);
    }
  } else {
    if (!dst_var->Var().IsInitialized() ||
        !dst_var->Var().Get<framework::LoDTensor>().IsInitialized()) {
      VLOG(6) << "Set StopGradient Grad: " << dst_var->Name() << " as zero ";
      auto* dev_ctx = platform::DeviceContextPool::Instance().Get(place);
      if (!dst_var->Var().IsInitialized()) {
        auto* tensor =
            dst_var->MutableVar()->GetMutable<framework::LoDTensor>();
        VLOG(6) << "Dims of " << dst_var->Name() << " is set as: "
                << var->Var().Get<framework::LoDTensor>().dims();
        tensor->Resize(var->Var().Get<framework::LoDTensor>().dims());
        tensor->mutable_data(place, var->DataType());
        operators::math::set_constant(*dev_ctx, tensor, 0.0);
      } else {
        auto* tensor =
            dst_var->MutableVar()->GetMutable<framework::LoDTensor>();
        tensor->mutable_data(place, var->DataType());
        operators::math::set_constant(*dev_ctx, tensor, 0.0);
      }
    }
  }

  // Type may be changed after OP run, such as VarTypeInference
  // so synchronize the VariableWrapper with the Variable.
  if (dst_var->Var().IsType<framework::LoDTensor>()) {
    dst_var->SetType(framework::proto::VarType::LOD_TENSOR);
  } else if (dst_var->Var().IsType<framework::SelectedRows>()) {
    dst_var->SetType(framework::proto::VarType::SELECTED_ROWS);
  }

  // Increase current count
  IncreaseCurCnt();
}

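// Buffer incoming gradients until all ref_cnt_ contributions have arrived,
// then sum them in descending trace_id order. On CUDA places the SelectedRows
// gradients are summed before the dense ones. Stop-gradient variables are
// zero-filled as in the eager accumulator.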
void SortedGradientAccumulator::SumGrad(std::shared_ptr<VariableWrapper> var,
                                        size_t trace_id, bool unchange_input) {
  auto* dst_var = Var();
  platform::Place place = GetPlaceOfVar(var);
  if (!dst_var->OverridedStopGradient()) {
    if (ref_cnt_ == 1) {
      MoveOrCopyVar(dst_var->MutableVar(), var->MutableVar(),
                    unchange_input || var->HasGradNode());
    } else {
      if (tmp_grad_vars_.empty()) {
        tmp_grad_vars_.reserve(ref_cnt_);
      }

      tmp_grad_vars_.emplace_back(std::move(var), trace_id, unchange_input);

      if (tmp_grad_vars_.size() != ref_cnt_) {
        return;
      }

      VLOG(6) << "Sum Gradient for: " << dst_var->Name()
              << " within this graph.";
      std::sort(tmp_grad_vars_.begin(), tmp_grad_vars_.end(),
                [](const SavedVarInfo& info1, const SavedVarInfo& info2) {
                  return info1.trace_id > info2.trace_id;
                });

      for (auto& var_info : tmp_grad_vars_) {
        if (var_info.var->HasGradNode()) {
          var_info.unchange_input = true;
        }
      }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      if (paddle::platform::is_gpu_place(place)) {
        // sum SelectedRows gradients first
        for (auto& var_info : tmp_grad_vars_) {
          if (!var_info.var->Var().IsType<framework::SelectedRows>()) {
            continue;
          }

          if (CurCnt() == 0) {
            MoveOrCopyVar(dst_var->MutableVar(), var_info.var->MutableVar(),
                          var_info.unchange_input);
          } else {
            VariableWrapperAdd(var_info.var, dst_var, var_info.unchange_input);
          }

          var_info.var = nullptr;
          // Increase count
          IncreaseCurCnt();
        }

        for (auto& var_info : tmp_grad_vars_) {
          if (!var_info.var) {
            continue;
          }

          PADDLE_ENFORCE_EQ(var_info.var->Var().IsType<framework::LoDTensor>(),
                            true, platform::errors::PermissionDenied(
                                      "Gradient var must be LoDTensor"));
          if (CurCnt() == 0) {
            MoveOrCopyVar(dst_var->MutableVar(), var_info.var->MutableVar(),
                          var_info.unchange_input);
          } else {
            VariableWrapperAdd(var_info.var, dst_var, var_info.unchange_input);
          }

          var_info.var = nullptr;
          // Increase count
          IncreaseCurCnt();
        }
      } else {
#endif
        for (auto& var_info : tmp_grad_vars_) {
          if (!var_info.var) {
            continue;
          }
          PADDLE_ENFORCE_EQ(
              var_info.var->Var().IsType<framework::LoDTensor>() ||
                  var_info.var->Var().IsType<framework::SelectedRows>(),
              true, platform::errors::PermissionDenied("The type of Gradient "
                                                       "var must be LoDTensor "
                                                       "or SelectedRows"));
          if (CurCnt() == 0) {
            MoveOrCopyVar(dst_var->MutableVar(), var_info.var->MutableVar(),
                          var_info.unchange_input);
          } else {
            VariableWrapperAdd(var_info.var, dst_var, var_info.unchange_input);
          }
          var_info.var = nullptr;
          // Increase count
          IncreaseCurCnt();
        }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      }
#endif
      tmp_grad_vars_.clear();
    }
  } else {
    if (!dst_var->Var().IsInitialized() ||
        !dst_var->Var().Get<framework::LoDTensor>().IsInitialized()) {
      VLOG(6) << "Set StopGradient Grad: " << var->Name() << " as zero";
      auto* dev_ctx = platform::DeviceContextPool::Instance().Get(place);
      if (!dst_var->Var().IsInitialized()) {
        auto* tensor =
            dst_var->MutableVar()->GetMutable<framework::LoDTensor>();
        VLOG(6) << "Dims of " << dst_var->Name() << " is set as: "
                << var->Var().Get<framework::LoDTensor>().dims();
        tensor->Resize(var->Var().Get<framework::LoDTensor>().dims());
        tensor->mutable_data(place, var->DataType());
        operators::math::set_constant(*dev_ctx, tensor, 0.0);
      } else {
        auto* tensor =
            dst_var->MutableVar()->GetMutable<framework::LoDTensor>();
        tensor->mutable_data(place, var->DataType());
        operators::math::set_constant(*dev_ctx, tensor, 0.0);
      }
    }
    // tmp_grad_vars_ should already be empty here, but clear it just in case
    tmp_grad_vars_.clear();
  }

  if (dst_var->Var().IsType<framework::LoDTensor>()) {
    dst_var->SetType(framework::proto::VarType::LOD_TENSOR);
  } else if (dst_var->Var().IsType<framework::SelectedRows>()) {
    dst_var->SetType(framework::proto::VarType::SELECTED_ROWS);
  }
}

}  // namespace imperative
}  // namespace paddle