accumulation_node.cc
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/imperative/gradient_accumulator.h"

#include "paddle/phi/api/all.h"
#include "paddle/phi/core/dense_tensor.h"

#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/errors.h"

#include "glog/logging.h"

namespace egr {

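// Copy `t` into `*tensor` when the destination is undefined or
// uninitialized; otherwise accumulate `t` into it, dispatching on whether
// each side is a DenseTensor or a SelectedRows-backed tensor.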
static void CopyOrAddTensor(paddle::experimental::Tensor* tensor,
                            const paddle::experimental::Tensor& t) {
  if (!tensor->defined() || !tensor->initialized()) {
    // Simply copy tensor->impl
    *tensor = t;
  } else {
    // Accumulation
    if (LIKELY(t.is_dense_tensor())) {
      if (LIKELY(tensor->is_dense_tensor())) {
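        // Both buffers are dense: accumulate in place with TensorAdd.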
        paddle::imperative::TensorAdd<paddle::experimental::Tensor>(t, tensor);
      } else {
        // TODO(jiabin): Support Other TensorBase later
        // TODO(zhanlve): Replace SelectedRowsAddTensor with
        // add_dygraph_function once it's supported
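        // Buffer is SelectedRows while the grad is dense: add them into a
        // fresh dense tensor and install it as the new buffer.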
        paddle::experimental::Tensor new_buffer(
            std::make_shared<phi::DenseTensor>(), "tmp_accumulator");
        paddle::imperative::SelectedRowsAddTensor(*tensor, t, &new_buffer);
        tensor->set_impl(new_buffer.impl());
      }
    } else {
      // TODO(jiabin): Support Other TensorBase later
      // TODO(zhanlve): Replace SelectedRowsAddTensor with add_dygraph_function
      // once it's supported
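      // Incoming grad is a SelectedRows tensor: add it into the dense
      // buffer, or merge the two SelectedRows buffers.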
      if (tensor->is_dense_tensor()) {
        paddle::imperative::SelectedRowsAddToTensor(t, tensor);
      } else {
        *tensor = std::move(*paddle::imperative::SelectedRowsMerge<
                            paddle::experimental::Tensor>(t, *tensor));
      }
    }
  }
}

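// Backward entry of GradNodeAccumulation: apply any registered gradient
// hooks, accumulate the resulting gradient into the tensor referenced by
// weak_grad_, run reduce hooks, and return the gradient.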
paddle::small_vector<std::vector<paddle::experimental::Tensor>,
                     kSlotSmallVectorSize>
GradNodeAccumulation::operator()(
    paddle::small_vector<std::vector<paddle::experimental::Tensor>,
                         kSlotSmallVectorSize>& grads,  // NOLINT
    bool create_graph,
    bool is_new_grad) {
  VLOG(3) << "Running Eager Backward Node: GradNodeAccumulation";
  PADDLE_ENFORCE(grads.size() == 1,
                 paddle::platform::errors::Fatal(
                     "GradNodeAccumulation should take exactly 1 grad tensor. "
                     "However, received: %d slots.",
                     grads.size()));
  PADDLE_ENFORCE(grads[0].size() == 1,
                 paddle::platform::errors::Fatal(
                     "GradNodeAccumulation should take exactly 1 grad tensor. "
                     "However, received: %d in slot %d.",
                     grads[0].size(), 0));
  // Apply Gradient Hooks
  paddle::experimental::Tensor grad_out;
  if (GradientHooksRegistered()) {
    paddle::small_vector<std::vector<paddle::experimental::Tensor>,
                         kSlotSmallVectorSize>
        hooked_grads = ApplyGradientHooks(grads);
    grad_out = hooked_grads[0][0];
  } else {
    grad_out = grads[0][0];
  }

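  // Accumulate into the referenced grad tensor only if it is still alive
  // and is_new_grad is not set.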
  if (!weak_grad_.expired() && !is_new_grad) {
    auto grad = weak_grad_.lock();
    CopyOrAddTensor(grad.get(), grad_out);
  }

  // Apply Reduce Hooks
  if (ReduceHooksRegistered()) {
    ApplyReduceHooks();
  }

  return {{grad_out}};
}

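// Queue a reduce hook; all queued hooks are run by ApplyReduceHooks().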
void GradNodeAccumulation::RegisterReduceHook(
    std::shared_ptr<TensorVoidHook>&& hook) {
  reduce_hooks_.emplace_back(std::move(hook));
}

void GradNodeAccumulation::ApplyReduceHooks() {
  for (auto& hook : reduce_hooks_) {
    (*hook)();
  }
}
}  // namespace egr