// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sstream>

#include "glog/logging.h"
#include "gtest/gtest.h"

#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/backward.h"
#include "paddle/fluid/eager/grad_node_info.h"

#include "paddle/fluid/eager/api/all.h"

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/tensor_meta.h"

#include "paddle/fluid/eager/tests/test_utils.h"

namespace egr {

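// Gradient hook used by both tests below: returns a new tensor equal to the
// incoming gradient with 3.0 added to every element.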
paddle::experimental::Tensor hook_function(
    const paddle::experimental::Tensor& t) {
  auto t_dense = std::dynamic_pointer_cast<phi::DenseTensor>(t.impl());

  auto ret_meta = phi::DenseTensorMeta(t_dense->dtype(), t_dense->dims(),
                                       t_dense->layout());
  auto place = t_dense->place();
  size_t bytes_size = phi::product(t_dense->dims()) * SizeOf(t_dense->dtype());
  auto ret_dense = std::make_shared<phi::DenseTensor>(
      phi::make_intrusive<paddle::experimental::SharedStorage>(
          paddle::memory::Alloc(place, bytes_size)),
      std::move(ret_meta));

  float* t_ptr = t_dense->mutable_data<float>(place);
  float* ret_ptr = ret_dense->mutable_data<float>(place);
  for (int64_t i = 0; i < ret_dense->numel(); i++) {
    ret_ptr[i] = t_ptr[i] + 3.0f;
  }

  auto ret_impl = std::dynamic_pointer_cast<phi::TensorBase>(ret_dense);
  paddle::experimental::Tensor ret;
  ret.set_impl(ret_impl);

  return ret;
}
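
// Both tests build the same graph:
//   target_tensor -> GradNodeScale (scale = 5.0) -> GradNodeAccumulation -> leaf_tensor
// and attach the +3 hook to both tensors. They differ only in whether the
// hook is registered before or after RetainGradForTensor, which changes the
// value captured as target_tensor's retained grad.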

TEST(RetainGrad, HookBeforeRetainGrad) {
  eager_test::InitEnv(paddle::platform::CPUPlace());

  // Prepare Inputs
  std::vector<paddle::experimental::Tensor> target_tensors;
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});

  // Create Target Tensor
  paddle::experimental::Tensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), phi::DataType::FLOAT32,
      phi::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor));
  paddle::experimental::Tensor& target_tensor = target_tensors[0];

  // Create ScaleNode
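  // GradNodeScale(bwd_in_slot_num, bwd_out_slot_num): one backward slot each.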
  auto scale_node_ptr = std::make_shared<GradNodeScale>(1, 1);
  scale_node_ptr->SetAttributes_scale(5.0 /*scale*/);

  // Set grad in/out meta for node0
  scale_node_ptr->SetDefaultGradInOutMeta();

  // Create AccumulationNode
  auto acc_node_ptr = std::make_shared<GradNodeAccumulation>();

  // Connect Input Tensor and ScaleNode via AutoGradMeta
  // Apply RetainGrad
  {
    // ScaleNode Hook: +3
    std::function<paddle::experimental::Tensor(
        const paddle::experimental::Tensor&)>
        hook = &hook_function;

    auto auto_grad_meta = std::make_shared<AutogradMeta>();
    auto_grad_meta->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(scale_node_ptr));
    auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
    auto_grad_meta->SetStopGradient(false);
    target_tensor.set_autograd_meta(
        std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
            auto_grad_meta));

    egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook);
    egr_utils_api::RetainGradForTensor(
        target_tensor);  // result: 1.0 + 3.0 = 4.0
  }

  // Connect ScaleNode -> AccumulationNode via Edge
  {
    auto meta = AutogradMeta();
    meta.SetStopGradient(false);
    meta.SetSingleOutRankWithSlot(0, 0);
    meta.SetGradNode(acc_node_ptr);
    std::vector<egr::AutogradMeta*> res = {&meta};
    scale_node_ptr->AddEdges(&res, 0);
  }

  // Retain grad for leaf_tensor
  paddle::experimental::Tensor leaf_tensor;
  {
    // AccumulationNode Hook: +3
    std::function<paddle::experimental::Tensor(
        const paddle::experimental::Tensor&)>
        hook = &hook_function;

    auto auto_grad_meta = std::make_shared<AutogradMeta>();
    auto_grad_meta->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
    auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
    leaf_tensor.set_autograd_meta(
        std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
            auto_grad_meta));

    egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook);
    egr_utils_api::RetainGradForTensor(
        leaf_tensor);  // result: 4.0*5.0 + 3.0 = 23.0
  }

  RunBackward(target_tensors, {});

  eager_test::CompareGradTensorWithValue<float>(target_tensor, 4.0);
  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
}
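
// Same graph as above, but RetainGradForTensor is called before the hook is
// registered. For the non-leaf target_tensor the grad is captured without the
// +3 hook applied (expect 1.0); for the leaf tensor the retain is postponed
// until backward runs, so the hooked value 23.0 is still observed.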

TEST(RetainGrad, HookAfterRetainGrad) {
  eager_test::InitEnv(paddle::platform::CPUPlace());

  // Prepare Inputs
  std::vector<paddle::experimental::Tensor> target_tensors;
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});

  // Create Target Tensor
  paddle::experimental::Tensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), phi::DataType::FLOAT32,
      phi::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor));
  paddle::experimental::Tensor& target_tensor = target_tensors[0];

  // Create ScaleNode
  auto scale_node_ptr = std::make_shared<GradNodeScale>(1, 1);
  scale_node_ptr->SetAttributes_scale(5.0 /*scale*/);
  // Set grad in/out meta for node0
  scale_node_ptr->SetDefaultGradInOutMeta();
  // Create AccumulationNode
  auto acc_node_ptr = std::make_shared<GradNodeAccumulation>();

  // Connect Input Tensor and ScaleNode via AutoGradMeta
  // Apply RetainGrad
  {
    // ScaleNode Hook: +3
    std::function<paddle::experimental::Tensor(
        const paddle::experimental::Tensor&)>
        hook = &hook_function;

    auto auto_grad_meta = std::make_shared<AutogradMeta>();
    auto_grad_meta->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(scale_node_ptr));
    auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
    auto_grad_meta->SetStopGradient(false);
    target_tensor.set_autograd_meta(
        std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
            auto_grad_meta));

    egr_utils_api::RetainGradForTensor(target_tensor);  // result: 1.0
    egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook);
  }

  // Connect ScaleNode -> AccumulationNode via Edge
  {
    auto meta = AutogradMeta();
    meta.SetStopGradient(false);
    meta.SetSingleOutRankWithSlot(0, 0);
    meta.SetGradNode(acc_node_ptr);
    std::vector<egr::AutogradMeta*> res = {&meta};
    scale_node_ptr->AddEdges(&res, 0);
  }

  // Retain grad for leaf_tensor
  paddle::experimental::Tensor leaf_tensor;
  {
    // AccumulationNode Hook: +3
    std::function<paddle::experimental::Tensor(
        const paddle::experimental::Tensor&)>
        hook = &hook_function;

    auto auto_grad_meta = std::make_shared<AutogradMeta>();
    auto_grad_meta->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
    auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
    leaf_tensor.set_autograd_meta(
        std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
            auto_grad_meta));

    egr_utils_api::RetainGradForTensor(
        leaf_tensor);  // RetainGrad for leaf tensor gets
                       // postponed, result: 4.0*5.0 + 3.0 =
                       // 23.0
    egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook);
  }

  RunBackward(target_tensors, {});
  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
}
}  // namespace egr