// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sstream>

#include "gtest/gtest.h"
#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/eager/grad_node_info.h"
#include "paddle/fluid/eager/tests/data_structure_tests/grad_node_test.h"
#include "paddle/fluid/eager/tests/test_utils.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);

namespace egr {

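// Exercises the autograd-meta helpers in EagerUtils: autograd_meta(),
// unsafe_autograd_meta(), OutRankInfo(), grad_node(), SetHistory() and
// SetOutRankWithSlot(), on both single tensors and vectors of tensors.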
TEST(EagerUtils, AutoGradMeta) {
  // Construct Eager Tensor
  phi::DenseTensorMeta meta =
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({1, 1}));
  std::shared_ptr<phi::DenseTensor> dt0 = std::make_shared<phi::DenseTensor>(
      std::make_unique<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace())
          .get(),
      meta);
  dt0->mutable_data<float>(paddle::platform::CPUPlace())[0] = 10.0;
  paddle::experimental::Tensor et0 = paddle::experimental::Tensor(dt0);

  std::shared_ptr<phi::DenseTensor> dt1 = std::make_shared<phi::DenseTensor>(
      std::make_unique<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace())
          .get(),
      meta);
  dt1->mutable_data<float>(paddle::platform::CPUPlace())[0] = 20.0;
  paddle::experimental::Tensor et1 = paddle::experimental::Tensor(dt1);

  // unsafe_autograd_meta()
  // autograd_meta()
  AutogradMeta* autograd_meta0 = EagerUtils::autograd_meta(&et0);
  AutogradMeta* autograd_meta1 = EagerUtils::autograd_meta(&et1);

  AutogradMeta* unsafe_autograd_meta_after =
      EagerUtils::unsafe_autograd_meta(et0);
  CHECK_NOTNULL(unsafe_autograd_meta_after);

  // NOTE: Since autograd_meta will be copied, make sure it's not null
  std::vector<paddle::experimental::Tensor> ets = {et0, et1};
  auto test_node = std::make_shared<eager_test::GradTestNode>();

  std::vector<AutogradMeta*> autograd_metas = EagerUtils::autograd_meta(&ets);
  std::vector<AutogradMeta*> unsafe_autograd_metas =
      EagerUtils::unsafe_autograd_meta(ets);
  CHECK_NOTNULL(unsafe_autograd_metas[0]);
  CHECK_NOTNULL(unsafe_autograd_metas[1]);

  // Set Autograd Meta
  autograd_meta0->SetSingleOutRankWithSlot(0, 1);

  autograd_meta0->SetGradNode(test_node);

  // OutRankInfo()
  std::pair<size_t, size_t> out_rank_info0 = EagerUtils::OutRankInfo(et0);
  CHECK_EQ(static_cast<int>(out_rank_info0.first), 0);
  CHECK_EQ(static_cast<int>(out_rank_info0.second), 1);

  // grad_node()
  std::shared_ptr<GradNodeBase> grad_node0 = EagerUtils::grad_node(et0);
  CHECK_NOTNULL(grad_node0.get());

  EagerUtils::SetHistory(autograd_meta1, test_node);
  EagerUtils::SetHistory({autograd_meta1}, test_node);
  std::shared_ptr<GradNodeBase> grad_node1 = EagerUtils::grad_node(et1);
  CHECK_NOTNULL(grad_node1.get());

  // SetOutRankWithSlot()
  EagerUtils::SetOutRankWithSlot(autograd_meta1, 0);
  std::pair<size_t, size_t> out_rank_info1 = EagerUtils::OutRankInfo(et1);
  CHECK_EQ(static_cast<int>(out_rank_info1.first), 0);
  CHECK_EQ(static_cast<int>(out_rank_info1.second), 0);

  EagerUtils::SetOutRankWithSlot(&autograd_metas, 0);
  std::pair<size_t, size_t> out_rank_info2 = EagerUtils::OutRankInfo(et0);
  CHECK_EQ(static_cast<int>(out_rank_info2.first), 0);
  CHECK_EQ(static_cast<int>(out_rank_info2.second), 0);

  std::pair<size_t, size_t> out_rank_info3 = EagerUtils::OutRankInfo(et1);
  CHECK_EQ(static_cast<int>(out_rank_info3.first), 0);
  CHECK_EQ(static_cast<int>(out_rank_info3.second), 1);
}

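// Helper: builds a paddle::experimental::Tensor backed by a CPU DenseTensor
// of shape `ddim`, with every element set to `val`.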
template <typename T>
paddle::experimental::Tensor CreateTestCPUTensor(
    T val, const paddle::framework::DDim& ddim) {
  phi::DenseTensorMeta meta =
      phi::DenseTensorMeta(phi::DataType::FLOAT32, ddim);
  paddle::experimental::Tensor tensor;
  std::shared_ptr<phi::DenseTensor> dt = std::make_shared<phi::DenseTensor>(
      std::make_unique<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace())
          .get(),
      meta);
  auto* dt_ptr = dt->mutable_data<T>(paddle::platform::CPUPlace());
  for (int64_t i = 0; i < dt->numel(); i++) {
    dt_ptr[i] = val;
  }
  tensor.set_impl(dt);
  return tensor;
}

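// Checks that ComputeRequireGrad() returns true only when trace_backward is
// true and at least one of the inputs has stop_gradient == false.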
TEST(EagerUtils, ComputeRequireGrad) {
  auto auto_grad0 = std::make_shared<egr::AutogradMeta>();
  auto auto_grad1 = std::make_shared<egr::AutogradMeta>();
  auto auto_grad2 = std::make_shared<egr::AutogradMeta>();
  auto auto_grad3 = std::make_shared<egr::AutogradMeta>();
  CHECK_EQ(auto_grad0->NumericStopGradient(), -1);
  VLOG(6) << "Single Test ComputeRequireGrad";
  auto_grad0->SetStopGradient(true);
  CHECK(egr::EagerUtils::ComputeRequireGrad(true, auto_grad0.get()) == false);
  CHECK(egr::EagerUtils::ComputeRequireGrad(false, auto_grad0.get()) == false);
  auto_grad0->SetStopGradient(false);
  CHECK(egr::EagerUtils::ComputeRequireGrad(false, auto_grad0.get()) == false);
  CHECK(egr::EagerUtils::ComputeRequireGrad(true, auto_grad0.get()) == true);

  VLOG(6) << "Multi Test ComputeRequireGrad";
  auto_grad0->SetStopGradient(false);
  auto_grad1->SetStopGradient(true);
  CHECK(egr::EagerUtils::ComputeRequireGrad(
            true, auto_grad0.get(), auto_grad1.get()) == true);
  CHECK(egr::EagerUtils::ComputeRequireGrad(
            false, auto_grad0.get(), auto_grad1.get()) == false);
  auto_grad0->SetStopGradient(true);
  CHECK(egr::EagerUtils::ComputeRequireGrad(
            true, auto_grad0.get(), auto_grad1.get()) == false);
  CHECK(egr::EagerUtils::ComputeRequireGrad(
            false, auto_grad0.get(), auto_grad1.get()) == false);
}

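// Checks that PassStopGradient() applies the given stop_gradient flag to the
// passed autograd metas, while auto_grad0, whose flag was already set to
// false, keeps its value.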
TEST(EagerUtils, PassStopGradient) {
  auto auto_grad0 = std::make_shared<egr::AutogradMeta>();
  auto auto_grad1 = std::make_shared<egr::AutogradMeta>();
  auto auto_grad2 = std::make_shared<egr::AutogradMeta>();
  auto auto_grad3 = std::make_shared<egr::AutogradMeta>();
  CHECK_EQ(auto_grad0->NumericStopGradient(), -1);
  VLOG(6) << "Test PassStopGradient";
  egr::EagerUtils::PassStopGradient(false, auto_grad0.get());
  CHECK(auto_grad0->StopGradient() == false);
  egr::EagerUtils::PassStopGradient(true,
                                    auto_grad0.get(),
                                    auto_grad1.get(),
                                    auto_grad2.get(),
                                    auto_grad3.get());
  CHECK(auto_grad0->StopGradient() == false);
  CHECK(auto_grad1->StopGradient() == true);
  CHECK(auto_grad2->StopGradient() == true);
  CHECK(auto_grad3->StopGradient() == true);
}

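// Checks that TrySyncToVar() yields an EagerVariable whose LoDTensor has the
// same numel and element values as the source tensor.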
TEST(EagerUtils, TrySyncToVar) {
  paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
  auto tensor = CreateTestCPUTensor(5.0f, ddim);
  std::vector<std::shared_ptr<egr::EagerVariable>> var_bases = {
      egr::EagerUtils::TrySyncToVar(tensor)};

  paddle::framework::Variable* var = var_bases[0]->MutableVar();
  const auto& framework_tensor = var->Get<paddle::framework::LoDTensor>();

  const float* ptr = framework_tensor.data<float>();
  VLOG(6) << "Check Value for SyncToVarsSingle";
  CHECK_EQ(framework_tensor.numel(), tensor.numel());

  for (int i = 0; i < framework_tensor.numel(); i++) {
    CHECK_EQ(ptr[i], 5.0f);
  }
}

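// Same as above, but through TrySyncToVars() on a vector of tensors.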
TEST(EagerUtils, TrySyncToVars) {
  paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
  std::vector<paddle::experimental::Tensor> tensors = {
      CreateTestCPUTensor(1.0f, ddim), CreateTestCPUTensor(2.0f, ddim)};

  std::vector<std::shared_ptr<egr::EagerVariable>> var_bases =
      egr::EagerUtils::TrySyncToVars(tensors);

  {
    paddle::framework::Variable* var = var_bases[0]->MutableVar();
    const auto& framework_tensor = var->Get<paddle::framework::LoDTensor>();

    const float* ptr = framework_tensor.data<float>();
    CHECK_EQ(framework_tensor.numel(), tensors[0].numel());

    for (int i = 0; i < framework_tensor.numel(); i++) {
      CHECK_EQ(ptr[i], 1.0);
    }
  }

  {
    paddle::framework::Variable* var = var_bases[1]->MutableVar();
    const auto& framework_tensor = var->Get<paddle::framework::LoDTensor>();

    const float* ptr = framework_tensor.data<float>();
    VLOG(6) << "Check Value for SyncToVarsMultiple";
    CHECK_EQ(framework_tensor.numel(), tensors[0].numel());

    for (int i = 0; i < framework_tensor.numel(); i++) {
      CHECK_EQ(ptr[i], 2.0);
    }
  }
}

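// Checks that CreateVars(n) returns n EagerVariables whose underlying
// framework::Variable is not yet initialized.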
TEST(EagerUtils, CreateVars) {
  VLOG(6) << "Check CreateVars";
  std::vector<std::shared_ptr<egr::EagerVariable>> outs =
      egr::EagerUtils::CreateVars(2);
  CHECK_EQ(outs.size(), size_t(2));
  CHECK(outs[0]->Var().IsInitialized() == false);
}

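// Checks GetGradAccumulationNode(): it returns nullptr for a freshly created
// tensor and for one whose stop_gradient is true, lazily creates and then
// reuses a GradNodeAccumulation when stop_gradient is false, and throws if a
// different grad node is already attached.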
TEST(EagerUtils, GetGradAccumulationNode) {
  VLOG(6) << "Check GetGradAccumulationNode";
  paddle::experimental::Tensor t0("test_tensor");
  ASSERT_EQ(egr::EagerUtils::GetGradAccumulationNode(t0), nullptr);
  auto autograd_ptr0 = egr::EagerUtils::autograd_meta(&t0);
  autograd_ptr0->SetStopGradient(true);
  ASSERT_EQ(egr::EagerUtils::GetGradAccumulationNode(t0), nullptr);
  autograd_ptr0->SetStopGradient(false);
  auto res = std::dynamic_pointer_cast<egr::GradNodeAccumulation>(
      egr::EagerUtils::GetGradAccumulationNode(t0));
  ASSERT_TRUE(res != nullptr);
  auto res2 = egr::EagerUtils::GetGradAccumulationNode(t0);
  ASSERT_EQ(res2.get(), res.get());
  autograd_ptr0->SetStopGradient(true);
  auto res3 = egr::EagerUtils::GetGradAccumulationNode(t0);
  ASSERT_EQ(res3, nullptr);
  autograd_ptr0->SetStopGradient(false);
  autograd_ptr0->SetGradNode(
      std::make_shared<eager_test::GradTestNode>(1, 2.0, 3));
  ASSERT_ANY_THROW(egr::EagerUtils::GetGradAccumulationNode(t0));
}

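// Checks that FillZeroForEmptyOptionalGradInput() materializes an empty grad
// slot as a zero-filled tensor with the dtype, dims and place recorded in the
// corresponding GradSlotMeta.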
TEST(EagerUtils, FillZeroForEmptyOptionalGradInput) {
  paddle::small_vector<std::vector<paddle::experimental::Tensor>,
                       egr::kSlotSmallVectorSize>
      grads = {std::vector<paddle::experimental::Tensor>(1)};
  paddle::small_vector<std::vector<GradSlotMeta>, egr::kSlotSmallVectorSize>
      slot_metas = {std::vector<GradSlotMeta>(1)};

  phi::DenseTensorMeta tensor_meta;
  tensor_meta.dtype = paddle::experimental::DataType::FLOAT32;
  tensor_meta.dims = {2, 4};
  slot_metas[0][0].SetTensorMeta(tensor_meta);
  slot_metas[0][0].SetPlace(phi::CPUPlace());

  EagerUtils::FillZeroForEmptyOptionalGradInput(&grads[0], slot_metas[0]);
  eager_test::CompareTensorWithValue<float>(grads[0][0], 0.0);
}

}  // namespace egr