// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/eager/backward.h"

#include <sstream>

#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/grad_node_info.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "test/cpp/eager/test_utils.h"

PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);

namespace egr {

// Single GradNodeScale(scale=5) whose output feeds an accumulation leaf.
// Backward is invoked with an empty grad list, so the initial gradient
// defaults to ones; the leaf's grad is therefore expected to equal the
// scale factor (5.0).
TEST(Backward, SingleNodeEmptyGrad) {
  // Prepare Device Contexts
  eager_test::InitEnv(paddle::platform::CPUPlace());

  // Prepare Inputs
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});

  // Create Target Tensor
  paddle::Tensor target_tensor =
      eager_test::CreateTensorWithValue(ddim,
                                        paddle::platform::CPUPlace(),
                                        phi::DataType::FLOAT32,
                                        phi::DataLayout::NCHW,
                                        1.0 /*value*/,
                                        false /*is_leaf*/);

  paddle::Tensor leaf_tensor;
  {
    // Create Scale Node
    auto node0_ptr = std::make_shared<GradNodeScale>(1, 1);
    node0_ptr->SetAttributes_scale(5.0 /*scale*/);

    // Set grad in/out meta
    node0_ptr->SetDefaultGradInOutMeta();
    AutogradMeta* auto_grad_meta = EagerUtils::autograd_meta(&target_tensor);
    auto_grad_meta->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(node0_ptr));
    auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
    auto_grad_meta->SetStopGradient(false);

    AutogradMeta* auto_grad_meta1 = EagerUtils::autograd_meta(&leaf_tensor);

    // Connect Tensor and AccumulationNode via AutoGradMeta
    auto acc_node_ptr =
        std::make_shared<egr::GradNodeAccumulation>(auto_grad_meta1);

    auto_grad_meta1->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
    auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
    auto_grad_meta1->SetStopGradient(false);

    // Register the leaf as the grad output of the scale node
    node0_ptr->SetGradOutMeta({leaf_tensor}, 0);
  }
  std::vector<paddle::Tensor> outs = {target_tensor};
  // Run Backward with no explicit grad tensors (implicit ones)
  Backward(outs, {});

  // Check Output Value: 1.0 (implicit grad) * 5.0 (scale) == 5.0
  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
}

// Single GradNodeScale(scale=5) driven by an explicit initial grad tensor
// filled with 10.0; the accumulation leaf should receive 10.0 * 5.0 == 50.0.
TEST(Backward, SingleNodeCustomGrad) {
  // Prepare Device Contexts
  eager_test::InitEnv(paddle::platform::CPUPlace());

  // Prepare Inputs
  std::vector<paddle::Tensor> target_tensors;
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});

  // Create Target Tensor
  paddle::Tensor tensor =
      eager_test::CreateTensorWithValue(ddim,
                                        paddle::platform::CPUPlace(),
                                        phi::DataType::FLOAT32,
                                        phi::DataLayout::NCHW,
                                        1.0 /*value*/,
                                        false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor));

  std::vector<paddle::Tensor> grad_tensors;
  // Create Grad Tensor (explicit initial gradient, value 10.0)
  paddle::Tensor grad_tensor =
      eager_test::CreateTensorWithValue(ddim,
                                        paddle::platform::CPUPlace(),
                                        phi::DataType::FLOAT32,
                                        phi::DataLayout::NCHW,
                                        10.0 /*value*/,
                                        false /*is_leaf*/);
  grad_tensors.emplace_back(std::move(grad_tensor));

  paddle::Tensor leaf_tensor;
  {
    // Create Scale Node
    auto node0_ptr = std::make_shared<GradNodeScale>(1, 1);
    node0_ptr->SetAttributes_scale(5.0 /*scale*/);

    // Set grad in/out meta
    node0_ptr->SetDefaultGradInOutMeta();

    // Connect Tensor and Node via AutoGradMeta
    AutogradMeta* auto_grad_meta =
        EagerUtils::autograd_meta(&(target_tensors[0]));
    auto_grad_meta->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(node0_ptr));
    auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
    auto_grad_meta->SetStopGradient(false);

    AutogradMeta* auto_grad_meta1 = EagerUtils::autograd_meta(&leaf_tensor);
    // Connect Tensor and AccumulationNode via AutoGradMeta
    auto acc_node_ptr =
        std::make_shared<egr::GradNodeAccumulation>(auto_grad_meta1);

    auto_grad_meta1->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
    auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
    auto_grad_meta1->SetStopGradient(false);
    node0_ptr->SetGradOutMeta({leaf_tensor}, 0);
  }

  // Run Backward with the explicit grad tensor
  Backward(target_tensors, grad_tensors);

  // Check Output Value: 10.0 (initial grad) * 5.0 (scale) == 50.0
  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
}

/*
Node1
  |
Node0
  |
 inp0
*/
// Two scale nodes chained Node0(scale=5) -> Node1(scale=10). With the
// implicit all-ones initial grad, the leaf should receive 5.0 * 10.0 == 50.0.
TEST(Backward, LinearNodes) {
  // Prepare Device Contexts
  eager_test::InitEnv(paddle::platform::CPUPlace());

  // Prepare Inputs
  std::vector<paddle::Tensor> target_tensors;
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});

  // Create Target Tensor
  paddle::Tensor tensor =
      eager_test::CreateTensorWithValue(ddim,
                                        paddle::platform::CPUPlace(),
                                        phi::DataType::FLOAT32,
                                        phi::DataLayout::NCHW,
                                        1.0 /*value*/,
                                        false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor));

  paddle::Tensor leaf_tensor;
  {
    // Create Node0
    auto node0_ptr = std::make_shared<GradNodeScale>(1, 1);
    node0_ptr->SetAttributes_scale(5.0 /*scale*/);

    // Set grad in/out meta for node0
    node0_ptr->SetDefaultGradInOutMeta();

    // Create Node1
    auto node1_ptr = std::make_shared<GradNodeScale>(1, 1);
    node1_ptr->SetAttributes_scale(10.0 /*scale*/);

    // Set grad in/out meta for node1
    node1_ptr->SetDefaultGradInOutMeta();

    // Connect Input Tensor and Node0 via AutoGradMeta
    AutogradMeta* auto_grad_meta =
        EagerUtils::autograd_meta(&(target_tensors[0]));
    auto_grad_meta->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(node0_ptr));
    auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
    auto_grad_meta->SetStopGradient(false);
    // Connect Node0 -> Node1 via Edge (through an intermediate tensor's meta)
    auto tmp_tensor = paddle::Tensor();
    auto* meta0 = EagerUtils::autograd_meta(&tmp_tensor);
    meta0->SetStopGradient(false);
    meta0->SetSingleOutRankWithSlot(0, 0);
    meta0->SetGradNode(node1_ptr);
    node0_ptr->SetGradOutMeta(tmp_tensor, 0);

    AutogradMeta* auto_grad_meta1 = EagerUtils::autograd_meta(&leaf_tensor);
    // Connect Tensor and AccumulationNode via AutoGradMeta
    auto acc_node_ptr =
        std::make_shared<egr::GradNodeAccumulation>(auto_grad_meta1);

    auto_grad_meta1->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
    auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);

    auto_grad_meta1->SetStopGradient(false);
    node1_ptr->SetGradOutMeta(leaf_tensor, 0);
  }

  // Use Empty Grad Tensor (implicit ones)
  Backward(target_tensors, {});

  // Check Output Value: 1.0 * 5.0 (Node0) * 10.0 (Node1) == 50.0
  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
}

/*
    Node2
    |   |
Node0   Node1
  |      |
 inp0   inp1
*/
// Two branches (Node0 scale=5 with grad 5.0, Node1 scale=10 with grad 10.0)
// accumulate into Node2 (scale=20). Expected leaf grad:
// (5.0 * 5.0 + 10.0 * 10.0) * 20.0 == 2500.0.
TEST(Backward, WithAccumulation) {
  // Prepare Device Contexts
  eager_test::InitEnv(paddle::platform::CPUPlace());

  // Prepare Inputs
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});

  // Create Target Tensor
  std::vector<paddle::Tensor> target_tensors;
  paddle::Tensor tensor0 =
      eager_test::CreateTensorWithValue(ddim,
                                        paddle::platform::CPUPlace(),
                                        phi::DataType::FLOAT32,
                                        phi::DataLayout::NCHW,
                                        1.0 /*value*/,
                                        false /*is_leaf*/);
  paddle::Tensor tensor1 =
      eager_test::CreateTensorWithValue(ddim,
                                        paddle::platform::CPUPlace(),
                                        phi::DataType::FLOAT32,
                                        phi::DataLayout::NCHW,
                                        1.0 /*value*/,
                                        false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor0));
  target_tensors.emplace_back(std::move(tensor1));

  // Create Grad Tensor
  std::vector<paddle::Tensor> grad_tensors;
  paddle::Tensor grad_tensor0 =
      eager_test::CreateTensorWithValue(ddim,
                                        paddle::platform::CPUPlace(),
                                        phi::DataType::FLOAT32,
                                        phi::DataLayout::NCHW,
                                        5.0 /*value*/,
                                        false /*is_leaf*/);
  paddle::Tensor grad_tensor1 =
      eager_test::CreateTensorWithValue(ddim,
                                        paddle::platform::CPUPlace(),
                                        phi::DataType::FLOAT32,
                                        phi::DataLayout::NCHW,
                                        10.0 /*value*/,
                                        false /*is_leaf*/);
  grad_tensors.emplace_back(std::move(grad_tensor0));
  grad_tensors.emplace_back(std::move(grad_tensor1));

  paddle::Tensor leaf_tensor;
  {
    // Create Node0
    auto node0_ptr = std::make_shared<GradNodeScale>(1, 1);
    node0_ptr->SetAttributes_scale(5.0 /*scale*/);
    node0_ptr->SetDefaultGradInOutMeta();

    // Create Node1
    auto node1_ptr = std::make_shared<GradNodeScale>(1, 1);
    node1_ptr->SetAttributes_scale(10.0 /*scale*/);
    node1_ptr->SetDefaultGradInOutMeta();
    // Create Node2
    auto node2_ptr = std::make_shared<GradNodeScale>(1, 1);
    node2_ptr->SetAttributes_scale(20.0 /*scale*/);
    node2_ptr->SetDefaultGradInOutMeta();
    // Connect Inp0 and Node0 via AutoGradMeta
    AutogradMeta* auto_grad_meta0 =
        EagerUtils::autograd_meta(&(target_tensors[0]));
    auto_grad_meta0->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(node0_ptr));
    auto_grad_meta0->SetSingleOutRankWithSlot(0, 0);
    auto_grad_meta0->SetStopGradient(false);
    // Connect Inp1 and Node1 via AutoGradMeta
    AutogradMeta* auto_grad_meta1 =
        EagerUtils::autograd_meta(&(target_tensors[1]));
    auto_grad_meta1->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(node1_ptr));
    auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
    auto_grad_meta1->SetStopGradient(false);

    // Connect Node0 -> Node2 via Edge
    auto tmp_tensor0 = paddle::Tensor();
    auto* meta0 = EagerUtils::autograd_meta(&tmp_tensor0);
    meta0->SetStopGradient(false);
    meta0->SetSingleOutRankWithSlot(0, 0);
    meta0->SetGradNode(node2_ptr);
    node0_ptr->SetGradOutMeta(tmp_tensor0, 0);

    // Connect Node1 -> Node2 via Edge
    auto tmp_tensor1 = paddle::Tensor();
    auto* meta1 = EagerUtils::autograd_meta(&tmp_tensor1);
    meta1->SetStopGradient(false);
    meta1->SetSingleOutRankWithSlot(0, 0);
    meta1->SetGradNode(node2_ptr);
    node1_ptr->SetGradOutMeta(tmp_tensor1, 0);

    AutogradMeta* auto_grad_meta2 = EagerUtils::autograd_meta(&leaf_tensor);
    // Connect Tensor and AccumulationNode via AutoGradMeta
    auto acc_node_ptr =
        std::make_shared<egr::GradNodeAccumulation>(auto_grad_meta2);

    auto_grad_meta2->SetGradNode(
        std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
    auto_grad_meta2->SetSingleOutRankWithSlot(0, 0);
    auto_grad_meta2->SetStopGradient(false);
    node2_ptr->SetGradOutMeta(leaf_tensor, 0);
  }

  Backward(target_tensors, grad_tensors);

  // (5*5 + 10*10) * 20 == 2500
  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 2500.0);
}

}  // namespace egr