Unverified · Commit 3d2ec707 · Authored by Zhanlue Yang, committed by GitHub

Eager dygraph egr_utils_api namespace refactor (#37654)

* Refactored eager legacy namespace

* Fixed namespace issues
Parent eb9e3305
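The whole change follows one pattern: the eager utility API (the hook and tensor helpers diffed below) moves from namespace egr into a nested egr::egr_utils_api, and every call site is updated to qualify it explicitly. A minimal, self-contained sketch of that pattern follows; the tensor type and the function body here are stand-ins, not Paddle's real implementations.

    #include <iostream>

    struct EagerTensor {};  // stand-in for egr::EagerTensor

    namespace egr {
    namespace egr_utils_api {  // nested namespace introduced by this commit
    // Stand-in body; the real function marks the tensor so its gradient
    // is kept around after backward runs.
    void RetainGradForTensor(const EagerTensor& /*tensor*/) {
      std::cout << "grad retention registered\n";
    }
    }  // namespace egr_utils_api
    }  // namespace egr

    int main() {
      EagerTensor t;
      // Code already inside namespace egr (e.g. the refactored tests below)
      // writes egr_utils_api::RetainGradForTensor(t); everywhere else the
      // fully qualified form is:
      egr::egr_utils_api::RetainGradForTensor(t);
      return 0;
    }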
@@ -20,6 +20,7 @@
 #include "paddle/pten/core/dense_tensor.h"
 namespace egr {
+namespace egr_utils_api {
 void RegisterGradientHookForTensor(
     const egr::EagerTensor& tensor,
@@ -90,4 +91,5 @@ void RetainGradForTensor(const egr::EagerTensor& tensor) {
   }
 }
+}  // namespace egr_utils_api
 }  // namespace egr
@@ -18,6 +18,7 @@
 #include "paddle/fluid/eager/grad_node_info.h"
 #include "paddle/pten/api/all.h"
 namespace egr {
+namespace egr_utils_api {
 void RegisterGradientHookForTensor(
     const egr::EagerTensor& tensor,
@@ -27,4 +28,5 @@ void RegisterReduceHookForTensor(const egr::EagerTensor& tensor,
                                  const std::function<void(void)>& hook);
 void RetainGradForTensor(const egr::EagerTensor& tensor);
+}  // namespace egr_utils_api
 }  // namespace egr
@@ -26,6 +26,7 @@
 #include "paddle/fluid/framework/variable.h"
 namespace egr {
+namespace egr_utils_api {
 bool IsLeafTensor(const egr::EagerTensor& target) {
   std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(target);
@@ -58,4 +59,5 @@ egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim,
   return out;
 }
+}  // namespace egr_utils_api
 }  // namespace egr
@@ -18,6 +18,7 @@
 #include "paddle/pten/api/all.h"
 namespace egr {
+namespace egr_utils_api {
 // If and only if the tensor holds an AccumulationNode
 // Then it's treated as a leaf tensor
@@ -29,4 +30,5 @@ egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim,
                                        const pten::DataLayout& layout,
                                        float value, bool is_leaf = true);
+}  // namespace egr_utils_api
 }  // namespace egr
@@ -32,8 +32,8 @@
 #include "gperftools/profiler.h"
 #endif
 // TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+using namespace egr;            // NOLINT
+using namespace egr_utils_api;  // NOLINT
 // Disable pten path
 DECLARE_bool(run_pten_kernel);
@@ -31,8 +31,8 @@
 #include "gperftools/profiler.h"
 #endif
 // TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+using namespace egr;            // NOLINT
+using namespace egr_utils_api;  // NOLINT
 DECLARE_bool(run_pten_kernel);
@@ -30,19 +30,17 @@
 #include "paddle/pten/core/dense_tensor.h"
 #include "paddle/pten/core/tensor_meta.h"
-using namespace egr;  // NOLINT
-namespace eager_test {
+namespace egr {
 TEST(Backward, SingleNodeEmptyGrad) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor target_tensor = CreateTensorWithValue(
+  egr::EagerTensor target_tensor = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
@@ -67,7 +65,7 @@ TEST(Backward, SingleNodeEmptyGrad) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
   // Connect Node0 -> AccumulationNode via Edge
   auto meta = egr::AutogradMeta();
@@ -80,26 +78,26 @@ TEST(Backward, SingleNodeEmptyGrad) {
   RunBackward(outs, {});
   // Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
 }
 TEST(Backward, SingleNodeCustomGrad) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
   std::vector<egr::EagerTensor> grad_tensors;
   // Create Grad Tensor
-  egr::EagerTensor grad_tensor = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 10.0 /*value*/, false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor));
@@ -128,7 +126,7 @@ TEST(Backward, SingleNodeCustomGrad) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
   // Connect Node0 -> AccumulationNode via Edge
   auto meta = egr::AutogradMeta();
@@ -141,7 +139,7 @@ TEST(Backward, SingleNodeCustomGrad) {
   RunBackward(target_tensors, grad_tensors);
   // Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
 }
 /*
@@ -153,14 +151,14 @@ Node0
 */
 TEST(Backward, LinearNodes) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -202,7 +200,7 @@ TEST(Backward, LinearNodes) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
   // Connect Node1 -> AccumulationNode via Edge
   auto meta1 = egr::AutogradMeta();
@@ -215,7 +213,7 @@ TEST(Backward, LinearNodes) {
   RunBackward(target_tensors, {});
   // Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
 }
 /*
@@ -227,17 +225,17 @@ Node0 Node1
 */
 TEST(Backward, WithAccumulation) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
   std::vector<egr::EagerTensor> target_tensors;
-  egr::EagerTensor tensor0 = CreateTensorWithValue(
+  egr::EagerTensor tensor0 = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
-  egr::EagerTensor tensor1 = CreateTensorWithValue(
+  egr::EagerTensor tensor1 = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor0));
@@ -245,10 +243,10 @@ TEST(Backward, WithAccumulation) {
   // Create Grad Tensor
   std::vector<egr::EagerTensor> grad_tensors;
-  egr::EagerTensor grad_tensor0 = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor0 = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
-  egr::EagerTensor grad_tensor1 = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor1 = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 10.0 /*value*/, false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor0));
@@ -303,7 +301,7 @@ TEST(Backward, WithAccumulation) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta2->SetSingleOutRankWithSlot(0, 0);
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
   // Connect Node2 -> AccumulationNode via Edge
   auto meta2 = egr::AutogradMeta();
@@ -314,7 +312,7 @@ TEST(Backward, WithAccumulation) {
   RunBackward(target_tensors, grad_tensors);
-  CompareGradTensorWithValue<float>(leaf_tensor, 2500.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 2500.0);
 }
-}  // namespace eager_test
+}  // namespace egr
@@ -31,17 +31,15 @@
 #include "paddle/fluid/eager/tests/test_utils.h"
-using namespace egr;  // NOLINT
-namespace eager_test {
+namespace egr {
 TEST(CrossBatchAccumulation, SingleScaleNode) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -60,7 +58,7 @@ TEST(CrossBatchAccumulation, SingleScaleNode) {
   auto_grad_meta->SetGradNode(
       std::dynamic_pointer_cast<GradNodeBase>(scale_node_ptr));
   auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
-  RetainGradForTensor(target_tensor);  // result: 1.0
+  egr_utils_api::RetainGradForTensor(target_tensor);  // result: 1.0
   auto meta = AutogradMeta();
   meta.SetSingleOutRankWithSlot(0, 0);
@@ -71,18 +69,18 @@ TEST(CrossBatchAccumulation, SingleScaleNode) {
   auto_grad_meta1->SetGradNode(
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
-  RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
   }
   RunBackward(target_tensors, {});
-  CompareGradTensorWithValue<float>(target_tensor, 1.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
   RunBackward(target_tensors, {});
-  CompareGradTensorWithValue<float>(target_tensor, 1.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 10.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 10.0);
 }
-}  // namespace eager_test
+}  // namespace egr
@@ -24,10 +24,7 @@
 #include "paddle/pten/api/lib/utils/allocator.h"
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
-namespace eager_test {
+namespace egr {
 TEST(EagerUtils, AutoGradMeta) {
   // Construct Eager Tensor
@@ -167,7 +164,7 @@ TEST(EagerUtils, PassStopGradient) {
 TEST(EagerUtils, SyncToVarsSingle) {
   paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
-  auto tensor = eager_test::CreateTestCPUTensor(5.0f, ddim);
+  auto tensor = CreateTestCPUTensor(5.0f, ddim);
   std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
       egr::EagerUtils::SyncToVars(tensor);
@@ -185,9 +182,8 @@ TEST(EagerUtils, SyncToVarsSingle) {
 TEST(EagerUtils, SyncToVarsMultiple) {
   paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
-  std::vector<egr::EagerTensor> tensors = {
-      eager_test::CreateTestCPUTensor(1.0f, ddim),
-      eager_test::CreateTestCPUTensor(2.0f, ddim)};
+  std::vector<egr::EagerTensor> tensors = {CreateTestCPUTensor(1.0f, ddim),
+                                           CreateTestCPUTensor(2.0f, ddim)};
   std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
       egr::EagerUtils::SyncToVars(tensors);
@@ -280,4 +276,4 @@ TEST(EagerUtils, ConstructDuplicableOutput) {
   CHECK(outs[0]->initialized() == false);
 }
-}  // namespace eager_test
+}  // namespace egr
@@ -27,21 +27,18 @@
 #include "paddle/pten/core/dense_tensor.h"
 #include "paddle/pten/core/tensor_meta.h"
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
-namespace eager_test {
+namespace egr {
 TEST(Forward, SingleNode) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
@@ -55,7 +52,7 @@ TEST(Forward, SingleNode) {
      tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output
-  CompareTensorWithValue<float>(out, 13.0);
+  eager_test::CompareTensorWithValue<float>(out, 13.0);
   // Examine GradNode
   {
@@ -80,14 +77,14 @@ Node1
  out
 */
 TEST(Forward, LinearNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
@@ -108,10 +105,10 @@ TEST(Forward, LinearNodes) {
      out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
   // Examine GradNode
   {
@@ -156,14 +153,14 @@ TEST(Forward, LinearNodes) {
  out1 out2
 */
 TEST(Forward, BranchedNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
@@ -190,13 +187,13 @@ TEST(Forward, BranchedNodes) {
      out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
   // Examine Forward Output 2
-  CompareTensorWithValue<float>(out2, 150.0);
+  eager_test::CompareTensorWithValue<float>(out2, 150.0);
   // Examine GradNode
   {
@@ -248,4 +245,4 @@ TEST(Forward, BranchedNodes) {
   }
 }
-}  // namespace eager_test
+}  // namespace egr
@@ -29,10 +29,7 @@
 #include "paddle/fluid/eager/tests/test_utils.h"
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
-namespace eager_test {
+namespace egr {
 egr::EagerTensor hook_function(const egr::EagerTensor& t) {
   auto t_dense = std::dynamic_pointer_cast<pten::DenseTensor>(t.impl());
@@ -61,14 +58,14 @@ egr::EagerTensor hook_function(const egr::EagerTensor& t) {
 }
 TEST(FwdBwdJoint, SingleNode) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   float scale = 2.0;
@@ -77,7 +74,7 @@ TEST(FwdBwdJoint, SingleNode) {
      tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output
-  CompareTensorWithValue<float>(out, 13.0);
+  eager_test::CompareTensorWithValue<float>(out, 13.0);
   std::vector<egr::EagerTensor> outs = {out};
   // 4. Run Backward
@@ -88,7 +85,7 @@ TEST(FwdBwdJoint, SingleNode) {
          EagerUtils::unsafe_autograd_meta(tensor)->Grad().impl())
          ->data<float>()[0];
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 2.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 2.0);
 }
 /*
@@ -101,14 +98,14 @@ Node1
  out
 */
 TEST(FwdBwdJoint, LinearNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   // Run Forward Node 0
@@ -125,17 +122,17 @@ TEST(FwdBwdJoint, LinearNodes) {
      out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
   std::vector<egr::EagerTensor> outs = {out1};
   // 4. Run Backward
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 10.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 10.0);
 }
 /*
@@ -149,14 +146,14 @@ TEST(FwdBwdJoint, LinearNodes) {
  out1 out2
 */
 TEST(FwdBwdJoint, BranchedNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   // Run Forward Node 0
@@ -179,10 +176,10 @@ TEST(FwdBwdJoint, BranchedNodes) {
      out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
   // Examine Forward Output 2
   {
@@ -201,7 +198,7 @@ TEST(FwdBwdJoint, BranchedNodes) {
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 30.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 30.0);
 }
 /*
@@ -215,14 +212,14 @@ TEST(FwdBwdJoint, BranchedNodes) {
  out1 out2
 */
 TEST(FwdBwdJoint, GradientHook) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   std::function<egr::EagerTensor(const egr::EagerTensor&)> hook =
       &hook_function;
@@ -234,24 +231,24 @@ TEST(FwdBwdJoint, GradientHook) {
   egr::EagerTensor out0 =
       egr::scale(tensor, scale0, bias0, true /*bias_after_scale*/,
                  true /*trace_backward*/);
-  RetainGradForTensor(out0);                  // hook: +5
-  RegisterGradientHookForTensor(out0, hook);  // hook: +5
+  egr_utils_api::RetainGradForTensor(out0);                  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out0, hook);  // hook: +5
   // Run Forward Node 1
   float scale1 = 5.0;
   float bias1 = 10.0;
   egr::EagerTensor out1 = egr::scale(
      out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/);
-  RetainGradForTensor(out1);                  // hook: +5
-  RegisterGradientHookForTensor(out1, hook);  // hook: +5
+  egr_utils_api::RetainGradForTensor(out1);                  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out1, hook);  // hook: +5
   // Run Forward Node 2
   float scale2 = 10.0;
   float bias2 = 20.0;
   egr::EagerTensor out2 = egr::scale(
      out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
-  RetainGradForTensor(out2);                  // hook: +5
-  RegisterGradientHookForTensor(out2, hook);  // hook: +5
+  egr_utils_api::RetainGradForTensor(out2);                  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out2, hook);  // hook: +5
   // 4. Run Backward
   std::vector<egr::EagerTensor> outs = {out1, out2};
@@ -259,16 +256,16 @@ TEST(FwdBwdJoint, GradientHook) {
   // Examine Backward Grad
   // leaf grad
-  CompareGradTensorWithValue<float>(tensor, 190.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 190.0);
   // out0 grad
-  CompareGradTensorWithValue<float>(out0, 90.0);
+  eager_test::CompareGradTensorWithValue<float>(out0, 90.0);
   // out1 grad
-  CompareGradTensorWithValue<float>(out1, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(out1, 1.0);
   // out2 grad
-  CompareGradTensorWithValue<float>(out2, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(out2, 1.0);
 }
 /*
@@ -282,14 +279,14 @@ TEST(FwdBwdJoint, GradientHook) {
  out1 out2
 */
 TEST(FwdBwdJoint, CrossBatchAccumulation) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   // Run Forward Node 0
@@ -316,13 +313,13 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) {
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 30.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 30.0);
   // Cross Batch Accumulation
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 60.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 60.0);
 }
 /* ---------------------------------------------------- */
@@ -331,14 +328,14 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 TEST(FwdBwdJoint, SingleNodeCUDA) {
-  InitEnv(paddle::platform::CUDAPlace());
+  eager_test::InitEnv(paddle::platform::CUDAPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CUDAPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   float scale = 2.0;
@@ -347,14 +344,14 @@ TEST(FwdBwdJoint, SingleNodeCUDA) {
      tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output
-  CompareTensorWithValue<float>(out, 13.0);
+  eager_test::CompareTensorWithValue<float>(out, 13.0);
   std::vector<egr::EagerTensor> outs = {out};
   // 4. Run Backward
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 2.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 2.0);
 }
 /*
@@ -368,14 +365,14 @@ TEST(FwdBwdJoint, SingleNodeCUDA) {
  out1 out2
 */
 TEST(FwdBwdJoint, BranchedNodesCUDA) {
-  InitEnv(paddle::platform::CUDAPlace());
+  eager_test::InitEnv(paddle::platform::CUDAPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CUDAPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   // Run Forward Node 0
@@ -398,11 +395,11 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) {
      out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
   // Examine Forward Output 2
-  CompareTensorWithValue<float>(out2, 150.0);
+  eager_test::CompareTensorWithValue<float>(out2, 150.0);
   // TODO(jiabin): fix this with add functor
   // 4. Run Backward
@@ -410,8 +407,8 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) {
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 30.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 30.0);
 }
 #endif
-}  // namespace eager_test
+}  // namespace egr
@@ -30,66 +30,63 @@
 #include "paddle/fluid/eager/api/generated/fluid_generated/dygraph_forward_api.h"
 #include "paddle/pten/core/kernel_registry.h"
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
-namespace eager_test {
+namespace egr {
 TEST(Generated, Sigmoid) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   VLOG(6) << "Init Env";
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
   VLOG(6) << "Make Dim";
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 0.0, true);
   VLOG(6) << "Make EagerTensor";
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   VLOG(6) << "Retain Grad for Tensor";
   auto output_tensor = sigmoid_dygraph_function(tensor, {});
   VLOG(6) << "Run Backward";
-  CompareVariableWithValue<float>(output_tensor, 0.5);
+  eager_test::CompareVariableWithValue<float>(output_tensor, 0.5);
   std::vector<egr::EagerTensor> target_tensors = {output_tensor};
   VLOG(6) << "Runing Backward";
   RunBackward(target_tensors, {});
   VLOG(6) << "Finish Backward";
-  CompareGradVariableWithValue<float>(tensor, 0.25);
+  eager_test::CompareGradVariableWithValue<float>(tensor, 0.25);
 }
 TEST(Generated, Matmul_v2) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   auto tracer = std::make_shared<paddle::imperative::Tracer>();
   paddle::imperative::SetCurrentTracer(tracer);
   // 1. Prepare Input
   paddle::framework::DDim ddimX = paddle::framework::make_ddim({4, 16});
-  egr::EagerTensor X = CreateTensorWithValue(
+  egr::EagerTensor X = egr_utils_api::CreateTensorWithValue(
      ddimX, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 3.0, true);
-  RetainGradForTensor(X);
+  egr_utils_api::RetainGradForTensor(X);
   paddle::framework::DDim ddimY = paddle::framework::make_ddim({16, 20});
-  egr::EagerTensor Y = CreateTensorWithValue(
+  egr::EagerTensor Y = egr_utils_api::CreateTensorWithValue(
      ddimY, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 2.0, true);
-  RetainGradForTensor(Y);
+  egr_utils_api::RetainGradForTensor(Y);
   auto output_tensor = matmul_v2_dygraph_function(
      X, Y, {{"trans_x", false}, {"trans_y", false}});
-  CompareVariableWithValue<float>(output_tensor, 96);
+  eager_test::CompareVariableWithValue<float>(output_tensor, 96);
   std::vector<egr::EagerTensor> target_tensors = {output_tensor};
   RunBackward(target_tensors, {});
-  CompareGradVariableWithValue<float>(X, 2.0 * 20);
-  CompareGradVariableWithValue<float>(Y, 3.0 * 4);
+  eager_test::CompareGradVariableWithValue<float>(X, 2.0 * 20);
+  eager_test::CompareGradVariableWithValue<float>(Y, 3.0 * 4);
 }
-}  // namespace eager_test
+}  // namespace egr
@@ -30,9 +30,7 @@
 #include "paddle/fluid/eager/tests/test_utils.h"
-using namespace egr;  // NOLINT
-namespace eager_test {
+namespace egr {
 egr::EagerTensor hook_function(const egr::EagerTensor& t) {
   auto t_dense = std::dynamic_pointer_cast<pten::DenseTensor>(t.impl());
@@ -61,14 +59,14 @@ egr::EagerTensor hook_function(const egr::EagerTensor& t) {
 }
 TEST(RetainGrad, HookBeforeRetainGrad) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -99,8 +97,9 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
        std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
            auto_grad_meta));
-    RegisterGradientHookForTensor(target_tensor, hook);
-    RetainGradForTensor(target_tensor);  // result: 1.0 + 3.0 = 4.0
+    egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook);
+    egr_utils_api::RetainGradForTensor(
+        target_tensor);  // result: 1.0 + 3.0 = 4.0
   }
   // Connect ScaleNode -> AccumulationNode via Edge
@@ -126,25 +125,26 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
        std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
            auto_grad_meta));
-    RegisterGradientHookForTensor(leaf_tensor, hook);
-    RetainGradForTensor(leaf_tensor);  // result: 4.0*5.0 + 3.0 = 23.0
+    egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook);
+    egr_utils_api::RetainGradForTensor(
+        leaf_tensor);  // result: 4.0*5.0 + 3.0 = 23.0
   }
   RunBackward(target_tensors, {});
-  CompareGradTensorWithValue<float>(target_tensor, 4.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 4.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
 }
 TEST(RetainGrad, HookAfterRetainGrad) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -173,8 +173,8 @@ TEST(RetainGrad, HookAfterRetainGrad) {
        std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
            auto_grad_meta));
-    RetainGradForTensor(target_tensor);  // result: 1.0
-    RegisterGradientHookForTensor(target_tensor, hook);
+    egr_utils_api::RetainGradForTensor(target_tensor);  // result: 1.0
+    egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook);
   }
   // Connect ScaleNode -> AccumulationNode via Edge
@@ -200,15 +200,15 @@ TEST(RetainGrad, HookAfterRetainGrad) {
        std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
            auto_grad_meta));
-    RetainGradForTensor(leaf_tensor);  // RetainGrad for leaf tensor gets
-                                       // postponed, result: 4.0*5.0 + 3.0 =
-                                       // 23.0
-    RegisterGradientHookForTensor(leaf_tensor, hook);
+    egr_utils_api::RetainGradForTensor(
+        leaf_tensor);  // RetainGrad for leaf tensor gets
+                       // postponed, result: 4.0*5.0 + 3.0 =
+                       // 23.0
+    egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook);
   }
   RunBackward(target_tensors, {});
-  CompareGradTensorWithValue<float>(target_tensor, 1.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
 }
-}  // namespace eager_test
+}  // namespace egr
@@ -23,39 +23,34 @@
 #include "paddle/fluid/eager/tests/test_utils.h"
 #include "paddle/pten/api/lib/utils/allocator.h"
 #include "paddle/pten/core/kernel_registry.h"
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
-namespace eager_test {
+namespace egr {
 TEST(TensorUtils, Test) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  egr::EagerTensor t_grad = CreateTensorWithValue(
+  egr::EagerTensor t_grad = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
-  CHECK_EQ(IsLeafTensor(t), true);
+  CHECK_EQ(egr_utils_api::IsLeafTensor(t), true);
   // Test Utils
-  CompareTensorWithValue<float>(t, 5.0);
+  eager_test::CompareTensorWithValue<float>(t, 5.0);
   egr::AutogradMeta* meta = egr::EagerUtils::autograd_meta(&t);
   *meta->MutableGrad() = t_grad;
-  CompareGradTensorWithValue<float>(t, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(t, 1.0);
 }
-}  // namespace eager_test
+}  // namespace egr
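The test files above apply the mirror image of that change: instead of opening namespace eager_test and importing everything with a blanket `using namespace egr`, each test now lives directly in namespace egr, reaches the utility API through the short nested egr_utils_api:: prefix, and spells out eager_test:: for the shared test helpers. A condensed sketch of the resulting shape; every type and body here is a stand-in for the real Paddle code:

    struct EagerTensor {};  // stand-in for egr::EagerTensor

    namespace eager_test {  // stand-ins for the shared test helpers
    void InitEnv() {}
    template <typename T>
    void CompareGradTensorWithValue(const EagerTensor& /*t*/, T /*value*/) {}
    }  // namespace eager_test

    namespace egr {
    namespace egr_utils_api {  // stand-ins for the refactored utils API
    EagerTensor CreateTensorWithValue(float /*value*/) { return EagerTensor{}; }
    void RetainGradForTensor(const EagerTensor& /*t*/) {}
    }  // namespace egr_utils_api

    // Shape of a refactored test body: defined inside namespace egr, so the
    // utils API takes the nested qualifier only, while the helpers need
    // eager_test:: spelled out now that `using namespace egr` is gone.
    void BackwardSmokeTest() {
      eager_test::InitEnv();
      EagerTensor t = egr_utils_api::CreateTensorWithValue(1.0f /*value*/);
      egr_utils_api::RetainGradForTensor(t);
      eager_test::CompareGradTensorWithValue<float>(t, 5.0f);
    }
    }  // namespace egr

    int main() {
      egr::BackwardSmokeTest();
      return 0;
    }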