diff --git a/paddle/fluid/eager/api/utils/hook_utils.cc b/paddle/fluid/eager/api/utils/hook_utils.cc
index 7f85d014fa84253e9d552724b6b54611281791b6..85ff6687e0dbea38b5661e04a7e3efc9caf4ef6f 100644
--- a/paddle/fluid/eager/api/utils/hook_utils.cc
+++ b/paddle/fluid/eager/api/utils/hook_utils.cc
@@ -20,6 +20,7 @@
 #include "paddle/pten/core/dense_tensor.h"
 
 namespace egr {
+namespace egr_utils_api {
 
 void RegisterGradientHookForTensor(
     const egr::EagerTensor& tensor,
@@ -90,4 +91,5 @@ void RetainGradForTensor(const egr::EagerTensor& tensor) {
   }
 }
 
+}  // namespace egr_utils_api
 }  // namespace egr
diff --git a/paddle/fluid/eager/api/utils/hook_utils.h b/paddle/fluid/eager/api/utils/hook_utils.h
index bf320f0b15d4a1400d51bd1b2d69036e465d4dd2..7e4faa5a2c701e43e1688d0c9cec732926e36ab9 100644
--- a/paddle/fluid/eager/api/utils/hook_utils.h
+++ b/paddle/fluid/eager/api/utils/hook_utils.h
@@ -18,6 +18,7 @@
 #include "paddle/fluid/eager/grad_node_info.h"
 #include "paddle/pten/api/all.h"
 
 namespace egr {
+namespace egr_utils_api {
 
 void RegisterGradientHookForTensor(
     const egr::EagerTensor& tensor,
@@ -27,4 +28,5 @@ void RegisterReduceHookForTensor(const egr::EagerTensor& tensor,
                                  const std::function<void(void)>& hook);
 void RetainGradForTensor(const egr::EagerTensor& tensor);
 
+}  // namespace egr_utils_api
 }  // namespace egr
diff --git a/paddle/fluid/eager/api/utils/tensor_utils.cc b/paddle/fluid/eager/api/utils/tensor_utils.cc
index 9dbb308a2c9069dd7143e97b0e1da002efa456d7..ad6c34b7cf86cd4d7338539cde704c6aff3b6b11 100644
--- a/paddle/fluid/eager/api/utils/tensor_utils.cc
+++ b/paddle/fluid/eager/api/utils/tensor_utils.cc
@@ -26,6 +26,7 @@
 #include "paddle/fluid/framework/variable.h"
 
 namespace egr {
+namespace egr_utils_api {
 
 bool IsLeafTensor(const egr::EagerTensor& target) {
   std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(target);
@@ -58,4 +59,5 @@ egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim,
   return out;
 }
 
+}  // namespace egr_utils_api
 }  // namespace egr
diff --git a/paddle/fluid/eager/api/utils/tensor_utils.h b/paddle/fluid/eager/api/utils/tensor_utils.h
index a0d8caf3cb307e0df6a6bf735df2a1dc32b54f1b..b3c4b5968232089c69d3f844ce5c78e94ea5029c 100644
--- a/paddle/fluid/eager/api/utils/tensor_utils.h
+++ b/paddle/fluid/eager/api/utils/tensor_utils.h
@@ -18,6 +18,7 @@
 #include "paddle/pten/api/all.h"
 
 namespace egr {
+namespace egr_utils_api {
 
 // If and only if the tensor holds an AccumulationNode
 // Then it's treated as a leaf tensor
@@ -29,4 +30,5 @@ egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim,
                                        const pten::DataLayout& layout,
                                        float value, bool is_leaf = true);
 
+}  // namespace egr_utils_api
 }  // namespace egr
diff --git a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc
index 0637ff2bb23d3903ffb8a20fe2fe6a2e07080a02..83185dff9b78124c7dbe79b55351a839538dff4e 100644
--- a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc
+++ b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc
@@ -32,8 +32,8 @@
 #include "gperftools/profiler.h"
 #endif
 
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+using namespace egr;            // NOLINT
+using namespace egr_utils_api;  // NOLINT
 
 // Disable pten path
 DECLARE_bool(run_pten_kernel);
diff --git a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc
index 96dff14184f40cc233754852deb5b98f100e7273..9fbed05418302935ca03ca66166b9d20a08bac0d 100644
--- a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc
+++ b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc
@@ -31,8 +31,8 @@
 #include "gperftools/profiler.h"
 #endif
 
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+using namespace egr;            // NOLINT
+using namespace egr_utils_api;  // NOLINT
 
 DECLARE_bool(run_pten_kernel);
diff --git a/paddle/fluid/eager/tests/task_tests/backward_test.cc b/paddle/fluid/eager/tests/task_tests/backward_test.cc
index d63cff23ba9c8e5bdb51687c1f2430e17fd6f608..0ec86b7cc360c714444e9a2adb4875dd823992f5 100644
--- a/paddle/fluid/eager/tests/task_tests/backward_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/backward_test.cc
@@ -30,19 +30,17 @@
 #include "paddle/pten/core/dense_tensor.h"
 #include "paddle/pten/core/tensor_meta.h"
 
-using namespace egr;  // NOLINT
-
-namespace eager_test {
+namespace egr {
 
 TEST(Backward, SingleNodeEmptyGrad) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // Prepare Inputs
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  egr::EagerTensor target_tensor = CreateTensorWithValue(
+  egr::EagerTensor target_tensor = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
@@ -67,7 +65,7 @@ TEST(Backward, SingleNodeEmptyGrad) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
 
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
 
   // Connect Node0 -> AccumulationNode via Edge
   auto meta = egr::AutogradMeta();
@@ -80,26 +78,26 @@ TEST(Backward, SingleNodeEmptyGrad) {
   RunBackward(outs, {});
 
   // Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
 }
 
 TEST(Backward, SingleNodeCustomGrad) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
 
   std::vector<egr::EagerTensor> grad_tensors;
   // Create Grad Tensor
-  egr::EagerTensor grad_tensor = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 10.0 /*value*/, false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor));
@@ -128,7 +126,7 @@ TEST(Backward, SingleNodeCustomGrad) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
 
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
 
   // Connect Node0 -> AccumulationNode via Edge
   auto meta = egr::AutogradMeta();
@@ -141,7 +139,7 @@ TEST(Backward, SingleNodeCustomGrad) {
   RunBackward(target_tensors, grad_tensors);
 
   // Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
 }
 
 /*
@@ -153,14 +151,14 @@ Node0
 */
 TEST(Backward, LinearNodes) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -202,7 +200,7 @@ TEST(Backward, LinearNodes) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
 
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
 
   // Connect Node1 -> AccumulationNode via Edge
   auto meta1 = egr::AutogradMeta();
@@ -215,7 +213,7 @@ TEST(Backward, LinearNodes) {
   RunBackward(target_tensors, {});
 
   // Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
 }
 
 /*
@@ -227,17 +225,17 @@ Node0 Node1
 */
 TEST(Backward, WithAccumulation) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // Prepare Inputs
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
   std::vector<egr::EagerTensor> target_tensors;
-  egr::EagerTensor tensor0 = CreateTensorWithValue(
+  egr::EagerTensor tensor0 = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
-  egr::EagerTensor tensor1 = CreateTensorWithValue(
+  egr::EagerTensor tensor1 = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor0));
@@ -245,10 +243,10 @@ TEST(Backward, WithAccumulation) {
 
   // Create Grad Tensor
   std::vector<egr::EagerTensor> grad_tensors;
-  egr::EagerTensor grad_tensor0 = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor0 = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
-  egr::EagerTensor grad_tensor1 = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor1 = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 10.0 /*value*/, false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor0));
@@ -303,7 +301,7 @@ TEST(Backward, WithAccumulation) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta2->SetSingleOutRankWithSlot(0, 0);
 
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
 
   // Connect Node2 -> AccumulationNode via Edge
   auto meta2 = egr::AutogradMeta();
@@ -314,7 +312,7 @@ TEST(Backward, WithAccumulation) {
 
   RunBackward(target_tensors, grad_tensors);
 
-  CompareGradTensorWithValue<float>(leaf_tensor, 2500.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 2500.0);
 }
 
-}  // namespace eager_test
+}  // namespace egr
diff --git a/paddle/fluid/eager/tests/task_tests/cross_batch_accumulation_test.cc b/paddle/fluid/eager/tests/task_tests/cross_batch_accumulation_test.cc
index e1e138cdee8ba59953b8b477924c2685e60d3213..52e10b2b1b8a094e40b96537e36cc66c49f1714f 100644
--- a/paddle/fluid/eager/tests/task_tests/cross_batch_accumulation_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/cross_batch_accumulation_test.cc
@@ -31,17 +31,15 @@
 
 #include "paddle/fluid/eager/tests/test_utils.h"
 
-using namespace egr;  // NOLINT
-
-namespace eager_test {
+namespace egr {
 
 TEST(CrossBatchAccumulation, SingleScaleNode) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
 
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -60,7 +58,7 @@ TEST(CrossBatchAccumulation, SingleScaleNode) {
   auto_grad_meta->SetGradNode(
       std::dynamic_pointer_cast<GradNodeBase>(scale_node_ptr));
   auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
-  RetainGradForTensor(target_tensor);  // result: 1.0
+  egr_utils_api::RetainGradForTensor(target_tensor);  // result: 1.0
 
   auto meta = AutogradMeta();
   meta.SetSingleOutRankWithSlot(0, 0);
@@ -71,18 +69,18 @@ TEST(CrossBatchAccumulation, SingleScaleNode) {
     auto_grad_meta1->SetGradNode(
         std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
     auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
-    RetainGradForTensor(leaf_tensor);
+    egr_utils_api::RetainGradForTensor(leaf_tensor);
   }
 
   RunBackward(target_tensors, {});
 
-  CompareGradTensorWithValue<float>(target_tensor, 1.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
 
   RunBackward(target_tensors, {});
 
-  CompareGradTensorWithValue<float>(target_tensor, 1.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 10.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 10.0);
 }
 
-}  // namespace eager_test
+}  // namespace egr
diff --git a/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc b/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc
index 4d93f0188a746bf12cf52a85861b06c327714eba..c7c27dcc1d1508945295ca80f9c3631eacc23325 100644
--- a/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc
@@ -24,10 +24,7 @@
 
 #include "paddle/pten/api/lib/utils/allocator.h"
 
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
-
-namespace eager_test {
+namespace egr {
 
 TEST(EagerUtils, AutoGradMeta) {
   // Construct Eager Tensor
@@ -167,7 +164,7 @@ TEST(EagerUtils, PassStopGradient) {
 
 TEST(EagerUtils, SyncToVarsSingle) {
   paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
-  auto tensor = eager_test::CreateTestCPUTensor(5.0f, ddim);
+  auto tensor = CreateTestCPUTensor(5.0f, ddim);
 
   std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
       egr::EagerUtils::SyncToVars(tensor);
@@ -185,9 +182,8 @@ TEST(EagerUtils, SyncToVarsSingle) {
 
 TEST(EagerUtils, SyncToVarsMultiple) {
   paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
-  std::vector<egr::EagerTensor> tensors = {
-      eager_test::CreateTestCPUTensor(1.0f, ddim),
-      eager_test::CreateTestCPUTensor(2.0f, ddim)};
+  std::vector<egr::EagerTensor> tensors = {CreateTestCPUTensor(1.0f, ddim),
+                                           CreateTestCPUTensor(2.0f, ddim)};
 
   std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
       egr::EagerUtils::SyncToVars(tensors);
@@ -280,4 +276,4 @@ TEST(EagerUtils, ConstructDuplicableOutput) {
   CHECK(outs[0]->initialized() == false);
 }
 
-}  // namespace eager_test
+}  // namespace egr
diff --git a/paddle/fluid/eager/tests/task_tests/forward_autograd_test.cc b/paddle/fluid/eager/tests/task_tests/forward_autograd_test.cc
index 6e23226cde432af048408a7ead3fa07260dabc2d..205f231eceeed5d641a3b31b323e0e60cceb2313 100644
--- a/paddle/fluid/eager/tests/task_tests/forward_autograd_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/forward_autograd_test.cc
@@ -27,21 +27,18 @@
 #include "paddle/pten/core/dense_tensor.h"
 #include "paddle/pten/core/tensor_meta.h"
 
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
-
-namespace eager_test {
+namespace egr {
 
 TEST(Forward, SingleNode) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
@@ -55,7 +52,7 @@ TEST(Forward, SingleNode) {
       tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/);
 
   // Examine Forward Output
-  CompareTensorWithValue<float>(out, 13.0);
+  eager_test::CompareTensorWithValue<float>(out, 13.0);
 
   // Examine GradNode
   {
@@ -80,14 +77,14 @@ Node1
   out
 */
 TEST(Forward, LinearNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
@@ -108,10 +105,10 @@ TEST(Forward, LinearNodes) {
       out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/);
 
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
 
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
 
   // Examine GradNode
   {
@@ -156,14 +153,14 @@ TEST(Forward, LinearNodes) {
 out1 out2
 */
 TEST(Forward, BranchedNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
@@ -190,13 +187,13 @@ TEST(Forward, BranchedNodes) {
       out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
 
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
 
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
 
   // Examine Forward Output 2
-  CompareTensorWithValue<float>(out2, 150.0);
+  eager_test::CompareTensorWithValue<float>(out2, 150.0);
 
   // Examine GradNode
   {
@@ -248,4 +245,4 @@ TEST(Forward, BranchedNodes) {
   }
 }
 
-}  // namespace eager_test
+}  // namespace egr
diff --git a/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc b/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc
index 751e95487659cb18f45e5b3ec1b2d018d7aa4fcb..e292844c8ee58694959684a449ff16d6771d41d6 100644
--- a/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc
@@ -29,10 +29,7 @@
 
 #include "paddle/fluid/eager/tests/test_utils.h"
 
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
-
-namespace eager_test {
+namespace egr {
 
 egr::EagerTensor hook_function(const egr::EagerTensor& t) {
   auto t_dense = std::dynamic_pointer_cast<pten::DenseTensor>(t.impl());
@@ -61,14 +58,14 @@ egr::EagerTensor hook_function(const egr::EagerTensor& t) {
 }
 
 TEST(FwdBwdJoint, SingleNode) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
   float scale = 2.0;
@@ -77,7 +74,7 @@ TEST(FwdBwdJoint, SingleNode) {
       tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/);
 
   // Examine Forward Output
-  CompareTensorWithValue<float>(out, 13.0);
+  eager_test::CompareTensorWithValue<float>(out, 13.0);
 
   std::vector<egr::EagerTensor> outs = {out};
   // 4. Run Backward
@@ -88,7 +85,7 @@ TEST(FwdBwdJoint, SingleNode) {
           EagerUtils::unsafe_autograd_meta(tensor)->Grad().impl())
           ->data<float>()[0];
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 2.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 2.0);
 }
 
 /*
@@ -101,14 +98,14 @@ Node1
   out
 */
 TEST(FwdBwdJoint, LinearNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
   // Run Forward Node 0
@@ -125,17 +122,17 @@ TEST(FwdBwdJoint, LinearNodes) {
       out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/);
 
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
 
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
 
   std::vector<egr::EagerTensor> outs = {out1};
   // 4. Run Backward
   RunBackward(outs, {});
 
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 10.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 10.0);
 }
 
 /*
@@ -149,14 +146,14 @@ TEST(FwdBwdJoint, LinearNodes) {
 out1 out2
 */
 TEST(FwdBwdJoint, BranchedNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
   // Run Forward Node 0
@@ -179,10 +176,10 @@ TEST(FwdBwdJoint, BranchedNodes) {
       out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
 
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
 
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
 
   // Examine Forward Output 2
   {
@@ -201,7 +198,7 @@ TEST(FwdBwdJoint, BranchedNodes) {
   RunBackward(outs, {});
 
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 30.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 30.0);
 }
 
 /*
@@ -215,14 +212,14 @@ TEST(FwdBwdJoint, BranchedNodes) {
 out1 out2
 */
 TEST(FwdBwdJoint, GradientHook) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
 
   std::function<egr::EagerTensor(const egr::EagerTensor&)> hook =
       &hook_function;
@@ -234,24 +231,24 @@ TEST(FwdBwdJoint, GradientHook) {
   egr::EagerTensor out0 =
       egr::scale(tensor, scale0, bias0, true /*bias_after_scale*/,
                  true /*trace_backward*/);
-  RetainGradForTensor(out0);                  // hook: +5
-  RegisterGradientHookForTensor(out0, hook);  // hook: +5
+  egr_utils_api::RetainGradForTensor(out0);                  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out0, hook);  // hook: +5
 
   // Run Forward Node 1
   float scale1 = 5.0;
   float bias1 = 10.0;
   egr::EagerTensor out1 = egr::scale(
       out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/);
-  RetainGradForTensor(out1);                  // hook: +5
-  RegisterGradientHookForTensor(out1, hook);  // hook: +5
+  egr_utils_api::RetainGradForTensor(out1);                  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out1, hook);  // hook: +5
 
   // Run Forward Node 2
   float scale2 = 10.0;
   float bias2 = 20.0;
   egr::EagerTensor out2 = egr::scale(
       out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
-  RetainGradForTensor(out2);                  // hook: +5
-  RegisterGradientHookForTensor(out2, hook);  // hook: +5
+  egr_utils_api::RetainGradForTensor(out2);                  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out2, hook);  // hook: +5
 
   // 4. Run Backward
   std::vector<egr::EagerTensor> outs = {out1, out2};
@@ -259,16 +256,16 @@ TEST(FwdBwdJoint, GradientHook) {
 
   // Examine Backward Grad
   // leaf grad
-  CompareGradTensorWithValue<float>(tensor, 190.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 190.0);
 
   // out0 grad
-  CompareGradTensorWithValue<float>(out0, 90.0);
+  eager_test::CompareGradTensorWithValue<float>(out0, 90.0);
 
   // out1 grad
-  CompareGradTensorWithValue<float>(out1, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(out1, 1.0);
 
   // out2 grad
-  CompareGradTensorWithValue<float>(out2, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(out2, 1.0);
 }
 
 /*
@@ -282,14 +279,14 @@ TEST(FwdBwdJoint, GradientHook) {
 out1 out2
 */
 TEST(FwdBwdJoint, CrossBatchAccumulation) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
   // Run Forward Node 0
@@ -316,13 +313,13 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) {
   RunBackward(outs, {});
 
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 30.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 30.0);
 
   // Cross Batch Accumulation
   RunBackward(outs, {});
 
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 60.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 60.0);
 }
 
 /* ---------------------------------------------------- */
@@ -331,14 +328,14 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) {
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 TEST(FwdBwdJoint, SingleNodeCUDA) {
-  InitEnv(paddle::platform::CUDAPlace());
+  eager_test::InitEnv(paddle::platform::CUDAPlace());
 
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CUDAPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
   float scale = 2.0;
@@ -347,14 +344,14 @@ TEST(FwdBwdJoint, SingleNodeCUDA) {
       tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/);
 
   // Examine Forward Output
-  CompareTensorWithValue<float>(out, 13.0);
+  eager_test::CompareTensorWithValue<float>(out, 13.0);
 
   std::vector<egr::EagerTensor> outs = {out};
   // 4. Run Backward
   RunBackward(outs, {});
 
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 2.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 2.0);
 }
 
 /*
@@ -368,14 +365,14 @@ TEST(FwdBwdJoint, SingleNodeCUDA) {
 out1 out2
 */
 TEST(FwdBwdJoint, BranchedNodesCUDA) {
-  InitEnv(paddle::platform::CUDAPlace());
+  eager_test::InitEnv(paddle::platform::CUDAPlace());
 
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CUDAPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
   // Run Forward Node 0
@@ -398,11 +395,11 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) {
       out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
 
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
   // Examine Forward Output 2
-  CompareTensorWithValue<float>(out2, 150.0);
+  eager_test::CompareTensorWithValue<float>(out2, 150.0);
 
   // TODO(jiabin): fix this with add functor
   // 4. Run Backward
@@ -410,8 +407,8 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) {
   RunBackward(outs, {});
 
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 30.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 30.0);
 }
 #endif
 
-}  // namespace eager_test
+}  // namespace egr
diff --git a/paddle/fluid/eager/tests/task_tests/generated_test.cc b/paddle/fluid/eager/tests/task_tests/generated_test.cc
index eb8d1e517eaf3b0c27262b581c749a87cccf8001..9d6e3310678345fed77957f40f2a1c12060efa77 100644
--- a/paddle/fluid/eager/tests/task_tests/generated_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/generated_test.cc
@@ -30,66 +30,63 @@
 #include "paddle/fluid/eager/api/generated/fluid_generated/dygraph_forward_api.h"
 #include "paddle/pten/core/kernel_registry.h"
 
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
-
-namespace eager_test {
+namespace egr {
 
 TEST(Generated, Sigmoid) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   VLOG(6) << "Init Env";
 
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
   VLOG(6) << "Make Dim";
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 0.0, true);
   VLOG(6) << "Make EagerTensor";
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   VLOG(6) << "Retain Grad for Tensor";
 
   auto output_tensor = sigmoid_dygraph_function(tensor, {});
   VLOG(6) << "Run Backward";
-  CompareVariableWithValue<float>(output_tensor, 0.5);
+  eager_test::CompareVariableWithValue<float>(output_tensor, 0.5);
 
   std::vector<egr::EagerTensor> target_tensors = {output_tensor};
   VLOG(6) << "Runing Backward";
   RunBackward(target_tensors, {});
   VLOG(6) << "Finish Backward";
-  CompareGradVariableWithValue<float>(tensor, 0.25);
+  eager_test::CompareGradVariableWithValue<float>(tensor, 0.25);
 }
 
 TEST(Generated, Matmul_v2) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   auto tracer = std::make_shared<paddle::imperative::Tracer>();
   paddle::imperative::SetCurrentTracer(tracer);
 
   // 1. Prepare Input
   paddle::framework::DDim ddimX = paddle::framework::make_ddim({4, 16});
-  egr::EagerTensor X = CreateTensorWithValue(
+  egr::EagerTensor X = egr_utils_api::CreateTensorWithValue(
       ddimX, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 3.0, true);
-  RetainGradForTensor(X);
+  egr_utils_api::RetainGradForTensor(X);
 
   paddle::framework::DDim ddimY = paddle::framework::make_ddim({16, 20});
-  egr::EagerTensor Y = CreateTensorWithValue(
+  egr::EagerTensor Y = egr_utils_api::CreateTensorWithValue(
      ddimY, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 2.0, true);
-  RetainGradForTensor(Y);
+  egr_utils_api::RetainGradForTensor(Y);
 
   auto output_tensor = matmul_v2_dygraph_function(
       X, Y, {{"trans_x", false}, {"trans_y", false}});
 
-  CompareVariableWithValue<float>(output_tensor, 96);
+  eager_test::CompareVariableWithValue<float>(output_tensor, 96);
 
   std::vector<egr::EagerTensor> target_tensors = {output_tensor};
   RunBackward(target_tensors, {});
 
-  CompareGradVariableWithValue<float>(X, 2.0 * 20);
-  CompareGradVariableWithValue<float>(Y, 3.0 * 4);
+  eager_test::CompareGradVariableWithValue<float>(X, 2.0 * 20);
+  eager_test::CompareGradVariableWithValue<float>(Y, 3.0 * 4);
 }
 
-}  // namespace eager_test
+}  // namespace egr
diff --git a/paddle/fluid/eager/tests/task_tests/hook_test.cc b/paddle/fluid/eager/tests/task_tests/hook_test.cc
index 326240d0cb7b97f65c98e7a41ead6db973a56d68..32b28d8efd21b85fdafbdee9211d31be2e465c7e 100644
--- a/paddle/fluid/eager/tests/task_tests/hook_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/hook_test.cc
@@ -30,9 +30,7 @@
 
 #include "paddle/fluid/eager/tests/test_utils.h"
 
-using namespace egr;  // NOLINT
-
-namespace eager_test {
+namespace egr {
 
 egr::EagerTensor hook_function(const egr::EagerTensor& t) {
   auto t_dense = std::dynamic_pointer_cast<pten::DenseTensor>(t.impl());
@@ -61,14 +59,14 @@ egr::EagerTensor hook_function(const egr::EagerTensor& t) {
 }
 
 TEST(RetainGrad, HookBeforeRetainGrad) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -99,8 +97,9 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
         std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
             auto_grad_meta));
 
-    RegisterGradientHookForTensor(target_tensor, hook);
-    RetainGradForTensor(target_tensor);  // result: 1.0 + 3.0 = 4.0
+    egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook);
+    egr_utils_api::RetainGradForTensor(
+        target_tensor);  // result: 1.0 + 3.0 = 4.0
   }
 
   // Connect ScaleNode -> AccumulationNode via Edge
@@ -126,25 +125,26 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
         std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
             auto_grad_meta));
 
-    RegisterGradientHookForTensor(leaf_tensor, hook);
-    RetainGradForTensor(leaf_tensor);  // result: 4.0*5.0 + 3.0 = 23.0
+    egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook);
+    egr_utils_api::RetainGradForTensor(
+        leaf_tensor);  // result: 4.0*5.0 + 3.0 = 23.0
   }
 
   RunBackward(target_tensors, {});
 
-  CompareGradTensorWithValue<float>(target_tensor, 4.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 4.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
 }
 
 TEST(RetainGrad, HookAfterRetainGrad) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -173,8 +173,8 @@ TEST(RetainGrad, HookAfterRetainGrad) {
         std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
             auto_grad_meta));
 
-    RetainGradForTensor(target_tensor);  // result: 1.0
-    RegisterGradientHookForTensor(target_tensor, hook);
+    egr_utils_api::RetainGradForTensor(target_tensor);  // result: 1.0
+    egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook);
   }
 
   // Connect ScaleNode -> AccumulationNode via Edge
@@ -200,15 +200,15 @@ TEST(RetainGrad, HookAfterRetainGrad) {
         std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
             auto_grad_meta));
 
-    RetainGradForTensor(leaf_tensor);  // RetainGrad for leaf tensor gets
-                                       // postponed, result: 4.0*5.0 + 3.0 =
-                                       // 23.0
-    RegisterGradientHookForTensor(leaf_tensor, hook);
+    egr_utils_api::RetainGradForTensor(
+        leaf_tensor);  // RetainGrad for leaf tensor gets
+                       // postponed, result: 4.0*5.0 + 3.0 =
+                       // 23.0
+    egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook);
   }
 
   RunBackward(target_tensors, {});
 
-  CompareGradTensorWithValue<float>(target_tensor, 1.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
 }
-
-}  // namespace eager_test
+}  // namespace egr
diff --git a/paddle/fluid/eager/tests/task_tests/tensor_utils_test.cc b/paddle/fluid/eager/tests/task_tests/tensor_utils_test.cc
index 5b96c726b22285645bfc5098ddd54448c9d65cda..5e86cac83a285f8637a01689fcdfa7caf81d19e1 100644
--- a/paddle/fluid/eager/tests/task_tests/tensor_utils_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/tensor_utils_test.cc
@@ -23,39 +23,34 @@
 
 #include "paddle/fluid/eager/tests/test_utils.h"
 #include "paddle/pten/api/lib/utils/allocator.h"
-#include "paddle/pten/core/kernel_registry.h"
-
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
-
-namespace eager_test {
+namespace egr {
 
 TEST(TensorUtils, Test) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
 
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
 
-  egr::EagerTensor t_grad = CreateTensorWithValue(
+  egr::EagerTensor t_grad = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
 
-  CHECK_EQ(IsLeafTensor(t), true);
+  CHECK_EQ(egr_utils_api::IsLeafTensor(t), true);
 
   // Test Utils
-  CompareTensorWithValue<float>(t, 5.0);
+  eager_test::CompareTensorWithValue<float>(t, 5.0);
 
   egr::AutogradMeta* meta = egr::EagerUtils::autograd_meta(&t);
   *meta->MutableGrad() = t_grad;
 
-  CompareGradTensorWithValue<float>(t, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(t, 1.0);
 }
 
-}  // namespace eager_test
+}  // namespace egr
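
Usage note (illustrative, not part of the patch): after this change the eager API
helpers live in egr::egr_utils_api, while the test helpers (eager_test::InitEnv,
eager_test::CompareTensorWithValue, ...) keep their own namespace. A minimal
sketch of a caller under those assumptions; ExampleCaller is an invented name,
and the snippet relies on the transitive includes the test files above already
use (DDim, platform places, glog's CHECK_EQ):

#include "paddle/fluid/eager/api/utils/hook_utils.h"
#include "paddle/fluid/eager/api/utils/tensor_utils.h"

void ExampleCaller() {
  // Helpers must now be qualified with egr_utils_api, or pulled in with
  // `using namespace egr_utils_api;` as the benchmark files above do.
  paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16});
  egr::EagerTensor t = egr::egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 1.0 /*value*/, true /*is_leaf*/);
  egr::egr_utils_api::RetainGradForTensor(t);
  // A leaf tensor is one whose grad node is an AccumulationNode.
  CHECK_EQ(egr::egr_utils_api::IsLeafTensor(t), true);
}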