Unverified commit 77bef883, authored by gouzil and committed by GitHub

[build] fix no member named 'CreateTensorWithValue' (#55042)

Parent: 3e5018df
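Each of the test files below had a file-scope using-declaration, `using eager_test::CreateTensorWithValue;`, and a using-declaration fails to compile whenever the named member is not visible at that point, which matches the "no member named 'CreateTensorWithValue'" diagnostic in the title (presumably the helper's declaration was not in scope under some build configurations). The fix removes the using-declarations and fully qualifies every call site. A minimal sketch of the pattern with toy types, not Paddle's actual helper:

    #include <iostream>

    namespace eager_test {
    // Stand-in for the real helper, which constructs a paddle::Tensor.
    int CreateTensorWithValue(int value) { return value; }
    }  // namespace eager_test

    // After the fix, call sites name the namespace explicitly instead of
    // relying on a file-scope `using eager_test::CreateTensorWithValue;`,
    // which only compiles when the declaration is already visible there.
    int main() {
      int t = eager_test::CreateTensorWithValue(5);  // qualified call
      std::cout << t << std::endl;
      return 0;
    }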
@@ -44,20 +44,19 @@ PD_DECLARE_KERNEL(sum_grad, CPU, ALL_LAYOUT);
 using namespace egr;            // NOLINT
 using namespace egr_utils_api;  // NOLINT
 
-using eager_test::CreateTensorWithValue;
-
 TEST(Benchmark, EagerScaleCPU) {
   // Prepare Device Contexts
   eager_test::InitEnv(paddle::platform::CPUPlace());
 
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
-    paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                  paddle::platform::CPUPlace(),
-                                                  phi::DataType::FLOAT32,
-                                                  phi::DataLayout::NCHW,
-                                                  5.0,
-                                                  true);
+    paddle::Tensor tensor =
+        eager_test::CreateTensorWithValue(ddim,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          5.0,
+                                          true);
     RetainGradForTensor(tensor);
 
     if (mode == "Accuracy") {
@@ -91,21 +90,23 @@ TEST(Benchmark, EagerMatmulCPU) {
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CPUPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             1.0,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          1.0,
+                                          true);
     RetainGradForTensor(X);
 
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                             paddle::platform::CPUPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             2.0,
-                                             true);
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          2.0,
+                                          true);
     RetainGradForTensor(Y);
 
     if (mode == "Accuracy") {
@@ -141,21 +142,23 @@ TEST(Benchmark, EagerIntermediateMatmulCPU) {
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CPUPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             1.0,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          1.0,
+                                          true);
     RetainGradForTensor(X);
 
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                             paddle::platform::CPUPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             2.0,
-                                             true);
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          2.0,
+                                          true);
     RetainGradForTensor(Y);
 
     if (mode == "Accuracy") {
@@ -191,33 +194,36 @@ TEST(Benchmark, EagerIntermediateMLPCPU) {
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({MLP_M, MLP_N});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CPUPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             MLP_X_VAL,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          MLP_X_VAL,
+                                          true);
     RetainGradForTensor(X);
 
     std::vector<paddle::Tensor> Ws;
     std::vector<paddle::Tensor> Bs;
     for (size_t i = 0; i < MLP_NUM_LINEAR; i++) {
       paddle::framework::DDim ddimW = phi::make_ddim({MLP_N, MLP_K});
-      paddle::Tensor W = CreateTensorWithValue(ddimW,
-                                               paddle::platform::CPUPlace(),
-                                               phi::DataType::FLOAT32,
-                                               phi::DataLayout::NCHW,
-                                               MLP_W_VAL,
-                                               true);
+      paddle::Tensor W =
+          eager_test::CreateTensorWithValue(ddimW,
+                                            paddle::platform::CPUPlace(),
+                                            phi::DataType::FLOAT32,
+                                            phi::DataLayout::NCHW,
+                                            MLP_W_VAL,
+                                            true);
       RetainGradForTensor(W);
 
       paddle::framework::DDim ddimB = phi::make_ddim({MLP_K});
-      paddle::Tensor B = CreateTensorWithValue(ddimB,
-                                               paddle::platform::CPUPlace(),
-                                               phi::DataType::FLOAT32,
-                                               phi::DataLayout::NCHW,
-                                               MLP_B_VAL,
-                                               true);
+      paddle::Tensor B =
+          eager_test::CreateTensorWithValue(ddimB,
+                                            paddle::platform::CPUPlace(),
+                                            phi::DataType::FLOAT32,
+                                            phi::DataLayout::NCHW,
+                                            MLP_B_VAL,
+                                            true);
       RetainGradForTensor(B);
 
       Ws.emplace_back(std::move(W));
...
@@ -45,19 +45,18 @@ PD_DECLARE_KERNEL(add_grad, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sum, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sum_grad, GPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 TEST(Benchmark, EagerScaleCUDA) {
   eager_test::InitEnv(paddle::platform::CUDAPlace());
 
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
-    paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                  paddle::platform::CUDAPlace(),
-                                                  phi::DataType::FLOAT32,
-                                                  phi::DataLayout::NCHW,
-                                                  5.0 /*value*/,
-                                                  true /*is_leaf*/);
+    paddle::Tensor tensor =
+        eager_test::CreateTensorWithValue(ddim,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          5.0 /*value*/,
+                                          true /*is_leaf*/);
     RetainGradForTensor(tensor);
 
     if (mode == "Accuracy") {
@@ -93,21 +92,23 @@ TEST(Benchmark, EagerMatmulCUDA) {
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CUDAPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             1.0,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          1.0,
+                                          true);
     RetainGradForTensor(X);
 
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                             paddle::platform::CUDAPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             2.0,
-                                             true);
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          2.0,
+                                          true);
     RetainGradForTensor(Y);
 
     if (mode == "Accuracy") {
@@ -147,21 +148,23 @@ TEST(Benchmark, EagerIntermediateMatmulCUDA) {
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CUDAPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             1.0,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          1.0,
+                                          true);
     RetainGradForTensor(X);
 
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                             paddle::platform::CUDAPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             2.0,
-                                             true);
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          2.0,
+                                          true);
     RetainGradForTensor(Y);
 
     if (mode == "Accuracy") {
@@ -201,33 +204,36 @@ TEST(Benchmark, EagerIntermediateMLPCUDA) {
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({MLP_M, MLP_N});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CUDAPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             MLP_X_VAL,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          MLP_X_VAL,
+                                          true);
     RetainGradForTensor(X);
 
     std::vector<paddle::Tensor> Ws;
    std::vector<paddle::Tensor> Bs;
     for (size_t i = 0; i < MLP_NUM_LINEAR; i++) {
       paddle::framework::DDim ddimW = phi::make_ddim({MLP_N, MLP_K});
-      paddle::Tensor W = CreateTensorWithValue(ddimW,
-                                               paddle::platform::CUDAPlace(),
-                                               phi::DataType::FLOAT32,
-                                               phi::DataLayout::NCHW,
-                                               MLP_W_VAL,
-                                               true);
+      paddle::Tensor W =
+          eager_test::CreateTensorWithValue(ddimW,
+                                            paddle::platform::CUDAPlace(),
+                                            phi::DataType::FLOAT32,
+                                            phi::DataLayout::NCHW,
+                                            MLP_W_VAL,
+                                            true);
       RetainGradForTensor(W);
 
       paddle::framework::DDim ddimB = phi::make_ddim({MLP_K});
-      paddle::Tensor B = CreateTensorWithValue(ddimB,
-                                               paddle::platform::CUDAPlace(),
-                                               phi::DataType::FLOAT32,
-                                               phi::DataLayout::NCHW,
-                                               MLP_B_VAL,
-                                               true);
+      paddle::Tensor B =
+          eager_test::CreateTensorWithValue(ddimB,
+                                            paddle::platform::CUDAPlace(),
+                                            phi::DataType::FLOAT32,
+                                            phi::DataLayout::NCHW,
+                                            MLP_B_VAL,
+                                            true);
       RetainGradForTensor(B);
 
       Ws.emplace_back(std::move(W));
...
@@ -31,8 +31,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 TEST(Backward, SingleNodeEmptyGrad) {
@@ -44,12 +42,12 @@ TEST(Backward, SingleNodeEmptyGrad) {
   // Create Target Tensor
   paddle::Tensor target_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            1.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
 
   paddle::Tensor leaf_tensor;
   {
@@ -95,23 +93,24 @@ TEST(Backward, SingleNodeCustomGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
 
   std::vector<paddle::Tensor> grad_tensors;
   // Create Grad Tensor
   paddle::Tensor grad_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            10.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        10.0 /*value*/,
+                                        false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor));
 
   paddle::Tensor leaf_tensor;
@@ -166,12 +165,13 @@ TEST(Backward, LinearNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
 
   paddle::Tensor leaf_tensor;
@@ -241,37 +241,39 @@ TEST(Backward, WithAccumulation) {
   // Create Target Tensor
   std::vector<paddle::Tensor> target_tensors;
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1.0 /*value*/,
-                                                 false /*is_leaf*/);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1.0 /*value*/,
-                                                 false /*is_leaf*/);
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor0));
   target_tensors.emplace_back(std::move(tensor1));
 
   // Create Grad Tensor
   std::vector<paddle::Tensor> grad_tensors;
   paddle::Tensor grad_tensor0 =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            5.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        false /*is_leaf*/);
   paddle::Tensor grad_tensor1 =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            10.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        10.0 /*value*/,
+                                        false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor0));
   grad_tensors.emplace_back(std::move(grad_tensor1));
...
@@ -27,8 +27,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 TEST(Forward, SingleNode) {
@@ -40,12 +38,13 @@ TEST(Forward, SingleNode) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           5.0 /*value*/,
-                                           false /*is_leaf*/);
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
   paddle::Tensor& tensor = target_tensors[0];
   EagerUtils::autograd_meta(&tensor)->SetStopGradient(false);
@@ -89,12 +88,13 @@ TEST(Forward, LinearNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           5.0 /*value*/,
-                                           false /*is_leaf*/);
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
   paddle::Tensor& tensor = target_tensors[0];
   EagerUtils::autograd_meta(&tensor)->SetStopGradient(false);
@@ -174,12 +174,13 @@ TEST(Forward, BranchedNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           5.0 /*value*/,
-                                           false /*is_leaf*/);
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
   paddle::Tensor& tensor = target_tensors[0];
   EagerUtils::autograd_meta(&tensor)->SetStopGradient(false);
...
@@ -35,8 +35,6 @@ PD_DECLARE_KERNEL(full, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(add, KPS, ALL_LAYOUT);
 #endif
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -67,12 +65,13 @@ TEST(FwdBwdJoint, SingleNode) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
@@ -110,12 +109,13 @@ TEST(FwdBwdJoint, LinearNodes) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
@@ -163,12 +163,13 @@ TEST(FwdBwdJoint, BranchedNodes) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
@@ -235,12 +236,13 @@ TEST(FwdBwdJoint, GradientHook) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
@@ -307,12 +309,13 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
@@ -361,12 +364,13 @@ TEST(FwdBwdJoint, SingleNodeCUDA) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CUDAPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CUDAPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
@@ -401,12 +405,13 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CUDAPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CUDAPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
 
   // 3. Run Forward
...
@@ -34,8 +34,6 @@ PD_DECLARE_KERNEL(add_grad, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid_grad, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 TEST(Generated, Sigmoid) {
@@ -45,12 +43,13 @@ TEST(Generated, Sigmoid) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
   VLOG(6) << "Make Dim";
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                0.0,
-                                                true);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        0.0,
+                                        true);
   VLOG(6) << "Make paddle::Tensor";
   egr_utils_api::RetainGradForTensor(tensor);
   VLOG(6) << "Retain Grad for Tensor";
@@ -75,21 +74,23 @@ TEST(Generated, Matmul_v2) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           3.0,
-                                           true);
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        3.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(X);
 
   paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           2.0,
-                                           true);
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        2.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(Y);
 
   auto output_tensor = matmul_v2_dygraph_function(
@@ -113,21 +114,23 @@ TEST(Generated, ElementwiseAdd) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           3.0,
-                                           true);
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        3.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(X);
 
   paddle::framework::DDim ddimY = phi::make_ddim({4, 16});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           2.0,
-                                           true);
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        2.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(Y);
 
   auto output_tensor = elementwise_add_dygraph_function(X, Y, {});
...
@@ -30,8 +30,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 TEST(Grad, SingleNodeEmptyGrad) {
@@ -43,21 +41,21 @@ TEST(Grad, SingleNodeEmptyGrad) {
   // Create Target Tensor (output)
   paddle::Tensor output_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            1.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
 
   // Create input tensor
   const paddle::Tensor leaf_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            1.0 /*value*/,
-                            true /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        true /*is_leaf*/);
 
   {
     // Create Scale Node
@@ -109,32 +107,33 @@ TEST(Grad, SingleNodeCustomGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
 
   std::vector<paddle::Tensor> grad_tensors;
   // Create Grad Tensor
   paddle::Tensor grad_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            10.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        10.0 /*value*/,
+                                        false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor));
 
   paddle::Tensor leaf_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            1.0 /*value*/,
-                            true /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        true /*is_leaf*/);
 
   {
     // Create Scale Node
@@ -187,21 +186,22 @@ TEST(Grad, LinearNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
 
   paddle::Tensor leaf_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            1.0 /*value*/,
-                            true /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        true /*is_leaf*/);
 
   {
     // Create Node0
     auto node0_ptr = std::make_shared<GradNodeScale>(1, 1);
@@ -268,37 +268,39 @@ TEST(Grad, WithAccumulation) {
   // Create Target Tensor
   std::vector<paddle::Tensor> target_tensors;
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1.0 /*value*/,
-                                                 false /*is_leaf*/);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1.0 /*value*/,
-                                                 false /*is_leaf*/);
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor0));
   target_tensors.emplace_back(std::move(tensor1));
 
   // Create Grad Tensor
   std::vector<paddle::Tensor> grad_tensors;
   paddle::Tensor grad_tensor0 =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            5.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        false /*is_leaf*/);
   paddle::Tensor grad_tensor1 =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            10.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        10.0 /*value*/,
+                                        false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor0));
   grad_tensors.emplace_back(std::move(grad_tensor1));
...
@@ -30,8 +30,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -65,12 +63,13 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
   paddle::Tensor& target_tensor = target_tensors[0];
@@ -140,12 +139,13 @@ TEST(RetainGrad, HookAfterRetainGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
   paddle::Tensor& target_tensor = target_tensors[0];
...
@@ -33,8 +33,6 @@ PD_DECLARE_KERNEL(add_grad, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid_grad, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -69,12 +67,13 @@ void test_sigmoid(bool is_remove_gradient_hook) {
   paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
 
   VLOG(6) << "Make paddle::Tensor";
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                0.0,
-                                                true);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        0.0,
+                                        true);
 
   VLOG(6) << "Make ReduceHook function";
   auto reduce_hook = [&](void) -> void {
@@ -133,21 +132,23 @@ void test_elementwiseAdd(bool is_remove_gradient_hook) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           3.0,
-                                           true);
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        3.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(X);
 
   paddle::framework::DDim ddimY = phi::make_ddim({4, 16});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           2.0,
-                                           true);
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        2.0,
+                                        true);
 
   auto reduce_hook = [&]() -> void {
     auto* t_ptr =
@@ -195,21 +196,23 @@ void test_matmul(bool is_remove_gradient_hook) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           3.0,
-                                           true);
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        3.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(X);
 
   paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           2.0,
-                                           true);
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        2.0,
+                                        true);
 
   auto reduce_hook = [&](void) -> void {
     auto* t_ptr =
@@ -256,21 +259,23 @@ void test_backward_final_hooks() {
   VLOG(6) << "Make paddle::Tensor";
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           3.0,
-                                           true);
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        3.0,
+                                        true);
 
   paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
   egr_utils_api::RetainGradForTensor(X);
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           2.0,
-                                           true);
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        2.0,
+                                        true);
 
   VLOG(6) << "Make ReduceHook function";
   auto backward_final_hook = [&](void) -> void {
...
@@ -24,8 +24,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 TEST(TensorUtils, Test) {
@@ -37,19 +35,21 @@ TEST(TensorUtils, Test) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           5.0 /*value*/,
-                                           true /*is_leaf*/);
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
 
-  paddle::Tensor t_grad = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor t_grad =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
 
   CHECK_EQ(EagerUtils::IsLeafTensor(t), true);
...
@@ -65,8 +65,6 @@ PD_DECLARE_KERNEL(bitwise_not, KPS, ALL_LAYOUT);
 #endif
 
-using eager_test::CreateTensorWithValue;
-
 namespace paddle {
 namespace prim {
@@ -77,19 +75,21 @@ TEST(EagerPrim, TanhBackwardTest) {
   paddle::prim::InitTensorOperants();
   // 2. pre
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 5.0 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor0);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 5.0 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor1);
   // 3. Run Forward once
   paddle::Tensor out0 = tanh_ad_func(tensor0);
@@ -132,19 +132,21 @@ TEST(EagerPrim, LogicalOperantsTest) {
   paddle::prim::InitTensorOperants();
   // 2. pre
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::INT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::INT32,
+                                        phi::DataLayout::NCHW,
+                                        1 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor0);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::INT32,
-                                                 phi::DataLayout::NCHW,
-                                                 0 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::INT32,
+                                        phi::DataLayout::NCHW,
+                                        0 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor1);
   // 3. Run Forward once
   paddle::Tensor out0 = tensor0 & tensor1;
@@ -168,19 +170,21 @@ TEST(EagerPrim, CompareOperantsTest) {
   paddle::prim::InitTensorOperants();
   // 2. pre
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::INT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::INT32,
+                                        phi::DataLayout::NCHW,
+                                        1 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor0);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::INT32,
-                                                 phi::DataLayout::NCHW,
-                                                 0 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::INT32,
+                                        phi::DataLayout::NCHW,
+                                        0 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor1);
   // 3. Run Forward once
   paddle::Tensor out0 = (tensor0 < tensor1);
...
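For reference, every call site in this diff passes the same six arguments, so the helper's shape can be inferred from usage alone; the declaring header is not part of this diff, so the sketch below is an assumption, not the verbatim declaration:

    namespace eager_test {
    // Inferred from the call sites above: create a tensor with the given
    // shape on the given place, fill it with `value`, and mark whether it
    // is a leaf of the autograd graph. Names and exact types may differ
    // from the real declaration.
    paddle::Tensor CreateTensorWithValue(const paddle::framework::DDim& ddim,
                                         const paddle::platform::Place& place,
                                         const phi::DataType& dtype,
                                         const phi::DataLayout& layout,
                                         float value,
                                         bool is_leaf);
    }  // namespace eager_test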