Unverified commit 77bef883, authored by: G gouzil, committed by: GitHub

[build] fix no member named 'CreateTensorWithValue' (#55042)

Parent 3e5018df
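Every hunk below applies the same mechanical change: the file-scope `using eager_test::CreateTensorWithValue;` declaration is removed, and each call site is fully qualified as `eager_test::CreateTensorWithValue(...)`. A using-declaration is itself an error when the named member is not declared at that point (clang's wording is exactly "no member named 'CreateTensorWithValue'"), which is presumably why the declaration, rather than the call sites, was the build problem. A minimal sketch of the pattern follows; the helper is a simplified stand-in, not the real Paddle signature:

```cpp
// Build: c++ -std=c++17 example.cc
#include <iostream>

namespace eager_test {
// Simplified stand-in for the real test helper, which takes a DDim,
// a place, a dtype, a layout, a fill value, and an is_leaf flag.
double CreateTensorWithValue(double value) { return value; }
}  // namespace eager_test

// The commit drops the file-scope declaration
//   using eager_test::CreateTensorWithValue;
// (which fails to compile whenever the member is not yet declared in
// namespace eager_test) and instead qualifies every call site:
int main() {
  double t = eager_test::CreateTensorWithValue(5.0);  // qualified call
  std::cout << t << "\n";
  return 0;
}
```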
@@ -44,20 +44,19 @@ PD_DECLARE_KERNEL(sum_grad, CPU, ALL_LAYOUT);
 using namespace egr; // NOLINT
 using namespace egr_utils_api; // NOLINT
-using eager_test::CreateTensorWithValue;
 TEST(Benchmark, EagerScaleCPU) {
   // Prepare Device Contexts
   eager_test::InitEnv(paddle::platform::CPUPlace());
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
-    paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                  paddle::platform::CPUPlace(),
-                                                  phi::DataType::FLOAT32,
-                                                  phi::DataLayout::NCHW,
-                                                  5.0,
-                                                  true);
+    paddle::Tensor tensor =
+        eager_test::CreateTensorWithValue(ddim,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          5.0,
+                                          true);
     RetainGradForTensor(tensor);
     if (mode == "Accuracy") {
@@ -91,21 +90,23 @@ TEST(Benchmark, EagerMatmulCPU) {
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CPUPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             1.0,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          1.0,
+                                          true);
     RetainGradForTensor(X);
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                             paddle::platform::CPUPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             2.0,
-                                             true);
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          2.0,
+                                          true);
     RetainGradForTensor(Y);
     if (mode == "Accuracy") {
@@ -141,21 +142,23 @@ TEST(Benchmark, EagerIntermediateMatmulCPU) {
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CPUPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             1.0,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          1.0,
+                                          true);
     RetainGradForTensor(X);
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                             paddle::platform::CPUPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             2.0,
-                                             true);
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          2.0,
+                                          true);
     RetainGradForTensor(Y);
     if (mode == "Accuracy") {
@@ -191,33 +194,36 @@ TEST(Benchmark, EagerIntermediateMLPCPU) {
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({MLP_M, MLP_N});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CPUPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             MLP_X_VAL,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CPUPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          MLP_X_VAL,
+                                          true);
     RetainGradForTensor(X);
     std::vector<paddle::Tensor> Ws;
     std::vector<paddle::Tensor> Bs;
     for (size_t i = 0; i < MLP_NUM_LINEAR; i++) {
       paddle::framework::DDim ddimW = phi::make_ddim({MLP_N, MLP_K});
-      paddle::Tensor W = CreateTensorWithValue(ddimW,
-                                               paddle::platform::CPUPlace(),
-                                               phi::DataType::FLOAT32,
-                                               phi::DataLayout::NCHW,
-                                               MLP_W_VAL,
-                                               true);
+      paddle::Tensor W =
+          eager_test::CreateTensorWithValue(ddimW,
+                                            paddle::platform::CPUPlace(),
+                                            phi::DataType::FLOAT32,
+                                            phi::DataLayout::NCHW,
+                                            MLP_W_VAL,
+                                            true);
       RetainGradForTensor(W);
       paddle::framework::DDim ddimB = phi::make_ddim({MLP_K});
-      paddle::Tensor B = CreateTensorWithValue(ddimB,
-                                               paddle::platform::CPUPlace(),
-                                               phi::DataType::FLOAT32,
-                                               phi::DataLayout::NCHW,
-                                               MLP_B_VAL,
-                                               true);
+      paddle::Tensor B =
+          eager_test::CreateTensorWithValue(ddimB,
+                                            paddle::platform::CPUPlace(),
+                                            phi::DataType::FLOAT32,
+                                            phi::DataLayout::NCHW,
+                                            MLP_B_VAL,
+                                            true);
       RetainGradForTensor(B);
       Ws.emplace_back(std::move(W));
@@ -45,19 +45,18 @@ PD_DECLARE_KERNEL(add_grad, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sum, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sum_grad, GPU, ALL_LAYOUT);
-using eager_test::CreateTensorWithValue;
 TEST(Benchmark, EagerScaleCUDA) {
   eager_test::InitEnv(paddle::platform::CUDAPlace());
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
-    paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                  paddle::platform::CUDAPlace(),
-                                                  phi::DataType::FLOAT32,
-                                                  phi::DataLayout::NCHW,
-                                                  5.0 /*value*/,
-                                                  true /*is_leaf*/);
+    paddle::Tensor tensor =
+        eager_test::CreateTensorWithValue(ddim,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          5.0 /*value*/,
+                                          true /*is_leaf*/);
     RetainGradForTensor(tensor);
     if (mode == "Accuracy") {
@@ -93,21 +92,23 @@ TEST(Benchmark, EagerMatmulCUDA) {
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CUDAPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             1.0,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          1.0,
+                                          true);
     RetainGradForTensor(X);
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                             paddle::platform::CUDAPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             2.0,
-                                             true);
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          2.0,
+                                          true);
     RetainGradForTensor(Y);
     if (mode == "Accuracy") {
@@ -147,21 +148,23 @@ TEST(Benchmark, EagerIntermediateMatmulCUDA) {
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CUDAPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             1.0,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          1.0,
+                                          true);
     RetainGradForTensor(X);
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                             paddle::platform::CUDAPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             2.0,
-                                             true);
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          2.0,
+                                          true);
     RetainGradForTensor(Y);
     if (mode == "Accuracy") {
@@ -201,33 +204,36 @@ TEST(Benchmark, EagerIntermediateMLPCUDA) {
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({MLP_M, MLP_N});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                             paddle::platform::CUDAPlace(),
-                                             phi::DataType::FLOAT32,
-                                             phi::DataLayout::NCHW,
-                                             MLP_X_VAL,
-                                             true);
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
+                                          paddle::platform::CUDAPlace(),
+                                          phi::DataType::FLOAT32,
+                                          phi::DataLayout::NCHW,
+                                          MLP_X_VAL,
+                                          true);
     RetainGradForTensor(X);
     std::vector<paddle::Tensor> Ws;
     std::vector<paddle::Tensor> Bs;
     for (size_t i = 0; i < MLP_NUM_LINEAR; i++) {
       paddle::framework::DDim ddimW = phi::make_ddim({MLP_N, MLP_K});
-      paddle::Tensor W = CreateTensorWithValue(ddimW,
-                                               paddle::platform::CUDAPlace(),
-                                               phi::DataType::FLOAT32,
-                                               phi::DataLayout::NCHW,
-                                               MLP_W_VAL,
-                                               true);
+      paddle::Tensor W =
+          eager_test::CreateTensorWithValue(ddimW,
+                                            paddle::platform::CUDAPlace(),
+                                            phi::DataType::FLOAT32,
+                                            phi::DataLayout::NCHW,
+                                            MLP_W_VAL,
+                                            true);
       RetainGradForTensor(W);
       paddle::framework::DDim ddimB = phi::make_ddim({MLP_K});
-      paddle::Tensor B = CreateTensorWithValue(ddimB,
-                                               paddle::platform::CUDAPlace(),
-                                               phi::DataType::FLOAT32,
-                                               phi::DataLayout::NCHW,
-                                               MLP_B_VAL,
-                                               true);
+      paddle::Tensor B =
+          eager_test::CreateTensorWithValue(ddimB,
+                                            paddle::platform::CUDAPlace(),
+                                            phi::DataType::FLOAT32,
+                                            phi::DataLayout::NCHW,
+                                            MLP_B_VAL,
+                                            true);
       RetainGradForTensor(B);
       Ws.emplace_back(std::move(W));
@@ -31,8 +31,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
-using eager_test::CreateTensorWithValue;
 namespace egr {
 TEST(Backward, SingleNodeEmptyGrad) {
@@ -44,12 +42,12 @@ TEST(Backward, SingleNodeEmptyGrad) {
   // Create Target Tensor
   paddle::Tensor target_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            1.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   paddle::Tensor leaf_tensor;
   {
@@ -95,23 +93,24 @@ TEST(Backward, SingleNodeCustomGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
   std::vector<paddle::Tensor> grad_tensors;
   // Create Grad Tensor
   paddle::Tensor grad_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            10.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        10.0 /*value*/,
+                                        false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor));
   paddle::Tensor leaf_tensor;
@@ -166,12 +165,13 @@ TEST(Backward, LinearNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
   paddle::Tensor leaf_tensor;
@@ -241,37 +241,39 @@ TEST(Backward, WithAccumulation) {
   // Create Target Tensor
   std::vector<paddle::Tensor> target_tensors;
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1.0 /*value*/,
-                                                 false /*is_leaf*/);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1.0 /*value*/,
-                                                 false /*is_leaf*/);
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor0));
   target_tensors.emplace_back(std::move(tensor1));
   // Create Grad Tensor
   std::vector<paddle::Tensor> grad_tensors;
   paddle::Tensor grad_tensor0 =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            5.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        false /*is_leaf*/);
   paddle::Tensor grad_tensor1 =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            10.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        10.0 /*value*/,
+                                        false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor0));
   grad_tensors.emplace_back(std::move(grad_tensor1));
@@ -27,8 +27,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
-using eager_test::CreateTensorWithValue;
 namespace egr {
 TEST(Forward, SingleNode) {
@@ -40,12 +38,13 @@ TEST(Forward, SingleNode) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           5.0 /*value*/,
-                                           false /*is_leaf*/);
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
   paddle::Tensor& tensor = target_tensors[0];
   EagerUtils::autograd_meta(&tensor)->SetStopGradient(false);
@@ -89,12 +88,13 @@ TEST(Forward, LinearNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           5.0 /*value*/,
-                                           false /*is_leaf*/);
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
   paddle::Tensor& tensor = target_tensors[0];
   EagerUtils::autograd_meta(&tensor)->SetStopGradient(false);
@@ -174,12 +174,13 @@ TEST(Forward, BranchedNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           5.0 /*value*/,
-                                           false /*is_leaf*/);
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
   paddle::Tensor& tensor = target_tensors[0];
   EagerUtils::autograd_meta(&tensor)->SetStopGradient(false);
@@ -35,8 +35,6 @@ PD_DECLARE_KERNEL(full, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(add, KPS, ALL_LAYOUT);
 #endif
-using eager_test::CreateTensorWithValue;
 namespace egr {
 paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -67,12 +65,13 @@ TEST(FwdBwdJoint, SingleNode) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
@@ -110,12 +109,13 @@ TEST(FwdBwdJoint, LinearNodes) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
@@ -163,12 +163,13 @@ TEST(FwdBwdJoint, BranchedNodes) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
@@ -235,12 +236,13 @@ TEST(FwdBwdJoint, GradientHook) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
@@ -307,12 +309,13 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
@@ -361,12 +364,13 @@ TEST(FwdBwdJoint, SingleNodeCUDA) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CUDAPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CUDAPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
@@ -401,12 +405,13 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CUDAPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                5.0 /*value*/,
-                                                true /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CUDAPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
@@ -34,8 +34,6 @@ PD_DECLARE_KERNEL(add_grad, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid_grad, CPU, ALL_LAYOUT);
-using eager_test::CreateTensorWithValue;
 namespace egr {
 TEST(Generated, Sigmoid) {
@@ -45,12 +43,13 @@ TEST(Generated, Sigmoid) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
   VLOG(6) << "Make Dim";
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                0.0,
-                                                true);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        0.0,
+                                        true);
   VLOG(6) << "Make paddle::Tensor";
   egr_utils_api::RetainGradForTensor(tensor);
   VLOG(6) << "Retain Grad for Tensor";
@@ -75,21 +74,23 @@ TEST(Generated, Matmul_v2) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           3.0,
-                                           true);
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        3.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(X);
   paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           2.0,
-                                           true);
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        2.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(Y);
   auto output_tensor = matmul_v2_dygraph_function(
@@ -113,21 +114,23 @@ TEST(Generated, ElementwiseAdd) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           3.0,
-                                           true);
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        3.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(X);
   paddle::framework::DDim ddimY = phi::make_ddim({4, 16});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           2.0,
-                                           true);
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        2.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(Y);
   auto output_tensor = elementwise_add_dygraph_function(X, Y, {});
@@ -30,8 +30,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
-using eager_test::CreateTensorWithValue;
 namespace egr {
 TEST(Grad, SingleNodeEmptyGrad) {
@@ -43,21 +41,21 @@ TEST(Grad, SingleNodeEmptyGrad) {
   // Create Target Tensor (output)
   paddle::Tensor output_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            1.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   // Create input tensor
   const paddle::Tensor leaf_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            1.0 /*value*/,
-                            true /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        true /*is_leaf*/);
   {
     // Create Scale Node
@@ -109,32 +107,33 @@ TEST(Grad, SingleNodeCustomGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
   std::vector<paddle::Tensor> grad_tensors;
   // Create Grad Tensor
   paddle::Tensor grad_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            10.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        10.0 /*value*/,
+                                        false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor));
   paddle::Tensor leaf_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            1.0 /*value*/,
-                            true /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        true /*is_leaf*/);
   {
     // Create Scale Node
@@ -187,21 +186,22 @@ TEST(Grad, LinearNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
   paddle::Tensor leaf_tensor =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            1.0 /*value*/,
-                            true /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        true /*is_leaf*/);
   {
     // Create Node0
     auto node0_ptr = std::make_shared<GradNodeScale>(1, 1);
@@ -268,37 +268,39 @@ TEST(Grad, WithAccumulation) {
   // Create Target Tensor
   std::vector<paddle::Tensor> target_tensors;
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1.0 /*value*/,
-                                                 false /*is_leaf*/);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1.0 /*value*/,
-                                                 false /*is_leaf*/);
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor0));
   target_tensors.emplace_back(std::move(tensor1));
   // Create Grad Tensor
   std::vector<paddle::Tensor> grad_tensors;
   paddle::Tensor grad_tensor0 =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            5.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        false /*is_leaf*/);
   paddle::Tensor grad_tensor1 =
-      CreateTensorWithValue(ddim,
-                            paddle::platform::CPUPlace(),
-                            phi::DataType::FLOAT32,
-                            phi::DataLayout::NCHW,
-                            10.0 /*value*/,
-                            false /*is_leaf*/);
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        10.0 /*value*/,
+                                        false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor0));
   grad_tensors.emplace_back(std::move(grad_tensor1));
@@ -30,8 +30,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
-using eager_test::CreateTensorWithValue;
 namespace egr {
 paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -65,12 +63,13 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
   paddle::Tensor& target_tensor = target_tensors[0];
@@ -140,12 +139,13 @@ TEST(RetainGrad, HookAfterRetainGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
   paddle::Tensor& target_tensor = target_tensors[0];
@@ -33,8 +33,6 @@ PD_DECLARE_KERNEL(add_grad, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid_grad, CPU, ALL_LAYOUT);
-using eager_test::CreateTensorWithValue;
 namespace egr {
 paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -69,12 +67,13 @@ void test_sigmoid(bool is_remove_gradient_hook) {
   paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
   VLOG(6) << "Make paddle::Tensor";
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                0.0,
-                                                true);
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        0.0,
+                                        true);
   VLOG(6) << "Make ReduceHook function";
   auto reduce_hook = [&](void) -> void {
@@ -133,21 +132,23 @@ void test_elementwiseAdd(bool is_remove_gradient_hook) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           3.0,
-                                           true);
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        3.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(X);
   paddle::framework::DDim ddimY = phi::make_ddim({4, 16});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           2.0,
-                                           true);
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        2.0,
+                                        true);
   auto reduce_hook = [&]() -> void {
     auto* t_ptr =
@@ -195,21 +196,23 @@ void test_matmul(bool is_remove_gradient_hook) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           3.0,
-                                           true);
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        3.0,
+                                        true);
   egr_utils_api::RetainGradForTensor(X);
   paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           2.0,
-                                           true);
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        2.0,
+                                        true);
   auto reduce_hook = [&](void) -> void {
     auto* t_ptr =
@@ -256,21 +259,23 @@ void test_backward_final_hooks() {
   VLOG(6) << "Make paddle::Tensor";
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           3.0,
-                                           true);
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        3.0,
+                                        true);
   paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
   egr_utils_api::RetainGradForTensor(X);
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           2.0,
-                                           true);
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        2.0,
+                                        true);
   VLOG(6) << "Make ReduceHook function";
   auto backward_final_hook = [&](void) -> void {
@@ -24,8 +24,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
-using eager_test::CreateTensorWithValue;
 namespace egr {
 TEST(TensorUtils, Test) {
@@ -37,19 +35,21 @@ TEST(TensorUtils, Test) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
-                                           paddle::platform::CPUPlace(),
-                                           phi::DataType::FLOAT32,
-                                           phi::DataLayout::NCHW,
-                                           5.0 /*value*/,
-                                           true /*is_leaf*/);
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
-  paddle::Tensor t_grad = CreateTensorWithValue(ddim,
-                                                paddle::platform::CPUPlace(),
-                                                phi::DataType::FLOAT32,
-                                                phi::DataLayout::NCHW,
-                                                1.0 /*value*/,
-                                                false /*is_leaf*/);
+  paddle::Tensor t_grad =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        1.0 /*value*/,
+                                        false /*is_leaf*/);
   CHECK_EQ(EagerUtils::IsLeafTensor(t), true);
@@ -65,8 +65,6 @@ PD_DECLARE_KERNEL(bitwise_not, KPS, ALL_LAYOUT);
 #endif
-using eager_test::CreateTensorWithValue;
 namespace paddle {
 namespace prim {
@@ -77,19 +75,21 @@ TEST(EagerPrim, TanhBackwardTest) {
   paddle::prim::InitTensorOperants();
   // 2. pre
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 5.0 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor0);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::FLOAT32,
-                                                 phi::DataLayout::NCHW,
-                                                 5.0 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::FLOAT32,
+                                        phi::DataLayout::NCHW,
+                                        5.0 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor1);
   // 3. Run Forward once
   paddle::Tensor out0 = tanh_ad_func(tensor0);
@@ -132,19 +132,21 @@ TEST(EagerPrim, LogicalOperantsTest) {
   paddle::prim::InitTensorOperants();
   // 2. pre
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::INT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::INT32,
+                                        phi::DataLayout::NCHW,
+                                        1 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor0);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::INT32,
-                                                 phi::DataLayout::NCHW,
-                                                 0 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::INT32,
+                                        phi::DataLayout::NCHW,
+                                        0 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor1);
   // 3. Run Forward once
   paddle::Tensor out0 = tensor0 & tensor1;
@@ -168,19 +170,21 @@ TEST(EagerPrim, CompareOperantsTest) {
   paddle::prim::InitTensorOperants();
   // 2. pre
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::INT32,
-                                                 phi::DataLayout::NCHW,
-                                                 1 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::INT32,
+                                        phi::DataLayout::NCHW,
+                                        1 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor0);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
-                                                 paddle::platform::CPUPlace(),
-                                                 phi::DataType::INT32,
-                                                 phi::DataLayout::NCHW,
-                                                 0 /*value*/,
-                                                 true /*is_leaf*/);
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
+                                        paddle::platform::CPUPlace(),
+                                        phi::DataType::INT32,
+                                        phi::DataLayout::NCHW,
+                                        0 /*value*/,
+                                        true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor1);
   // 3. Run Forward once
   paddle::Tensor out0 = (tensor0 < tensor1);