Unverified commit 77bef883, authored by gouzil, committed by GitHub

[build] fix no member named 'CreateTensorWithValue' (#55042)

Parent 3e5018df
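The diff below removes the `using eager_test::CreateTensorWithValue;` declarations and fully qualifies every call site as `eager_test::CreateTensorWithValue(...)`. A minimal, self-contained sketch of that pattern is shown here; it is not Paddle code, and the `Tensor` type and helper body are hypothetical stand-ins that only mirror the names appearing in the diff.

```cpp
// Illustration of the change pattern: call a namespaced test helper
// fully qualified instead of relying on a using-declaration.
#include <iostream>
#include <vector>

// Hypothetical placeholder for the real tensor type.
struct Tensor {
  std::vector<long> dims;
  float value;
};

namespace eager_test {
// Hypothetical stand-in for the test utility referenced by the diff.
Tensor CreateTensorWithValue(const std::vector<long>& dims, float value) {
  return Tensor{dims, value};
}
}  // namespace eager_test

int main() {
  // Before the fix (per the diff) call sites depended on
  //   using eager_test::CreateTensorWithValue;
  // and called the helper unqualified. After the fix the using-declaration
  // is gone and each call names the namespace explicitly:
  Tensor t = eager_test::CreateTensorWithValue({2, 4, 4, 4}, 5.0f);
  std::cout << t.dims.size() << " dims, value " << t.value << "\n";
  return 0;
}
```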
@@ -44,15 +44,14 @@ PD_DECLARE_KERNEL(sum_grad, CPU, ALL_LAYOUT);
 using namespace egr;            // NOLINT
 using namespace egr_utils_api;  // NOLINT
 
-using eager_test::CreateTensorWithValue;
-
 TEST(Benchmark, EagerScaleCPU) {
   // Prepare Device Contexts
   eager_test::InitEnv(paddle::platform::CPUPlace());
 
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
-    paddle::Tensor tensor = CreateTensorWithValue(ddim,
+    paddle::Tensor tensor =
+        eager_test::CreateTensorWithValue(ddim,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -91,7 +90,8 @@ TEST(Benchmark, EagerMatmulCPU) {
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -100,7 +100,8 @@ TEST(Benchmark, EagerMatmulCPU) {
     RetainGradForTensor(X);
 
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -141,7 +142,8 @@ TEST(Benchmark, EagerIntermediateMatmulCPU) {
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -150,7 +152,8 @@ TEST(Benchmark, EagerIntermediateMatmulCPU) {
     RetainGradForTensor(X);
 
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -191,7 +194,8 @@ TEST(Benchmark, EagerIntermediateMLPCPU) {
   for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({MLP_M, MLP_N});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -203,7 +207,8 @@ TEST(Benchmark, EagerIntermediateMLPCPU) {
     std::vector<paddle::Tensor> Bs;
     for (size_t i = 0; i < MLP_NUM_LINEAR; i++) {
       paddle::framework::DDim ddimW = phi::make_ddim({MLP_N, MLP_K});
-      paddle::Tensor W = CreateTensorWithValue(ddimW,
+      paddle::Tensor W =
+          eager_test::CreateTensorWithValue(ddimW,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -212,7 +217,8 @@ TEST(Benchmark, EagerIntermediateMLPCPU) {
       RetainGradForTensor(W);
 
       paddle::framework::DDim ddimB = phi::make_ddim({MLP_K});
-      paddle::Tensor B = CreateTensorWithValue(ddimB,
+      paddle::Tensor B =
+          eager_test::CreateTensorWithValue(ddimB,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
......
@@ -45,14 +45,13 @@ PD_DECLARE_KERNEL(add_grad, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sum, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sum_grad, GPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 TEST(Benchmark, EagerScaleCUDA) {
   eager_test::InitEnv(paddle::platform::CUDAPlace());
 
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
-    paddle::Tensor tensor = CreateTensorWithValue(ddim,
+    paddle::Tensor tensor =
+        eager_test::CreateTensorWithValue(ddim,
                                           paddle::platform::CUDAPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -93,7 +92,8 @@ TEST(Benchmark, EagerMatmulCUDA) {
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
                                           paddle::platform::CUDAPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -102,7 +102,8 @@ TEST(Benchmark, EagerMatmulCUDA) {
     RetainGradForTensor(X);
 
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
                                           paddle::platform::CUDAPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -147,7 +148,8 @@ TEST(Benchmark, EagerIntermediateMatmulCUDA) {
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
                                           paddle::platform::CUDAPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -156,7 +158,8 @@ TEST(Benchmark, EagerIntermediateMatmulCUDA) {
     RetainGradForTensor(X);
 
     paddle::framework::DDim ddimY = phi::make_ddim({2, 2});
-    paddle::Tensor Y = CreateTensorWithValue(ddimY,
+    paddle::Tensor Y =
+        eager_test::CreateTensorWithValue(ddimY,
                                           paddle::platform::CUDAPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -201,7 +204,8 @@ TEST(Benchmark, EagerIntermediateMLPCUDA) {
   for (const std::string mode : {"Accuracy", "WarmUp", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({MLP_M, MLP_N});
-    paddle::Tensor X = CreateTensorWithValue(ddimX,
+    paddle::Tensor X =
+        eager_test::CreateTensorWithValue(ddimX,
                                           paddle::platform::CUDAPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -213,7 +217,8 @@ TEST(Benchmark, EagerIntermediateMLPCUDA) {
     std::vector<paddle::Tensor> Bs;
     for (size_t i = 0; i < MLP_NUM_LINEAR; i++) {
       paddle::framework::DDim ddimW = phi::make_ddim({MLP_N, MLP_K});
-      paddle::Tensor W = CreateTensorWithValue(ddimW,
+      paddle::Tensor W =
+          eager_test::CreateTensorWithValue(ddimW,
                                           paddle::platform::CUDAPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -222,7 +227,8 @@ TEST(Benchmark, EagerIntermediateMLPCUDA) {
       RetainGradForTensor(W);
 
       paddle::framework::DDim ddimB = phi::make_ddim({MLP_K});
-      paddle::Tensor B = CreateTensorWithValue(ddimB,
+      paddle::Tensor B =
+          eager_test::CreateTensorWithValue(ddimB,
                                           paddle::platform::CUDAPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
......
@@ -31,8 +31,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 TEST(Backward, SingleNodeEmptyGrad) {
@@ -44,7 +42,7 @@ TEST(Backward, SingleNodeEmptyGrad) {
   // Create Target Tensor
   paddle::Tensor target_tensor =
-      CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                             paddle::platform::CPUPlace(),
                             phi::DataType::FLOAT32,
                             phi::DataLayout::NCHW,
@@ -95,7 +93,8 @@ TEST(Backward, SingleNodeCustomGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -106,7 +105,7 @@ TEST(Backward, SingleNodeCustomGrad) {
   std::vector<paddle::Tensor> grad_tensors;
   // Create Grad Tensor
   paddle::Tensor grad_tensor =
-      CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                             paddle::platform::CPUPlace(),
                             phi::DataType::FLOAT32,
                             phi::DataLayout::NCHW,
@@ -166,7 +165,8 @@ TEST(Backward, LinearNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -241,13 +241,15 @@ TEST(Backward, WithAccumulation) {
   // Create Target Tensor
   std::vector<paddle::Tensor> target_tensors;
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
                                         1.0 /*value*/,
                                         false /*is_leaf*/);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -259,14 +261,14 @@ TEST(Backward, WithAccumulation) {
   // Create Grad Tensor
   std::vector<paddle::Tensor> grad_tensors;
   paddle::Tensor grad_tensor0 =
-      CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                             paddle::platform::CPUPlace(),
                             phi::DataType::FLOAT32,
                             phi::DataLayout::NCHW,
                             5.0 /*value*/,
                             false /*is_leaf*/);
   paddle::Tensor grad_tensor1 =
-      CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                             paddle::platform::CPUPlace(),
                             phi::DataType::FLOAT32,
                             phi::DataLayout::NCHW,
......
@@ -27,8 +27,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 TEST(Forward, SingleNode) {
@@ -40,7 +38,8 @@ TEST(Forward, SingleNode) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -89,7 +88,8 @@ TEST(Forward, LinearNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -174,7 +174,8 @@ TEST(Forward, BranchedNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
......
@@ -35,8 +35,6 @@ PD_DECLARE_KERNEL(full, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(add, KPS, ALL_LAYOUT);
 #endif
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -67,7 +65,8 @@ TEST(FwdBwdJoint, SingleNode) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -110,7 +109,8 @@ TEST(FwdBwdJoint, LinearNodes) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -163,7 +163,8 @@ TEST(FwdBwdJoint, BranchedNodes) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -235,7 +236,8 @@ TEST(FwdBwdJoint, GradientHook) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -307,7 +309,8 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -361,7 +364,8 @@ TEST(FwdBwdJoint, SingleNodeCUDA) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CUDAPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -401,7 +405,8 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CUDAPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
......
@@ -34,8 +34,6 @@ PD_DECLARE_KERNEL(add_grad, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid_grad, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 TEST(Generated, Sigmoid) {
@@ -45,7 +43,8 @@ TEST(Generated, Sigmoid) {
   // 1. Prepare Input
   paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
   VLOG(6) << "Make Dim";
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -75,7 +74,8 @@ TEST(Generated, Matmul_v2) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -84,7 +84,8 @@ TEST(Generated, Matmul_v2) {
   egr_utils_api::RetainGradForTensor(X);
 
   paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -113,7 +114,8 @@ TEST(Generated, ElementwiseAdd) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -122,7 +124,8 @@ TEST(Generated, ElementwiseAdd) {
   egr_utils_api::RetainGradForTensor(X);
 
   paddle::framework::DDim ddimY = phi::make_ddim({4, 16});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
......
@@ -30,8 +30,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 TEST(Grad, SingleNodeEmptyGrad) {
@@ -43,7 +41,7 @@ TEST(Grad, SingleNodeEmptyGrad) {
   // Create Target Tensor (output)
   paddle::Tensor output_tensor =
-      CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                             paddle::platform::CPUPlace(),
                             phi::DataType::FLOAT32,
                             phi::DataLayout::NCHW,
@@ -52,7 +50,7 @@ TEST(Grad, SingleNodeEmptyGrad) {
   // Create input tensor
   const paddle::Tensor leaf_tensor =
-      CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                             paddle::platform::CPUPlace(),
                             phi::DataType::FLOAT32,
                             phi::DataLayout::NCHW,
@@ -109,7 +107,8 @@ TEST(Grad, SingleNodeCustomGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -120,7 +119,7 @@ TEST(Grad, SingleNodeCustomGrad) {
   std::vector<paddle::Tensor> grad_tensors;
   // Create Grad Tensor
   paddle::Tensor grad_tensor =
-      CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                             paddle::platform::CPUPlace(),
                             phi::DataType::FLOAT32,
                             phi::DataLayout::NCHW,
@@ -129,7 +128,7 @@ TEST(Grad, SingleNodeCustomGrad) {
   grad_tensors.emplace_back(std::move(grad_tensor));
 
   paddle::Tensor leaf_tensor =
-      CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                             paddle::platform::CPUPlace(),
                             phi::DataType::FLOAT32,
                             phi::DataLayout::NCHW,
@@ -187,7 +186,8 @@ TEST(Grad, LinearNodes) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -196,7 +196,7 @@ TEST(Grad, LinearNodes) {
   target_tensors.emplace_back(std::move(tensor));
 
   paddle::Tensor leaf_tensor =
-      CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                             paddle::platform::CPUPlace(),
                             phi::DataType::FLOAT32,
                             phi::DataLayout::NCHW,
@@ -268,13 +268,15 @@ TEST(Grad, WithAccumulation) {
   // Create Target Tensor
   std::vector<paddle::Tensor> target_tensors;
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
                                         1.0 /*value*/,
                                         false /*is_leaf*/);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -286,14 +288,14 @@ TEST(Grad, WithAccumulation) {
   // Create Grad Tensor
   std::vector<paddle::Tensor> grad_tensors;
   paddle::Tensor grad_tensor0 =
-      CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                             paddle::platform::CPUPlace(),
                             phi::DataType::FLOAT32,
                             phi::DataLayout::NCHW,
                             5.0 /*value*/,
                             false /*is_leaf*/);
   paddle::Tensor grad_tensor1 =
-      CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                             paddle::platform::CPUPlace(),
                             phi::DataType::FLOAT32,
                             phi::DataLayout::NCHW,
......
@@ -30,8 +30,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -65,7 +63,8 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -140,7 +139,8 @@ TEST(RetainGrad, HookAfterRetainGrad) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
......
@@ -33,8 +33,6 @@ PD_DECLARE_KERNEL(add_grad, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(sigmoid_grad, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -69,7 +67,8 @@ void test_sigmoid(bool is_remove_gradient_hook) {
   paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
 
   VLOG(6) << "Make paddle::Tensor";
-  paddle::Tensor tensor = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -133,7 +132,8 @@ void test_elementwiseAdd(bool is_remove_gradient_hook) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -142,7 +142,8 @@ void test_elementwiseAdd(bool is_remove_gradient_hook) {
   egr_utils_api::RetainGradForTensor(X);
 
   paddle::framework::DDim ddimY = phi::make_ddim({4, 16});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -195,7 +196,8 @@ void test_matmul(bool is_remove_gradient_hook) {
   // 1. Prepare Input
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -204,7 +206,8 @@ void test_matmul(bool is_remove_gradient_hook) {
   egr_utils_api::RetainGradForTensor(X);
 
   paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -256,7 +259,8 @@ void test_backward_final_hooks() {
   VLOG(6) << "Make paddle::Tensor";
   paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X = CreateTensorWithValue(ddimX,
+  paddle::Tensor X =
+      eager_test::CreateTensorWithValue(ddimX,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -265,7 +269,8 @@ void test_backward_final_hooks() {
   paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
   egr_utils_api::RetainGradForTensor(X);
 
-  paddle::Tensor Y = CreateTensorWithValue(ddimY,
+  paddle::Tensor Y =
+      eager_test::CreateTensorWithValue(ddimY,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
......
@@ -24,8 +24,6 @@
 PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
 
-using eager_test::CreateTensorWithValue;
-
 namespace egr {
 
 TEST(TensorUtils, Test) {
@@ -37,14 +35,16 @@ TEST(TensorUtils, Test) {
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
 
   // Create Target Tensor
-  paddle::Tensor t = CreateTensorWithValue(ddim,
+  paddle::Tensor t =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
                                         5.0 /*value*/,
                                         true /*is_leaf*/);
 
-  paddle::Tensor t_grad = CreateTensorWithValue(ddim,
+  paddle::Tensor t_grad =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
......
@@ -65,8 +65,6 @@ PD_DECLARE_KERNEL(bitwise_not, KPS, ALL_LAYOUT);
 #endif
 
-using eager_test::CreateTensorWithValue;
-
 namespace paddle {
 namespace prim {
 
@@ -77,14 +75,16 @@ TEST(EagerPrim, TanhBackwardTest) {
   paddle::prim::InitTensorOperants();
   // 2. pre
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
                                         5.0 /*value*/,
                                         true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor0);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::FLOAT32,
                                         phi::DataLayout::NCHW,
@@ -132,14 +132,16 @@ TEST(EagerPrim, LogicalOperantsTest) {
   paddle::prim::InitTensorOperants();
   // 2. pre
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::INT32,
                                         phi::DataLayout::NCHW,
                                         1 /*value*/,
                                         true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor0);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::INT32,
                                         phi::DataLayout::NCHW,
@@ -168,14 +170,16 @@ TEST(EagerPrim, CompareOperantsTest) {
   paddle::prim::InitTensorOperants();
   // 2. pre
   paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor0 =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::INT32,
                                         phi::DataLayout::NCHW,
                                         1 /*value*/,
                                         true /*is_leaf*/);
   ::egr::egr_utils_api::RetainGradForTensor(tensor0);
-  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
+  paddle::Tensor tensor1 =
+      eager_test::CreateTensorWithValue(ddim,
                                         paddle::platform::CPUPlace(),
                                         phi::DataType::INT32,
                                         phi::DataLayout::NCHW,
......