Commit 3d2ec707 (unverified)
Authored by Zhanlue Yang on Nov 30, 2021; committed via GitHub on Nov 30, 2021.
Eager dygraph egr_utils_api namespace refactor (#37654)
* Refactored eager legacy namespace
* Fixed namespace issues
Parent: eb9e3305
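
The refactor itself is mechanical: the utilities under paddle/fluid/eager/api/utils move from namespace egr into a nested egr::egr_utils_api namespace, and every call site is requalified. A minimal standalone sketch of the pattern, assuming nothing beyond standard C++ (DoSomething is a hypothetical stand-in for the moved utilities such as RetainGradForTensor):

#include <iostream>

namespace egr {
namespace egr_utils_api {  // nested namespace introduced by this commit

void DoSomething() { std::cout << "utility call\n"; }

}  // namespace egr_utils_api
}  // namespace egr

int main() {
  // Call sites change from egr::DoSomething() (or unqualified DoSomething()
  // under `using namespace egr;`) to the fully qualified form:
  egr::egr_utils_api::DoSomething();
  return 0;
}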
Showing 14 changed files with 158 additions and 172 deletions.
Changed files:
  paddle/fluid/eager/api/utils/hook_utils.cc                            +2   -0
  paddle/fluid/eager/api/utils/hook_utils.h                             +2   -0
  paddle/fluid/eager/api/utils/tensor_utils.cc                          +2   -0
  paddle/fluid/eager/api/utils/tensor_utils.h                           +2   -0
  paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc     +2   -2
  paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc    +2   -2
  paddle/fluid/eager/tests/task_tests/backward_test.cc                  +22  -24
  paddle/fluid/eager/tests/task_tests/cross_batch_accumulation_test.cc  +10  -12
  paddle/fluid/eager/tests/task_tests/eager_utils_test.cc               +5   -9
  paddle/fluid/eager/tests/task_tests/forward_autograd_test.cc          +14  -17
  paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc             +49  -52
  paddle/fluid/eager/tests/task_tests/generated_test.cc                 +15  -18
  paddle/fluid/eager/tests/task_tests/hook_test.cc                      +23  -23
  paddle/fluid/eager/tests/task_tests/tensor_utils_test.cc              +8   -13
paddle/fluid/eager/api/utils/hook_utils.cc
@@ -20,6 +20,7 @@
 #include "paddle/pten/core/dense_tensor.h"
 namespace egr {
+namespace egr_utils_api {
 void RegisterGradientHookForTensor(
     const egr::EagerTensor& tensor,
@@ -90,4 +91,5 @@ void RetainGradForTensor(const egr::EagerTensor& tensor) {
   }
 }
+}  // namespace egr_utils_api
 }  // namespace egr

paddle/fluid/eager/api/utils/hook_utils.h
@@ -18,6 +18,7 @@
 #include "paddle/fluid/eager/grad_node_info.h"
 #include "paddle/pten/api/all.h"
 namespace egr {
+namespace egr_utils_api {
 void RegisterGradientHookForTensor(
     const egr::EagerTensor& tensor,
@@ -27,4 +28,5 @@ void RegisterReduceHookForTensor(const egr::EagerTensor& tensor,
                                  const std::function<void(void)>& hook);
 void RetainGradForTensor(const egr::EagerTensor& tensor);
+}  // namespace egr_utils_api
 }  // namespace egr

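A hedged usage sketch of the relocated hook API, mirroring the call sites in the updated tests further down; it assumes a Paddle source tree at this commit, and MyHook/AttachHooks are hypothetical names (the tests use a hook_function that adds 3.0):

#include <functional>

#include "paddle/fluid/eager/api/utils/hook_utils.h"

// Hypothetical pass-through hook; any callable with this signature works.
static egr::EagerTensor MyHook(const egr::EagerTensor& t) { return t; }

void AttachHooks(const egr::EagerTensor& tensor) {
  std::function<egr::EagerTensor(const egr::EagerTensor&)> hook = &MyHook;
  // Both utilities now live in egr::egr_utils_api rather than egr directly.
  egr::egr_utils_api::RegisterGradientHookForTensor(tensor, hook);
  egr::egr_utils_api::RetainGradForTensor(tensor);
}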
paddle/fluid/eager/api/utils/tensor_utils.cc
@@ -26,6 +26,7 @@
 #include "paddle/fluid/framework/variable.h"
 namespace egr {
+namespace egr_utils_api {
 bool IsLeafTensor(const egr::EagerTensor& target) {
   std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(target);
@@ -58,4 +59,5 @@ egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim,
   return out;
 }
+}  // namespace egr_utils_api
 }  // namespace egr

paddle/fluid/eager/api/utils/tensor_utils.h
@@ -18,6 +18,7 @@
 #include "paddle/pten/api/all.h"
 namespace egr {
+namespace egr_utils_api {
 // If and only if the tensor holds an AccumulationNode
 // Then it's treated as a leaf tensor
@@ -29,4 +30,5 @@ egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim,
                                        const pten::DataLayout& layout,
                                        float value, bool is_leaf = true);
+}  // namespace egr_utils_api
 }  // namespace egr

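A hedged sketch of the relocated tensor utilities, following the pattern the updated tests use; it assumes a Paddle source tree at this commit (MakeLeaf is a hypothetical helper name, and CHECK_EQ comes from glog via Paddle's headers):

#include "paddle/fluid/eager/api/utils/tensor_utils.h"
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/platform/place.h"

egr::EagerTensor MakeLeaf() {
  paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
  // is_leaf = true, so the tensor holds an AccumulationNode and is
  // treated as a leaf (see the header comment in the diff above).
  egr::EagerTensor t = egr::egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
      pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
  // IsLeafTensor moved into egr::egr_utils_api as well.
  CHECK_EQ(egr::egr_utils_api::IsLeafTensor(t), true);
  return t;
}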
paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc
@@ -32,8 +32,8 @@
 #include "gperftools/profiler.h"
 #endif
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+using namespace egr;            // NOLINT
+using namespace egr_utils_api;  // NOLINT
 // Disable pten path
 DECLARE_bool(run_pten_kernel);

paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc
@@ -31,8 +31,8 @@
 #include "gperftools/profiler.h"
 #endif
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+using namespace egr;            // NOLINT
+using namespace egr_utils_api;  // NOLINT
 DECLARE_bool(run_pten_kernel);

paddle/fluid/eager/tests/task_tests/backward_test.cc
@@ -30,19 +30,17 @@
 #include "paddle/pten/core/dense_tensor.h"
 #include "paddle/pten/core/tensor_meta.h"
-using namespace egr;  // NOLINT
+namespace egr {
+namespace eager_test {
 TEST(Backward, SingleNodeEmptyGrad) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor target_tensor = CreateTensorWithValue(
+  egr::EagerTensor target_tensor = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
@@ -67,7 +65,7 @@ TEST(Backward, SingleNodeEmptyGrad) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
   // Connect Node0 -> AccumulationNode via Edge
   auto meta = egr::AutogradMeta();
@@ -80,26 +78,26 @@ TEST(Backward, SingleNodeEmptyGrad) {
   RunBackward(outs, {});
   // Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
 }
 TEST(Backward, SingleNodeCustomGrad) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
   std::vector<egr::EagerTensor> grad_tensors;
   // Create Grad Tensor
-  egr::EagerTensor grad_tensor = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 10.0 /*value*/, false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor));
@@ -128,7 +126,7 @@ TEST(Backward, SingleNodeCustomGrad) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
   // Connect Node0 -> AccumulationNode via Edge
   auto meta = egr::AutogradMeta();
@@ -141,7 +139,7 @@ TEST(Backward, SingleNodeCustomGrad) {
   RunBackward(target_tensors, grad_tensors);
   // Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
 }
 /*
@@ -153,14 +151,14 @@ Node0
 */
 TEST(Backward, LinearNodes) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -202,7 +200,7 @@ TEST(Backward, LinearNodes) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
   // Connect Node1 -> AccumulationNode via Edge
   auto meta1 = egr::AutogradMeta();
@@ -215,7 +213,7 @@ TEST(Backward, LinearNodes) {
   RunBackward(target_tensors, {});
   // Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
 }
 /*
@@ -227,17 +225,17 @@ Node0 Node1
 */
 TEST(Backward, WithAccumulation) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
   std::vector<egr::EagerTensor> target_tensors;
-  egr::EagerTensor tensor0 = CreateTensorWithValue(
+  egr::EagerTensor tensor0 = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
-  egr::EagerTensor tensor1 = CreateTensorWithValue(
+  egr::EagerTensor tensor1 = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor0));
@@ -245,10 +243,10 @@ TEST(Backward, WithAccumulation) {
   // Create Grad Tensor
   std::vector<egr::EagerTensor> grad_tensors;
-  egr::EagerTensor grad_tensor0 = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor0 = egr_utils_api::CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
-  egr::EagerTensor grad_tensor1 = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor1 = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 10.0 /*value*/, false /*is_leaf*/);
   grad_tensors.emplace_back(std::move(grad_tensor0));
@@ -303,7 +301,7 @@ TEST(Backward, WithAccumulation) {
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta2->SetSingleOutRankWithSlot(0, 0);
-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
   // Connect Node2 -> AccumulationNode via Edge
   auto meta2 = egr::AutogradMeta();
@@ -314,7 +312,7 @@ TEST(Backward, WithAccumulation) {
   RunBackward(target_tensors, grad_tensors);
-  CompareGradTensorWithValue<float>(leaf_tensor, 2500.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 2500.0);
 }
+}  // namespace eager_test
+}  // namespace egr

paddle/fluid/eager/tests/task_tests/cross_batch_accumulation_test.cc
@@ -31,17 +31,15 @@
 #include "paddle/fluid/eager/tests/test_utils.h"
-using namespace egr;  // NOLINT
+namespace egr {
+namespace eager_test {
 TEST(CrossBatchAccumulation, SingleScaleNode) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -60,7 +58,7 @@ TEST(CrossBatchAccumulation, SingleScaleNode) {
   auto_grad_meta->SetGradNode(
       std::dynamic_pointer_cast<GradNodeBase>(scale_node_ptr));
   auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
-  RetainGradForTensor(target_tensor);  // result: 1.0
+  egr_utils_api::RetainGradForTensor(target_tensor);  // result: 1.0
   auto meta = AutogradMeta();
   meta.SetSingleOutRankWithSlot(0, 0);
@@ -71,18 +69,18 @@ TEST(CrossBatchAccumulation, SingleScaleNode) {
   auto_grad_meta1->SetGradNode(
       std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
   auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
-  RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);
   }
   RunBackward(target_tensors, {});
-  CompareGradTensorWithValue<float>(target_tensor, 1.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
   RunBackward(target_tensors, {});
-  CompareGradTensorWithValue<float>(target_tensor, 1.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 10.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 10.0);
 }
+}  // namespace eager_test
+}  // namespace egr

paddle/fluid/eager/tests/task_tests/eager_utils_test.cc
@@ -24,10 +24,7 @@
 #include "paddle/pten/api/lib/utils/allocator.h"
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+namespace egr {
+namespace eager_test {
 TEST(EagerUtils, AutoGradMeta) {
   // Construct Eager Tensor
@@ -167,7 +164,7 @@ TEST(EagerUtils, PassStopGradient) {
 TEST(EagerUtils, SyncToVarsSingle) {
   paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
-  auto tensor = CreateTestCPUTensor(5.0f, ddim);
+  auto tensor = eager_test::CreateTestCPUTensor(5.0f, ddim);
   std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
       egr::EagerUtils::SyncToVars(tensor);
@@ -185,9 +182,8 @@ TEST(EagerUtils, SyncToVarsSingle) {
 TEST(EagerUtils, SyncToVarsMultiple) {
   paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
   std::vector<egr::EagerTensor> tensors = {
-      CreateTestCPUTensor(1.0f, ddim),
-      CreateTestCPUTensor(2.0f, ddim)};
+      eager_test::CreateTestCPUTensor(1.0f, ddim),
+      eager_test::CreateTestCPUTensor(2.0f, ddim)};
   std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
       egr::EagerUtils::SyncToVars(tensors);
@@ -280,4 +276,4 @@ TEST(EagerUtils, ConstructDuplicableOutput) {
   CHECK(outs[0]->initialized() == false);
 }
+}  // namespace eager_test
+}  // namespace egr

paddle/fluid/eager/tests/task_tests/forward_autograd_test.cc
@@ -27,21 +27,18 @@
 #include "paddle/pten/core/dense_tensor.h"
 #include "paddle/pten/core/tensor_meta.h"
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+namespace egr {
+namespace eager_test {
 TEST(Forward, SingleNode) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
@@ -55,7 +52,7 @@ TEST(Forward, SingleNode) {
       tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output
-  CompareTensorWithValue<float>(out, 13.0);
+  eager_test::CompareTensorWithValue<float>(out, 13.0);
   // Examine GradNode
   {
@@ -80,14 +77,14 @@ Node1
 out
 */
 TEST(Forward, LinearNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
@@ -108,10 +105,10 @@ TEST(Forward, LinearNodes) {
       out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
   // Examine GradNode
   {
@@ -156,14 +153,14 @@ TEST(Forward, LinearNodes) {
 out1 out2
 */
 TEST(Forward, BranchedNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(t));
@@ -190,13 +187,13 @@ TEST(Forward, BranchedNodes) {
       out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
   // Examine Forward Output 2
-  CompareTensorWithValue<float>(out2, 150.0);
+  eager_test::CompareTensorWithValue<float>(out2, 150.0);
   // Examine GradNode
   {
@@ -248,4 +245,4 @@ TEST(Forward, BranchedNodes) {
   }
 }
+}  // namespace eager_test
+}  // namespace egr

paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc
@@ -29,10 +29,7 @@
 #include "paddle/fluid/eager/tests/test_utils.h"
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+namespace egr {
+namespace eager_test {
 egr::EagerTensor hook_function(const egr::EagerTensor& t) {
   auto t_dense = std::dynamic_pointer_cast<pten::DenseTensor>(t.impl());
@@ -61,14 +58,14 @@ egr::EagerTensor hook_function(const egr::EagerTensor& t) {
 }
 TEST(FwdBwdJoint, SingleNode) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   float scale = 2.0;
@@ -77,7 +74,7 @@ TEST(FwdBwdJoint, SingleNode) {
       tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output
-  CompareTensorWithValue<float>(out, 13.0);
+  eager_test::CompareTensorWithValue<float>(out, 13.0);
   std::vector<egr::EagerTensor> outs = {out};
   // 4. Run Backward
@@ -88,7 +85,7 @@ TEST(FwdBwdJoint, SingleNode) {
           EagerUtils::unsafe_autograd_meta(tensor)->Grad().impl())
           ->data<float>()[0];
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 2.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 2.0);
 }
 /*
@@ -101,14 +98,14 @@ Node1
 out
 */
 TEST(FwdBwdJoint, LinearNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   // Run Forward Node 0
@@ -125,17 +122,17 @@ TEST(FwdBwdJoint, LinearNodes) {
       out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
   std::vector<egr::EagerTensor> outs = {out1};
   // 4. Run Backward
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 10.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 10.0);
 }
 /*
@@ -149,14 +146,14 @@ TEST(FwdBwdJoint, LinearNodes) {
 out1 out2
 */
 TEST(FwdBwdJoint, BranchedNodes) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   // Run Forward Node 0
@@ -179,10 +176,10 @@ TEST(FwdBwdJoint, BranchedNodes) {
       out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
   // Examine Forward Output 2
   {
@@ -201,7 +198,7 @@ TEST(FwdBwdJoint, BranchedNodes) {
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 30.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 30.0);
 }
 /*
@@ -215,14 +212,14 @@ TEST(FwdBwdJoint, BranchedNodes) {
 out1 out2
 */
 TEST(FwdBwdJoint, GradientHook) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   std::function<egr::EagerTensor(const egr::EagerTensor&)> hook =
       &hook_function;
@@ -234,24 +231,24 @@ TEST(FwdBwdJoint, GradientHook) {
   egr::EagerTensor out0 =
       egr::scale(tensor, scale0, bias0, true /*bias_after_scale*/,
                  true /*trace_backward*/);
-  RetainGradForTensor(out0);                  // hook: +5
-  RegisterGradientHookForTensor(out0, hook);  // hook: +5
+  egr_utils_api::RetainGradForTensor(out0);                  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out0, hook);  // hook: +5
   // Run Forward Node 1
   float scale1 = 5.0;
   float bias1 = 10.0;
   egr::EagerTensor out1 = egr::scale(
       out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/);
-  RetainGradForTensor(out1);                  // hook: +5
-  RegisterGradientHookForTensor(out1, hook);  // hook: +5
+  egr_utils_api::RetainGradForTensor(out1);                  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out1, hook);  // hook: +5
   // Run Forward Node 2
   float scale2 = 10.0;
   float bias2 = 20.0;
   egr::EagerTensor out2 = egr::scale(
       out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
-  RetainGradForTensor(out2);                  // hook: +5
-  RegisterGradientHookForTensor(out2, hook);  // hook: +5
+  egr_utils_api::RetainGradForTensor(out2);                  // hook: +5
+  egr_utils_api::RegisterGradientHookForTensor(out2, hook);  // hook: +5
   // 4. Run Backward
   std::vector<egr::EagerTensor> outs = {out1, out2};
@@ -259,16 +256,16 @@ TEST(FwdBwdJoint, GradientHook) {
   // Examine Backward Grad
   // leaf grad
-  CompareGradTensorWithValue<float>(tensor, 190.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 190.0);
   // out0 grad
-  CompareGradTensorWithValue<float>(out0, 90.0);
+  eager_test::CompareGradTensorWithValue<float>(out0, 90.0);
   // out1 grad
-  CompareGradTensorWithValue<float>(out1, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(out1, 1.0);
   // out2 grad
-  CompareGradTensorWithValue<float>(out2, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(out2, 1.0);
 }
 /*
@@ -282,14 +279,14 @@ TEST(FwdBwdJoint, GradientHook) {
 out1 out2
 */
 TEST(FwdBwdJoint, CrossBatchAccumulation) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   // Run Forward Node 0
@@ -316,13 +313,13 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) {
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 30.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 30.0);
   // Cross Batch Accumulation
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 60.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 60.0);
 }
 /* ---------------------------------------------------- */
@@ -331,14 +328,14 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 TEST(FwdBwdJoint, SingleNodeCUDA) {
-  InitEnv(paddle::platform::CUDAPlace());
+  eager_test::InitEnv(paddle::platform::CUDAPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CUDAPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   float scale = 2.0;
@@ -347,14 +344,14 @@ TEST(FwdBwdJoint, SingleNodeCUDA) {
       tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output
-  CompareTensorWithValue<float>(out, 13.0);
+  eager_test::CompareTensorWithValue<float>(out, 13.0);
   std::vector<egr::EagerTensor> outs = {out};
   // 4. Run Backward
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 2.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 2.0);
 }
 /*
@@ -368,14 +365,14 @@ TEST(FwdBwdJoint, SingleNodeCUDA) {
 out1 out2
 */
 TEST(FwdBwdJoint, BranchedNodesCUDA) {
-  InitEnv(paddle::platform::CUDAPlace());
+  eager_test::InitEnv(paddle::platform::CUDAPlace());
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CUDAPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   // 3. Run Forward
   // Run Forward Node 0
@@ -398,11 +395,11 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) {
       out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/);
   // Examine Forward Output 0
-  CompareTensorWithValue<float>(out0, 13.0);
+  eager_test::CompareTensorWithValue<float>(out0, 13.0);
   // Examine Forward Output 1
-  CompareTensorWithValue<float>(out1, 75.0);
+  eager_test::CompareTensorWithValue<float>(out1, 75.0);
   // Examine Forward Output 2
-  CompareTensorWithValue<float>(out2, 150.0);
+  eager_test::CompareTensorWithValue<float>(out2, 150.0);
   // TODO(jiabin): fix this with add functor
   // 4. Run Backward
@@ -410,8 +407,8 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) {
   RunBackward(outs, {});
   // Examine Backward Grad
-  CompareGradTensorWithValue<float>(tensor, 30.0);
+  eager_test::CompareGradTensorWithValue<float>(tensor, 30.0);
 }
 #endif
+}  // namespace eager_test
+}  // namespace egr

paddle/fluid/eager/tests/task_tests/generated_test.cc
@@ -30,66 +30,63 @@
 #include "paddle/fluid/eager/api/generated/fluid_generated/dygraph_forward_api.h"
 #include "paddle/pten/core/kernel_registry.h"
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+namespace egr {
+namespace eager_test {
 TEST(Generated, Sigmoid) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   VLOG(6) << "Init Env";
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
   VLOG(6) << "Make Dim";
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 0.0, true);
   VLOG(6) << "Make EagerTensor";
-  RetainGradForTensor(tensor);
+  egr_utils_api::RetainGradForTensor(tensor);
   VLOG(6) << "Retain Grad for Tensor";
   auto output_tensor = sigmoid_dygraph_function(tensor, {});
   VLOG(6) << "Run Backward";
-  CompareVariableWithValue<float>(output_tensor, 0.5);
+  eager_test::CompareVariableWithValue<float>(output_tensor, 0.5);
   std::vector<egr::EagerTensor> target_tensors = {output_tensor};
   VLOG(6) << "Runing Backward";
   RunBackward(target_tensors, {});
   VLOG(6) << "Finish Backward";
-  CompareGradVariableWithValue<float>(tensor, 0.25);
+  eager_test::CompareGradVariableWithValue<float>(tensor, 0.25);
 }
 TEST(Generated, Matmul_v2) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   auto tracer = std::make_shared<paddle::imperative::Tracer>();
   paddle::imperative::SetCurrentTracer(tracer);
   // 1. Prepare Input
   paddle::framework::DDim ddimX = paddle::framework::make_ddim({4, 16});
-  egr::EagerTensor X = CreateTensorWithValue(
+  egr::EagerTensor X = egr_utils_api::CreateTensorWithValue(
      ddimX, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 3.0, true);
-  RetainGradForTensor(X);
+  egr_utils_api::RetainGradForTensor(X);
   paddle::framework::DDim ddimY = paddle::framework::make_ddim({16, 20});
-  egr::EagerTensor Y = CreateTensorWithValue(
+  egr::EagerTensor Y = egr_utils_api::CreateTensorWithValue(
      ddimY, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 2.0, true);
-  RetainGradForTensor(Y);
+  egr_utils_api::RetainGradForTensor(Y);
   auto output_tensor = matmul_v2_dygraph_function(
       X, Y, {{"trans_x", false}, {"trans_y", false}});
-  CompareVariableWithValue<float>(output_tensor, 96);
+  eager_test::CompareVariableWithValue<float>(output_tensor, 96);
   std::vector<egr::EagerTensor> target_tensors = {output_tensor};
   RunBackward(target_tensors, {});
-  CompareGradVariableWithValue<float>(X, 2.0 * 20);
-  CompareGradVariableWithValue<float>(Y, 3.0 * 4);
+  eager_test::CompareGradVariableWithValue<float>(X, 2.0 * 20);
+  eager_test::CompareGradVariableWithValue<float>(Y, 3.0 * 4);
 }
+}  // namespace eager_test
+}  // namespace egr

paddle/fluid/eager/tests/task_tests/hook_test.cc
@@ -30,9 +30,7 @@
 #include "paddle/fluid/eager/tests/test_utils.h"
-using namespace egr;  // NOLINT
+namespace egr {
+namespace eager_test {
 egr::EagerTensor hook_function(const egr::EagerTensor& t) {
   auto t_dense = std::dynamic_pointer_cast<pten::DenseTensor>(t.impl());
@@ -61,14 +59,14 @@ egr::EagerTensor hook_function(const egr::EagerTensor& t) {
 }
 TEST(RetainGrad, HookBeforeRetainGrad) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -99,8 +97,9 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
         std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
             auto_grad_meta));
-    RegisterGradientHookForTensor(target_tensor, hook);
-    RetainGradForTensor(target_tensor);  // result: 1.0 + 3.0 = 4.0
+    egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook);
+    egr_utils_api::RetainGradForTensor(
+        target_tensor);  // result: 1.0 + 3.0 = 4.0
   }
   // Connect ScaleNode -> AccumulationNode via Edge
@@ -126,25 +125,26 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
         std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
             auto_grad_meta));
-    RegisterGradientHookForTensor(leaf_tensor, hook);
-    RetainGradForTensor(leaf_tensor);  // result: 4.0*5.0 + 3.0 = 23.0
+    egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook);
+    egr_utils_api::RetainGradForTensor(
+        leaf_tensor);  // result: 4.0*5.0 + 3.0 = 23.0
   }
   RunBackward(target_tensors, {});
-  CompareGradTensorWithValue<float>(target_tensor, 4.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 4.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
 }
 TEST(RetainGrad, HookAfterRetainGrad) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
   target_tensors.emplace_back(std::move(tensor));
@@ -173,8 +173,8 @@ TEST(RetainGrad, HookAfterRetainGrad) {
         std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
             auto_grad_meta));
-    RetainGradForTensor(target_tensor);  // result: 1.0
-    RegisterGradientHookForTensor(target_tensor, hook);
+    egr_utils_api::RetainGradForTensor(target_tensor);  // result: 1.0
+    egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook);
   }
   // Connect ScaleNode -> AccumulationNode via Edge
@@ -200,15 +200,15 @@ TEST(RetainGrad, HookAfterRetainGrad) {
         std::dynamic_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
            auto_grad_meta));
-    RetainGradForTensor(leaf_tensor);  // RetainGrad for leaf tensor gets
-                                       // postponed, result: 4.0*5.0 + 3.0 =
-                                       // 23.0
-    RegisterGradientHookForTensor(leaf_tensor, hook);
+    egr_utils_api::RetainGradForTensor(
+        leaf_tensor);  // RetainGrad for leaf tensor gets
+                       // postponed, result: 4.0*5.0 + 3.0 = 23.0
+    egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook);
   }
   RunBackward(target_tensors, {});
-  CompareGradTensorWithValue<float>(target_tensor, 1.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 23.0);
 }
+}  // namespace eager_test
+}  // namespace egr

paddle/fluid/eager/tests/task_tests/tensor_utils_test.cc
@@ -23,39 +23,34 @@
 #include "paddle/fluid/eager/tests/test_utils.h"
 #include "paddle/pten/api/lib/utils/allocator.h"
-#include "paddle/pten/core/kernel_registry.h"
-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+namespace egr {
+namespace eager_test {
 TEST(TensorUtils, Test) {
   // Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());
   // Prepare Inputs
   std::vector<egr::EagerTensor> target_tensors;
   paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});
   // Create Target Tensor
-  egr::EagerTensor t = CreateTensorWithValue(
+  egr::EagerTensor t = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/);
-  egr::EagerTensor t_grad = CreateTensorWithValue(
+  egr::EagerTensor t_grad = egr_utils_api::CreateTensorWithValue(
      ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
-  CHECK_EQ(IsLeafTensor(t), true);
+  CHECK_EQ(egr_utils_api::IsLeafTensor(t), true);
   // Test Utils
-  CompareTensorWithValue<float>(t, 5.0);
+  eager_test::CompareTensorWithValue<float>(t, 5.0);
   egr::AutogradMeta* meta = egr::EagerUtils::autograd_meta(&t);
   *meta->MutableGrad() = t_grad;
-  CompareGradTensorWithValue<float>(t, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(t, 1.0);
 }
+}  // namespace eager_test
+}  // namespace egr