BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit b06a5946 (unverified)
Authored Dec 12, 2022 by 姜永久; committed via GitHub on Dec 13, 2022
rm unittests eager guard tests part12 imperative_optimizer2resnet (#48833)
Parent: a7014f09

9 changed files, with 87 additions and 428 deletions (+87, -428)
Changed files:

  python/paddle/fluid/tests/unittests/test_imperative_optimizer.py                 (+28, -169)
  python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py              (+33, -199)
  python/paddle/fluid/tests/unittests/test_imperative_partitial_backward.py        (+1, -7)
  python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py                   (+1, -7)
  python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py   (+1, -7)
  python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py           (+16, -18)
  python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py             (+5, -7)
  python/paddle/fluid/tests/unittests/test_imperative_resnet.py                    (+1, -7)
  python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py    (+1, -7)
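All nine files receive the same mechanical cleanup: the `from paddle.fluid.framework import _test_eager_guard` import and the double-run `test_*` wrappers are deleted, and each `func_test_*` helper is renamed back to a plain `test_*` method, since eager mode is now the default execution mode and a separate legacy-mode pass is no longer needed. A minimal runnable sketch of the pattern, using hypothetical stand-in names (MyOptimizerTest and the trivial _check_mlp below are illustrations, not the real Paddle test classes):

import unittest

class MyOptimizerTest(unittest.TestCase):  # hypothetical stand-in class
    def _check_mlp(self):
        pass  # the real helper builds and trains a small MLP

    # Before this commit, the pattern removed here was:
    #
    #     def func_test_sgd(self):
    #         self._check_mlp()
    #
    #     def test_sgd(self):
    #         with _test_eager_guard():
    #             self.func_test_sgd()   # first run: eager mode
    #         self.func_test_sgd()       # second run: legacy mode
    #
    # After: eager mode is the default, so the body is inlined and runs once.
    def test_sgd(self):
        self._check_mlp()

if __name__ == "__main__":
    unittest.main()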
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py

@@ -22,7 +22,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.distributed.fleet.meta_optimizers import DGCMomentumOptimizer
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import (
     AdadeltaOptimizer,
     AdagradOptimizer,
@@ -268,13 +267,8 @@ class TestImperativeOptimizerPiecewiseDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase):
@@ -301,13 +295,8 @@ class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase):
@@ -334,13 +323,8 @@ class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase):
@@ -367,13 +351,8 @@ class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_adam(self):
-        self._check_mlp()
-
     def test_adam(self):
-        with _test_eager_guard():
-            self.func_test_adam()
-        self.func_test_adam()
+        self._check_mlp()


 class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase):
@@ -394,24 +373,14 @@ class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd_cycle(self):
+    def test_sgd_cycle(self):
         self.cycle = True
         self._check_mlp()

-    def test_sgd_cycle(self):
-        with _test_eager_guard():
-            self.func_test_sgd_cycle()
-        self.func_test_sgd_cycle()
-
-    def func_test_sgd(self):
+    def test_sgd(self):
         self.cycle = False
         self._check_mlp()

-    def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
-

 class TestImperativeOptimizerCosineDecay(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -431,13 +400,8 @@ class TestImperativeOptimizerCosineDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase):
@@ -458,17 +422,12 @@ class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestOptimizerLearningRate(unittest.TestCase):
-    def func_test_constant_lr(self):
+    def test_constant_lr(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -494,12 +453,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
             np.testing.assert_allclose(lr, 0.001, rtol=1e-06, atol=0.0)

-    def test_constant_lr(self):
-        with _test_eager_guard():
-            self.func_test_constant_lr()
-        self.func_test_constant_lr()
-
-    def func_test_lr_decay(self):
+    def test_lr_decay(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -530,12 +484,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
                 np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0)

-    def test_lr_decay(self):
-        with _test_eager_guard():
-            self.func_test_lr_decay()
-        self.func_test_lr_decay()
-
-    def func_test_lr_decay_natural_exp(self):
+    def test_lr_decay_natural_exp(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -569,12 +518,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
                 np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0)

-    def test_lr_decay_natural_exp(self):
-        with _test_eager_guard():
-            self.func_test_lr_decay_natural_exp()
-        self.func_test_lr_decay_natural_exp()
-
-    def func_test_set_lr(self):
+    def test_set_lr(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -615,11 +559,6 @@ class TestOptimizerLearningRate(unittest.TestCase):
             )
             adam.set_lr(0.01)

-    def test_set_lr(self):
-        with _test_eager_guard():
-            self.func_test_set_lr()
-        self.func_test_set_lr()
-

 class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -632,13 +571,8 @@ class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase):
         optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
         return optimizer

-    def func_test_momentum(self):
-        self._check_mlp()
-
     def test_momentum(self):
-        with _test_eager_guard():
-            self.func_test_momentum()
-        self.func_test_momentum()
+        self._check_mlp()


 class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase):
@@ -652,13 +586,8 @@ class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase):
         optimizer = LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9)
         return optimizer

-    def func_test_larsmomentum(self):
-        self._check_mlp()
-
     def test_larsmomentum(self):
-        with _test_eager_guard():
-            self.func_test_larsmomentum()
-        self.func_test_larsmomentum()
+        self._check_mlp()


 class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase):
@@ -672,13 +601,8 @@ class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase):
         optimizer = AdagradOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_adagrad(self):
-        self._check_mlp()
-
     def test_adagrad(self):
-        with _test_eager_guard():
-            self.func_test_adagrad()
-        self.func_test_adagrad()
+        self._check_mlp()


 class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase):
@@ -692,13 +616,8 @@ class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase):
         optimizer = AdamaxOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_adamax(self):
-        self._check_mlp()
-
     def test_adamax(self):
-        with _test_eager_guard():
-            self.func_test_adamax()
-        self.func_test_adamax()
+        self._check_mlp()


 class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase):
@@ -720,13 +639,8 @@ class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase):
         optimizer._seed = 100
         return optimizer

-    def func_test_dpsgd(self):
-        self._check_mlp(place=fluid.CPUPlace())
-
     def test_dpsgd(self):
-        with _test_eager_guard():
-            self.func_test_dpsgd()
-        self.func_test_dpsgd()
+        self._check_mlp(place=fluid.CPUPlace())


 class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase):
@@ -740,13 +654,8 @@ class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase):
         optimizer = DecayedAdagradOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_decayadagrad(self):
-        self._check_mlp()
-
     def test_decayadagrad(self):
-        with _test_eager_guard():
-            self.func_test_decayadagrad()
-        self.func_test_decayadagrad()
+        self._check_mlp()


 class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase):
@@ -765,13 +674,8 @@ class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_adadelta(self):
-        self._check_mlp()
-
     def test_adadelta(self):
-        with _test_eager_guard():
-            self.func_test_adadelta()
-        self.func_test_adadelta()
+        self._check_mlp()


 class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase):
@@ -785,13 +689,8 @@ class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase):
         optimizer = RMSPropOptimizer(learning_rate=0.1)
         return optimizer

-    def func_test_rmsprop(self):
-        self._check_mlp()
-
     def test_rmsprop(self):
-        with _test_eager_guard():
-            self.func_test_rmsprop()
-        self.func_test_rmsprop()
+        self._check_mlp()


 class TestImperativeFtrlOptimizer(TestImperativeOptimizerBase):
@@ -805,13 +704,8 @@ class TestImperativeFtrlOptimizer(TestImperativeOptimizerBase):
         optimizer = FtrlOptimizer(learning_rate=0.1)
         return optimizer

-    def func_test_ftrl(self):
-        self._check_mlp()
-
     def test_ftrl(self):
-        with _test_eager_guard():
-            self.func_test_ftrl()
-        self.func_test_ftrl()
+        self._check_mlp()


 def exclude_fn(param):
@@ -845,15 +739,10 @@ class TestImperativeModelAverage(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_modelaverage(self):
+    def test_modelaverage(self):
         exception_message = "In dygraph, don't support ModelAverage."
         self._check_exception(exception_message)

-    def test_modelaverage(self):
-        with _test_eager_guard():
-            self.func_test_modelaverage()
-        self.func_test_modelaverage()
-

 class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -866,32 +755,22 @@ class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_dgcmomentum(self):
+    def test_dgcmomentum(self):
         exception_message = "In dygraph, don't support DGCMomentumOptimizer."
         self._check_exception(exception_message)

-    def test_dgcmomentum(self):
-        with _test_eager_guard():
-            self.func_test_dgcmomentum()
-        self.func_test_dgcmomentum()
-

 class TestImperativeExponentialMovingAverage(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
         optimizer = ExponentialMovingAverage(0.999)
         return optimizer

-    def func_test_exponentialmoving(self):
+    def test_exponentialmoving(self):
         exception_message = (
             "In dygraph, don't support ExponentialMovingAverage."
         )
         self._check_exception(exception_message)

-    def test_exponentialmoving(self):
-        with _test_eager_guard():
-            self.func_test_exponentialmoving()
-        self.func_test_exponentialmoving()
-

 class TestImperativePipelineOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -901,15 +780,10 @@ class TestImperativePipelineOptimizer(TestImperativeOptimizerBase):
         optimizer = PipelineOptimizer(optimizer)
         return optimizer

-    def func_test_pipline(self):
+    def test_pipline(self):
         exception_message = "In dygraph, don't support PipelineOptimizer."
         self._check_exception(exception_message)

-    def test_pipline(self):
-        with _test_eager_guard():
-            self.func_test_pipline()
-        self.func_test_pipline()
-

 class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -919,15 +793,10 @@ class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase):
         optimizer = LookaheadOptimizer(optimizer, alpha=0.5, k=5)
         return optimizer

-    def func_test_lookahead(self):
+    def test_lookahead(self):
         exception_message = "In dygraph, don't support LookaheadOptimizer."
         self._check_exception(exception_message)

-    def test_lookahead(self):
-        with _test_eager_guard():
-            self.func_test_lookahead()
-        self.func_test_lookahead()
-

 class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -937,18 +806,13 @@ class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase):
         optimizer = RecomputeOptimizer(optimizer)
         return optimizer

-    def func_test_recompute(self):
+    def test_recompute(self):
         exception_message = "In dygraph, don't support RecomputeOptimizer."
         self._check_exception(exception_message)

-    def test_recompute(self):
-        with _test_eager_guard():
-            self.func_test_recompute()
-        self.func_test_recompute()
-

 class TestImperativeOptimizerList(unittest.TestCase):
-    def func_test_parameter_list(self):
+    def test_parameter_list(self):
         with fluid.dygraph.guard():
             linear_1 = paddle.nn.Linear(10, 10)
             linear_2 = paddle.nn.Linear(10, 10)
@@ -974,11 +838,6 @@ class TestImperativeOptimizerList(unittest.TestCase):
             == len(linear_1.parameters() + linear_2.parameters())
         )

-    def test_parameter_list(self):
-        with _test_eager_guard():
-            self.func_test_parameter_list()
-        self.func_test_parameter_list()
-

 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py

@@ -22,7 +22,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.distributed.fleet.meta_optimizers import DGCMomentumOptimizer
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import (
     AdadeltaOptimizer,
     AdagradOptimizer,
@@ -287,13 +286,8 @@ class TestImperativeOptimizerPiecewiseDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase):
@@ -314,13 +308,8 @@ class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase):
@@ -341,13 +330,8 @@ class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase):
@@ -368,13 +352,8 @@ class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_adam(self):
-        self._check_mlp()
-
     def test_adam(self):
-        with _test_eager_guard():
-            self.func_test_adam()
-        self.func_test_adam()
+        self._check_mlp()


 class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase):
@@ -395,24 +374,14 @@ class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd_cycle(self):
+    def test_sgd_cycle(self):
         self.cycle = True
         self._check_mlp()

-    def test_sgd_cycle(self):
-        with _test_eager_guard():
-            self.func_test_sgd_cycle()
-        self.func_test_sgd_cycle()
-
-    def func_test_sgd(self):
+    def test_sgd(self):
         self.cycle = False
         self._check_mlp()

-    def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
-

 class TestImperativeOptimizerCosineAnnealingDecay(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -432,13 +401,8 @@ class TestImperativeOptimizerCosineAnnealingDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase):
@@ -459,13 +423,8 @@ class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerLambdaDecay(TestImperativeOptimizerBase):
@@ -486,13 +445,8 @@ class TestImperativeOptimizerLambdaDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerLinearWarmup(TestImperativeOptimizerBase):
@@ -517,13 +471,8 @@ class TestImperativeOptimizerLinearWarmup(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerMultiStepDecay(TestImperativeOptimizerBase):
@@ -544,13 +493,8 @@ class TestImperativeOptimizerMultiStepDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerStepLR(TestImperativeOptimizerBase):
@@ -571,13 +515,8 @@ class TestImperativeOptimizerStepLR(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerReduceOnPlateau(TestImperativeOptimizerBase):
@@ -596,17 +535,12 @@ class TestImperativeOptimizerReduceOnPlateau(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestOptimizerLearningRate(unittest.TestCase):
-    def func_test_constant_lr(self):
+    def test_constant_lr(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -630,12 +564,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
             np.testing.assert_allclose(lr, 0.001, rtol=1e-06, atol=0.0)

-    def test_constant_lr(self):
-        with _test_eager_guard():
-            self.func_test_constant_lr()
-        self.func_test_constant_lr()
-
-    def func_test_lr_decay(self):
+    def test_lr_decay(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -664,12 +593,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
                 np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0)
                 scheduler.step()

-    def test_lr_decay(self):
-        with _test_eager_guard():
-            self.func_test_lr_decay()
-        self.func_test_lr_decay()
-
-    def func_test_lr_scheduler_natural_exp(self):
+    def test_lr_scheduler_natural_exp(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -694,12 +618,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
                 np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0)
                 scheduler.step()

-    def test_lr_scheduler_natural_exp(self):
-        with _test_eager_guard():
-            self.func_test_lr_scheduler_natural_exp()
-        self.func_test_lr_scheduler_natural_exp()
-
-    def func_test_set_lr(self):
+    def test_set_lr(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -735,11 +654,6 @@ class TestOptimizerLearningRate(unittest.TestCase):
             )
             adam.set_lr(0.01)

-    def test_set_lr(self):
-        with _test_eager_guard():
-            self.func_test_set_lr()
-        self.func_test_set_lr()
-

 class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -752,13 +666,8 @@ class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase):
         optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
         return optimizer

-    def func_test_momentum(self):
-        self._check_mlp()
-
     def test_momentum(self):
-        with _test_eager_guard():
-            self.func_test_momentum()
-        self.func_test_momentum()
+        self._check_mlp()


 class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase):
@@ -772,13 +681,8 @@ class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase):
         optimizer = LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9)
         return optimizer

-    def func_test_larsmomentum(self):
-        self._check_mlp()
-
     def test_larsmomentum(self):
-        with _test_eager_guard():
-            self.func_test_larsmomentum()
-        self.func_test_larsmomentum()
+        self._check_mlp()


 class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase):
@@ -792,13 +696,8 @@ class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase):
         optimizer = AdagradOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_adagrad(self):
-        self._check_mlp()
-
     def test_adagrad(self):
-        with _test_eager_guard():
-            self.func_test_adagrad()
-        self.func_test_adagrad()
+        self._check_mlp()


 class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase):
@@ -812,13 +711,8 @@ class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase):
         optimizer = AdamaxOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_adamax(self):
-        self._check_mlp()
-
     def test_adamax(self):
-        with _test_eager_guard():
-            self.func_test_adamax()
-        self.func_test_adamax()
+        self._check_mlp()


 class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase):
@@ -840,13 +734,8 @@ class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase):
         optimizer._seed = 100
         return optimizer

-    def func_test_dpsgd(self):
-        self._check_mlp(place=fluid.CPUPlace())
-
     def test_dpsgd(self):
-        with _test_eager_guard():
-            self.func_test_dpsgd()
-        self.func_test_dpsgd()
+        self._check_mlp(place=fluid.CPUPlace())


 class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase):
@@ -860,13 +749,8 @@ class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase):
         optimizer = DecayedAdagradOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_decayadagrad(self):
-        self._check_mlp()
-
     def test_decayadagrad(self):
-        with _test_eager_guard():
-            self.func_test_decayadagrad()
-        self.func_test_decayadagrad()
+        self._check_mlp()


 class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase):
@@ -885,13 +769,8 @@ class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_adadelta(self):
-        self._check_mlp()
-
     def test_adadelta(self):
-        with _test_eager_guard():
-            self.func_test_adadelta()
-        self.func_test_adadelta()
+        self._check_mlp()


 class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase):
@@ -905,13 +784,8 @@ class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase):
         optimizer = RMSPropOptimizer(learning_rate=0.1)
         return optimizer

-    def func_test_rmsprop(self):
-        self._check_mlp()
-
     def test_rmsprop(self):
-        with _test_eager_guard():
-            self.func_test_rmsprop()
-        self.func_test_rmsprop()
+        self._check_mlp()


 class TestImperativeFtrlOptimizer(TestImperativeOptimizerBase):
@@ -925,13 +799,8 @@ class TestImperativeFtrlOptimizer(TestImperativeOptimizerBase):
         optimizer = FtrlOptimizer(learning_rate=0.1)
         return optimizer

-    def func_test_ftrl(self):
-        self._check_mlp()
-
     def test_ftrl(self):
-        with _test_eager_guard():
-            self.func_test_ftrl()
-        self.func_test_ftrl()
+        self._check_mlp()


 def exclude_fn(param):
@@ -965,15 +834,10 @@ class TestImperativeModelAverage(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_modelaverage(self):
+    def test_modelaverage(self):
         exception_message = "In dygraph, don't support ModelAverage."
         self._check_exception(exception_message)

-    def test_modelaverage(self):
-        with _test_eager_guard():
-            self.func_test_modelaverage()
-        self.func_test_modelaverage()
-

 class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -986,32 +850,22 @@ class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_dgcmomentum(self):
+    def test_dgcmomentum(self):
         exception_message = "In dygraph, don't support DGCMomentumOptimizer."
         self._check_exception(exception_message)

-    def test_dgcmomentum(self):
-        with _test_eager_guard():
-            self.func_test_dgcmomentum()
-        self.func_test_dgcmomentum()
-

 class TestImperativeExponentialMovingAverage(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
         optimizer = ExponentialMovingAverage(0.999)
         return optimizer

-    def func_test_exponentialmoving(self):
+    def test_exponentialmoving(self):
         exception_message = (
             "In dygraph, don't support ExponentialMovingAverage."
         )
         self._check_exception(exception_message)

-    def test_exponentialmoving(self):
-        with _test_eager_guard():
-            self.func_test_exponentialmoving()
-        self.func_test_exponentialmoving()
-

 class TestImperativePipelineOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -1021,15 +875,10 @@ class TestImperativePipelineOptimizer(TestImperativeOptimizerBase):
         optimizer = PipelineOptimizer(optimizer)
         return optimizer

-    def func_test_pipline(self):
+    def test_pipline(self):
         exception_message = "In dygraph, don't support PipelineOptimizer."
         self._check_exception(exception_message)

-    def test_pipline(self):
-        with _test_eager_guard():
-            self.func_test_pipline()
-        self.func_test_pipline()
-

 class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -1039,15 +888,10 @@ class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase):
         optimizer = LookaheadOptimizer(optimizer, alpha=0.5, k=5)
         return optimizer

-    def func_test_lookahead(self):
+    def test_lookahead(self):
         exception_message = "In dygraph, don't support LookaheadOptimizer."
         self._check_exception(exception_message)

-    def test_lookahead(self):
-        with _test_eager_guard():
-            self.func_test_lookahead()
-        self.func_test_lookahead()
-

 class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -1057,18 +901,13 @@ class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase):
         optimizer = RecomputeOptimizer(optimizer)
         return optimizer

-    def func_test_recompute(self):
+    def test_recompute(self):
         exception_message = "In dygraph, don't support RecomputeOptimizer."
         self._check_exception(exception_message)

-    def test_recompute(self):
-        with _test_eager_guard():
-            self.func_test_recompute()
-        self.func_test_recompute()
-

 class TestImperativeOptimizerList(unittest.TestCase):
-    def func_test_parameter_list(self):
+    def test_parameter_list(self):
         with fluid.dygraph.guard():
             linear_1 = paddle.nn.Linear(10, 10)
             linear_2 = paddle.nn.Linear(10, 10)
@@ -1094,11 +933,6 @@ class TestImperativeOptimizerList(unittest.TestCase):
             == len(linear_1.parameters() + linear_2.parameters())
         )

-    def test_parameter_list(self):
-        with _test_eager_guard():
-            self.func_test_parameter_list()
-        self.func_test_parameter_list()
-

 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_imperative_partitial_backward.py

@@ -18,11 +18,10 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard


 class TestImperativePartitialBackward(unittest.TestCase):
-    def func_partitial_backward(self):
+    def test_partitial_backward(self):
         with fluid.dygraph.guard():
             x = np.random.randn(2, 4, 5).astype("float32")
             x = fluid.dygraph.to_variable(x)
@@ -53,11 +52,6 @@ class TestImperativePartitialBackward(unittest.TestCase):
             linear1.clear_gradients()
             linear2.clear_gradients()

-    def test_partitial_backward(self):
-        with _test_eager_guard():
-            self.func_partitial_backward()
-        self.func_partitial_backward()
-

 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py

@@ -23,7 +23,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer
 from paddle.nn import Embedding
@@ -238,15 +237,10 @@ class PtbModel(fluid.Layer):
 class TestDygraphPtbRnn(unittest.TestCase):
-    def func_test_ptb_rnn(self):
+    def test_ptb_rnn(self):
         for is_sparse in [True, False]:
             self.ptb_rnn_cpu_float32(is_sparse)

-    def test_ptb_rnn(self):
-        with _test_eager_guard():
-            self.func_test_ptb_rnn()
-        self.func_test_ptb_rnn()
-
     def ptb_rnn_cpu_float32(self, is_sparse):
         seed = 90
         hidden_size = 10
python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py

@@ -23,12 +23,11 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer


 class TestDygraphPtbRnnSortGradient(unittest.TestCase):
-    def func_ptb_rnn_sort_gradient(self):
+    def test_ptb_rnn_sort_gradient(self):
         for is_sparse in [True, False]:
             self.ptb_rnn_sort_gradient_cpu_float32(is_sparse)
@@ -192,11 +191,6 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
         for key, value in static_param_updated.items():
             np.testing.assert_array_equal(value, dy_param_updated[key])

-    def test_ptb_rnn_sort_gradient(self):
-        with _test_eager_guard():
-            self.func_ptb_rnn_sort_gradient()
-        self.func_ptb_rnn_sort_gradient()
-

 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py

@@ -21,7 +21,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard


 class RecurrentTest(fluid.Layer):
@@ -62,23 +61,22 @@ class TestRecurrentFeed(unittest.TestCase):
         with fluid.dygraph.guard():
             fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-            with _test_eager_guard():
-                fluid.default_startup_program().random_seed = seed
-                fluid.default_main_program().random_seed = seed
-                original_in1 = to_variable(original_np1)
-                original_in2 = to_variable(original_np2)
-                original_in1.stop_gradient = False
-                original_in2.stop_gradient = False
-                rt = RecurrentTest("RecurrentTest")
-                for i in range(3):
-                    sum_out, out = rt(original_in1, original_in2)
-                    original_in1 = out
-                    eager_sum_out_value = sum_out.numpy()
-                    sum_out.backward()
-                    eager_dyout = out.gradient()
-                    original_in1.stop_gradient = True
-                    rt.clear_gradients()
+            fluid.default_startup_program().random_seed = seed
+            fluid.default_main_program().random_seed = seed
+            original_in1 = to_variable(original_np1)
+            original_in2 = to_variable(original_np2)
+            original_in1.stop_gradient = False
+            original_in2.stop_gradient = False
+            rt = RecurrentTest("RecurrentTest")
+            for i in range(3):
+                sum_out, out = rt(original_in1, original_in2)
+                original_in1 = out
+                eager_sum_out_value = sum_out.numpy()
+                sum_out.backward()
+                eager_dyout = out.gradient()
+                original_in1.stop_gradient = True
+                rt.clear_gradients()
             fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

         with new_program_scope():
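The recurrent-usage test above (and the reinforcement test that follows) uses the in-line variant of the same cleanup: the test body was nested under `with _test_eager_guard():` inside the dygraph guard, and the commit simply removes the guard and dedents the body. A minimal sketch, assuming a PaddlePaddle 2.x environment where `paddle.fluid` is importable (`run_pass` is a hypothetical stand-in for the seeded forward/backward loop shown in the diff):

import paddle.fluid as fluid  # assumption: PaddlePaddle 2.x is installed

def run_pass():
    # Stand-in for the real body: seed the programs, build RecurrentTest,
    # run three forward/backward steps, and record outputs and gradients.
    pass

# Before this commit the body only ran in eager mode under the guard:
#
#     with fluid.dygraph.guard():
#         with _test_eager_guard():
#             run_pass()
#
# After: eager execution is the default inside dygraph.guard(), so the
# same body runs without the extra context manager.
with fluid.dygraph.guard():
    run_pass()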
python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py

@@ -20,7 +20,6 @@ from test_imperative_base import new_program_scope
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer
@@ -106,12 +105,11 @@ class TestImperativeMnist(unittest.TestCase):
         dy_out, dy_param_init_value, dy_param_value = run_dygraph()

         with fluid.dygraph.guard():
-            with _test_eager_guard():
-                (
-                    eager_out,
-                    eager_param_init_value,
-                    eager_param_value,
-                ) = run_dygraph()
+            (
+                eager_out,
+                eager_param_init_value,
+                eager_param_value,
+            ) = run_dygraph()

         with new_program_scope():
             paddle.seed(seed)
python/paddle/fluid/tests/unittests/test_imperative_resnet.py

@@ -22,7 +22,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.nn import BatchNorm
@@ -253,7 +252,7 @@ class TestDygraphResnet(unittest.TestCase):
         return _reader_imple

-    def func_test_resnet_float32(self):
+    def test_resnet_float32(self):
         seed = 90

         batch_size = train_parameters["batch_size"]
@@ -462,11 +461,6 @@ class TestDygraphResnet(unittest.TestCase):
             self.assertTrue(np.isfinite(value.all()))
             self.assertFalse(np.isnan(value.any()))

-    def test_resnet_float32(self):
-        with _test_eager_guard():
-            self.func_test_resnet_float32()
-        self.func_test_resnet_float32()
-

 if __name__ == '__main__':
     paddle.enable_static()
python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py

@@ -22,7 +22,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard

 batch_size = 8
 train_parameters = {
@@ -73,7 +72,7 @@ def optimizer_setting(params, parameter_list=None):
 class TestDygraphResnetSortGradient(unittest.TestCase):
-    def func_test_resnet_sort_gradient_float32(self):
+    def test_resnet_sort_gradient_float32(self):
         seed = 90
         batch_size = train_parameters["batch_size"]
@@ -266,11 +265,6 @@ class TestDygraphResnetSortGradient(unittest.TestCase):
             self.assertTrue(np.isfinite(value.all()))
             self.assertFalse(np.isnan(value.any()))

-    def test_resnet_sort_gradient_float32(self):
-        with _test_eager_guard():
-            self.func_test_resnet_sort_gradient_float32()
-        self.func_test_resnet_sort_gradient_float32()
-

 if __name__ == '__main__':
     unittest.main()