Unverified commit b06a5946, authored by 姜永久, committed by GitHub

rm unittests eager guard tests part12 imperative_optimizer2resnet (#48833)

Parent a7014f09
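For context, every change below applies the same mechanical pattern: each test body used to live in a func_test_* helper, and the public test_* method ran it twice, once under _test_eager_guard() (eager mode) and once without (legacy dygraph mode). With eager mode now the default, the guard, the helper, and the double run are dropped and the body is inlined. A minimal runnable sketch of the before/after shape (TestExample and the _check_mlp stub are illustrative stand-ins, not code from this diff):

import unittest


class TestExample(unittest.TestCase):
    def _check_mlp(self):
        # Stand-in for the real helper in TestImperativeOptimizerBase.
        self.assertTrue(True)

    # Before this commit, the same test was written as:
    #
    #     def func_test_sgd(self):
    #         self._check_mlp()
    #
    #     def test_sgd(self):
    #         with _test_eager_guard():
    #             self.func_test_sgd()  # run once in eager mode
    #         self.func_test_sgd()      # run again in legacy dygraph mode
    #
    # After: eager mode is the default, so the guard and the second run
    # are removed and the body is inlined into the test method.
    def test_sgd(self):
        self._check_mlp()


if __name__ == '__main__':
    unittest.main()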
@@ -22,7 +22,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.distributed.fleet.meta_optimizers import DGCMomentumOptimizer
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import (
     AdadeltaOptimizer,
     AdagradOptimizer,
@@ -268,13 +267,8 @@ class TestImperativeOptimizerPiecewiseDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase):
@@ -301,13 +295,8 @@ class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase):
@@ -334,13 +323,8 @@ class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase):
@@ -367,13 +351,8 @@ class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_adam(self):
-        self._check_mlp()
-
     def test_adam(self):
-        with _test_eager_guard():
-            self.func_test_adam()
-        self.func_test_adam()
+        self._check_mlp()


 class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase):
@@ -394,24 +373,14 @@ class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd_cycle(self):
+    def test_sgd_cycle(self):
         self.cycle = True
         self._check_mlp()

-    def test_sgd_cycle(self):
-        with _test_eager_guard():
-            self.func_test_sgd_cycle()
-        self.func_test_sgd_cycle()
-
-    def func_test_sgd(self):
+    def test_sgd(self):
         self.cycle = False
         self._check_mlp()

-    def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
-

 class TestImperativeOptimizerCosineDecay(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -431,13 +400,8 @@ class TestImperativeOptimizerCosineDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase):
@@ -458,17 +422,12 @@ class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestOptimizerLearningRate(unittest.TestCase):
-    def func_test_constant_lr(self):
+    def test_constant_lr(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -494,12 +453,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
         np.testing.assert_allclose(lr, 0.001, rtol=1e-06, atol=0.0)

-    def test_constant_lr(self):
-        with _test_eager_guard():
-            self.func_test_constant_lr()
-        self.func_test_constant_lr()
-
-    def func_test_lr_decay(self):
+    def test_lr_decay(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -530,12 +484,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
         np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0)

-    def test_lr_decay(self):
-        with _test_eager_guard():
-            self.func_test_lr_decay()
-        self.func_test_lr_decay()
-
-    def func_test_lr_decay_natural_exp(self):
+    def test_lr_decay_natural_exp(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -569,12 +518,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
         np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0)

-    def test_lr_decay_natural_exp(self):
-        with _test_eager_guard():
-            self.func_test_lr_decay_natural_exp()
-        self.func_test_lr_decay_natural_exp()
-
-    def func_test_set_lr(self):
+    def test_set_lr(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -615,11 +559,6 @@ class TestOptimizerLearningRate(unittest.TestCase):
         )

         adam.set_lr(0.01)

-    def test_set_lr(self):
-        with _test_eager_guard():
-            self.func_test_set_lr()
-        self.func_test_set_lr()
-

 class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -632,13 +571,8 @@ class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase):
         optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
         return optimizer

-    def func_test_momentum(self):
-        self._check_mlp()
-
     def test_momentum(self):
-        with _test_eager_guard():
-            self.func_test_momentum()
-        self.func_test_momentum()
+        self._check_mlp()


 class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase):
@@ -652,13 +586,8 @@ class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase):
         optimizer = LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9)
         return optimizer

-    def func_test_larsmomentum(self):
-        self._check_mlp()
-
     def test_larsmomentum(self):
-        with _test_eager_guard():
-            self.func_test_larsmomentum()
-        self.func_test_larsmomentum()
+        self._check_mlp()


 class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase):
@@ -672,13 +601,8 @@ class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase):
         optimizer = AdagradOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_adagrad(self):
-        self._check_mlp()
-
     def test_adagrad(self):
-        with _test_eager_guard():
-            self.func_test_adagrad()
-        self.func_test_adagrad()
+        self._check_mlp()


 class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase):
@@ -692,13 +616,8 @@ class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase):
         optimizer = AdamaxOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_adamax(self):
-        self._check_mlp()
-
     def test_adamax(self):
-        with _test_eager_guard():
-            self.func_test_adamax()
-        self.func_test_adamax()
+        self._check_mlp()


 class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase):
@@ -720,13 +639,8 @@ class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase):
         optimizer._seed = 100
         return optimizer

-    def func_test_dpsgd(self):
-        self._check_mlp(place=fluid.CPUPlace())
-
     def test_dpsgd(self):
-        with _test_eager_guard():
-            self.func_test_dpsgd()
-        self.func_test_dpsgd()
+        self._check_mlp(place=fluid.CPUPlace())


 class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase):
@@ -740,13 +654,8 @@ class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase):
         optimizer = DecayedAdagradOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_decayadagrad(self):
-        self._check_mlp()
-
     def test_decayadagrad(self):
-        with _test_eager_guard():
-            self.func_test_decayadagrad()
-        self.func_test_decayadagrad()
+        self._check_mlp()


 class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase):
@@ -765,13 +674,8 @@ class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_adadelta(self):
-        self._check_mlp()
-
     def test_adadelta(self):
-        with _test_eager_guard():
-            self.func_test_adadelta()
-        self.func_test_adadelta()
+        self._check_mlp()


 class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase):
@@ -785,13 +689,8 @@ class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase):
         optimizer = RMSPropOptimizer(learning_rate=0.1)
         return optimizer

-    def func_test_rmsprop(self):
-        self._check_mlp()
-
     def test_rmsprop(self):
-        with _test_eager_guard():
-            self.func_test_rmsprop()
-        self.func_test_rmsprop()
+        self._check_mlp()


 class TestImperativeFtrlOptimizer(TestImperativeOptimizerBase):
@@ -805,13 +704,8 @@ class TestImperativeFtrlOptimizer(TestImperativeOptimizerBase):
         optimizer = FtrlOptimizer(learning_rate=0.1)
         return optimizer

-    def func_test_ftrl(self):
-        self._check_mlp()
-
     def test_ftrl(self):
-        with _test_eager_guard():
-            self.func_test_ftrl()
-        self.func_test_ftrl()
+        self._check_mlp()


 def exclude_fn(param):
@@ -845,15 +739,10 @@ class TestImperativeModelAverage(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_modelaverage(self):
+    def test_modelaverage(self):
         exception_message = "In dygraph, don't support ModelAverage."
         self._check_exception(exception_message)

-    def test_modelaverage(self):
-        with _test_eager_guard():
-            self.func_test_modelaverage()
-        self.func_test_modelaverage()
-

 class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -866,32 +755,22 @@ class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_dgcmomentum(self):
+    def test_dgcmomentum(self):
         exception_message = "In dygraph, don't support DGCMomentumOptimizer."
         self._check_exception(exception_message)

-    def test_dgcmomentum(self):
-        with _test_eager_guard():
-            self.func_test_dgcmomentum()
-        self.func_test_dgcmomentum()
-

 class TestImperativeExponentialMovingAverage(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
         optimizer = ExponentialMovingAverage(0.999)
         return optimizer

-    def func_test_exponentialmoving(self):
+    def test_exponentialmoving(self):
         exception_message = (
             "In dygraph, don't support ExponentialMovingAverage."
         )
         self._check_exception(exception_message)

-    def test_exponentialmoving(self):
-        with _test_eager_guard():
-            self.func_test_exponentialmoving()
-        self.func_test_exponentialmoving()
-

 class TestImperativePipelineOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -901,15 +780,10 @@ class TestImperativePipelineOptimizer(TestImperativeOptimizerBase):
         optimizer = PipelineOptimizer(optimizer)
         return optimizer

-    def func_test_pipline(self):
+    def test_pipline(self):
         exception_message = "In dygraph, don't support PipelineOptimizer."
         self._check_exception(exception_message)

-    def test_pipline(self):
-        with _test_eager_guard():
-            self.func_test_pipline()
-        self.func_test_pipline()
-

 class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -919,15 +793,10 @@ class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase):
         optimizer = LookaheadOptimizer(optimizer, alpha=0.5, k=5)
         return optimizer

-    def func_test_lookahead(self):
+    def test_lookahead(self):
         exception_message = "In dygraph, don't support LookaheadOptimizer."
         self._check_exception(exception_message)

-    def test_lookahead(self):
-        with _test_eager_guard():
-            self.func_test_lookahead()
-        self.func_test_lookahead()
-

 class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -937,18 +806,13 @@ class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase):
         optimizer = RecomputeOptimizer(optimizer)
         return optimizer

-    def func_test_recompute(self):
+    def test_recompute(self):
         exception_message = "In dygraph, don't support RecomputeOptimizer."
         self._check_exception(exception_message)

-    def test_recompute(self):
-        with _test_eager_guard():
-            self.func_test_recompute()
-        self.func_test_recompute()
-

 class TestImperativeOptimizerList(unittest.TestCase):
-    def func_test_parameter_list(self):
+    def test_parameter_list(self):
         with fluid.dygraph.guard():
             linear_1 = paddle.nn.Linear(10, 10)
             linear_2 = paddle.nn.Linear(10, 10)
@@ -974,11 +838,6 @@ class TestImperativeOptimizerList(unittest.TestCase):
             == len(linear_1.parameters() + linear_2.parameters())
         )

-    def test_parameter_list(self):
-        with _test_eager_guard():
-            self.func_test_parameter_list()
-        self.func_test_parameter_list()
-

 if __name__ == '__main__':
     unittest.main()
@@ -22,7 +22,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.distributed.fleet.meta_optimizers import DGCMomentumOptimizer
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import (
     AdadeltaOptimizer,
     AdagradOptimizer,
@@ -287,13 +286,8 @@ class TestImperativeOptimizerPiecewiseDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase):
@@ -314,13 +308,8 @@ class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase):
@@ -341,13 +330,8 @@ class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase):
@@ -368,13 +352,8 @@ class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_adam(self):
-        self._check_mlp()
-
     def test_adam(self):
-        with _test_eager_guard():
-            self.func_test_adam()
-        self.func_test_adam()
+        self._check_mlp()


 class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase):
@@ -395,24 +374,14 @@ class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd_cycle(self):
+    def test_sgd_cycle(self):
         self.cycle = True
         self._check_mlp()

-    def test_sgd_cycle(self):
-        with _test_eager_guard():
-            self.func_test_sgd_cycle()
-        self.func_test_sgd_cycle()
-
-    def func_test_sgd(self):
+    def test_sgd(self):
         self.cycle = False
         self._check_mlp()

-    def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
-

 class TestImperativeOptimizerCosineAnnealingDecay(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -432,13 +401,8 @@ class TestImperativeOptimizerCosineAnnealingDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase):
@@ -459,13 +423,8 @@ class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerLambdaDecay(TestImperativeOptimizerBase):
@@ -486,13 +445,8 @@ class TestImperativeOptimizerLambdaDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerLinearWarmup(TestImperativeOptimizerBase):
@@ -517,13 +471,8 @@ class TestImperativeOptimizerLinearWarmup(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerMultiStepDecay(TestImperativeOptimizerBase):
@@ -544,13 +493,8 @@ class TestImperativeOptimizerMultiStepDecay(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerStepLR(TestImperativeOptimizerBase):
@@ -571,13 +515,8 @@ class TestImperativeOptimizerStepLR(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestImperativeOptimizerReduceOnPlateau(TestImperativeOptimizerBase):
@@ -596,17 +535,12 @@ class TestImperativeOptimizerReduceOnPlateau(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_sgd(self):
-        self._check_mlp()
-
     def test_sgd(self):
-        with _test_eager_guard():
-            self.func_test_sgd()
-        self.func_test_sgd()
+        self._check_mlp()


 class TestOptimizerLearningRate(unittest.TestCase):
-    def func_test_constant_lr(self):
+    def test_constant_lr(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -630,12 +564,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
         np.testing.assert_allclose(lr, 0.001, rtol=1e-06, atol=0.0)

-    def test_constant_lr(self):
-        with _test_eager_guard():
-            self.func_test_constant_lr()
-        self.func_test_constant_lr()
-
-    def func_test_lr_decay(self):
+    def test_lr_decay(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -664,12 +593,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
             np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0)
             scheduler.step()

-    def test_lr_decay(self):
-        with _test_eager_guard():
-            self.func_test_lr_decay()
-        self.func_test_lr_decay()
-
-    def func_test_lr_scheduler_natural_exp(self):
+    def test_lr_scheduler_natural_exp(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -694,12 +618,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
             np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0)
             scheduler.step()

-    def test_lr_scheduler_natural_exp(self):
-        with _test_eager_guard():
-            self.func_test_lr_scheduler_natural_exp()
-        self.func_test_lr_scheduler_natural_exp()
-
-    def func_test_set_lr(self):
+    def test_set_lr(self):
         with fluid.dygraph.guard():
             a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
@@ -735,11 +654,6 @@ class TestOptimizerLearningRate(unittest.TestCase):
         )

         adam.set_lr(0.01)

-    def test_set_lr(self):
-        with _test_eager_guard():
-            self.func_test_set_lr()
-        self.func_test_set_lr()
-

 class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -752,13 +666,8 @@ class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase):
         optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
         return optimizer

-    def func_test_momentum(self):
-        self._check_mlp()
-
     def test_momentum(self):
-        with _test_eager_guard():
-            self.func_test_momentum()
-        self.func_test_momentum()
+        self._check_mlp()


 class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase):
@@ -772,13 +681,8 @@ class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase):
         optimizer = LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9)
         return optimizer

-    def func_test_larsmomentum(self):
-        self._check_mlp()
-
     def test_larsmomentum(self):
-        with _test_eager_guard():
-            self.func_test_larsmomentum()
-        self.func_test_larsmomentum()
+        self._check_mlp()


 class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase):
@@ -792,13 +696,8 @@ class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase):
         optimizer = AdagradOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_adagrad(self):
-        self._check_mlp()
-
     def test_adagrad(self):
-        with _test_eager_guard():
-            self.func_test_adagrad()
-        self.func_test_adagrad()
+        self._check_mlp()


 class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase):
@@ -812,13 +711,8 @@ class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase):
         optimizer = AdamaxOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_adamax(self):
-        self._check_mlp()
-
     def test_adamax(self):
-        with _test_eager_guard():
-            self.func_test_adamax()
-        self.func_test_adamax()
+        self._check_mlp()


 class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase):
@@ -840,13 +734,8 @@ class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase):
         optimizer._seed = 100
         return optimizer

-    def func_test_dpsgd(self):
-        self._check_mlp(place=fluid.CPUPlace())
-
     def test_dpsgd(self):
-        with _test_eager_guard():
-            self.func_test_dpsgd()
-        self.func_test_dpsgd()
+        self._check_mlp(place=fluid.CPUPlace())


 class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase):
@@ -860,13 +749,8 @@ class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase):
         optimizer = DecayedAdagradOptimizer(learning_rate=0.2)
         return optimizer

-    def func_test_decayadagrad(self):
-        self._check_mlp()
-
     def test_decayadagrad(self):
-        with _test_eager_guard():
-            self.func_test_decayadagrad()
-        self.func_test_decayadagrad()
+        self._check_mlp()


 class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase):
@@ -885,13 +769,8 @@ class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_adadelta(self):
-        self._check_mlp()
-
     def test_adadelta(self):
-        with _test_eager_guard():
-            self.func_test_adadelta()
-        self.func_test_adadelta()
+        self._check_mlp()


 class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase):
@@ -905,13 +784,8 @@ class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase):
         optimizer = RMSPropOptimizer(learning_rate=0.1)
         return optimizer

-    def func_test_rmsprop(self):
-        self._check_mlp()
-
     def test_rmsprop(self):
-        with _test_eager_guard():
-            self.func_test_rmsprop()
-        self.func_test_rmsprop()
+        self._check_mlp()


 class TestImperativeFtrlOptimizer(TestImperativeOptimizerBase):
@@ -925,13 +799,8 @@ class TestImperativeFtrlOptimizer(TestImperativeOptimizerBase):
         optimizer = FtrlOptimizer(learning_rate=0.1)
         return optimizer

-    def func_test_ftrl(self):
-        self._check_mlp()
-
     def test_ftrl(self):
-        with _test_eager_guard():
-            self.func_test_ftrl()
-        self.func_test_ftrl()
+        self._check_mlp()


 def exclude_fn(param):
@@ -965,15 +834,10 @@ class TestImperativeModelAverage(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_modelaverage(self):
+    def test_modelaverage(self):
         exception_message = "In dygraph, don't support ModelAverage."
         self._check_exception(exception_message)

-    def test_modelaverage(self):
-        with _test_eager_guard():
-            self.func_test_modelaverage()
-        self.func_test_modelaverage()
-

 class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -986,32 +850,22 @@ class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase):
         )
         return optimizer

-    def func_test_dgcmomentum(self):
+    def test_dgcmomentum(self):
         exception_message = "In dygraph, don't support DGCMomentumOptimizer."
         self._check_exception(exception_message)

-    def test_dgcmomentum(self):
-        with _test_eager_guard():
-            self.func_test_dgcmomentum()
-        self.func_test_dgcmomentum()
-

 class TestImperativeExponentialMovingAverage(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
         optimizer = ExponentialMovingAverage(0.999)
         return optimizer

-    def func_test_exponentialmoving(self):
+    def test_exponentialmoving(self):
         exception_message = (
             "In dygraph, don't support ExponentialMovingAverage."
         )
         self._check_exception(exception_message)

-    def test_exponentialmoving(self):
-        with _test_eager_guard():
-            self.func_test_exponentialmoving()
-        self.func_test_exponentialmoving()
-

 class TestImperativePipelineOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -1021,15 +875,10 @@ class TestImperativePipelineOptimizer(TestImperativeOptimizerBase):
         optimizer = PipelineOptimizer(optimizer)
         return optimizer

-    def func_test_pipline(self):
+    def test_pipline(self):
         exception_message = "In dygraph, don't support PipelineOptimizer."
         self._check_exception(exception_message)

-    def test_pipline(self):
-        with _test_eager_guard():
-            self.func_test_pipline()
-        self.func_test_pipline()
-

 class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -1039,15 +888,10 @@ class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase):
         optimizer = LookaheadOptimizer(optimizer, alpha=0.5, k=5)
         return optimizer

-    def func_test_lookahead(self):
+    def test_lookahead(self):
         exception_message = "In dygraph, don't support LookaheadOptimizer."
         self._check_exception(exception_message)

-    def test_lookahead(self):
-        with _test_eager_guard():
-            self.func_test_lookahead()
-        self.func_test_lookahead()
-

 class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
@@ -1057,18 +901,13 @@ class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase):
         optimizer = RecomputeOptimizer(optimizer)
         return optimizer

-    def func_test_recompute(self):
+    def test_recompute(self):
         exception_message = "In dygraph, don't support RecomputeOptimizer."
         self._check_exception(exception_message)

-    def test_recompute(self):
-        with _test_eager_guard():
-            self.func_test_recompute()
-        self.func_test_recompute()
-

 class TestImperativeOptimizerList(unittest.TestCase):
-    def func_test_parameter_list(self):
+    def test_parameter_list(self):
         with fluid.dygraph.guard():
             linear_1 = paddle.nn.Linear(10, 10)
             linear_2 = paddle.nn.Linear(10, 10)
@@ -1094,11 +933,6 @@ class TestImperativeOptimizerList(unittest.TestCase):
             == len(linear_1.parameters() + linear_2.parameters())
         )

-    def test_parameter_list(self):
-        with _test_eager_guard():
-            self.func_test_parameter_list()
-        self.func_test_parameter_list()
-

 if __name__ == '__main__':
     unittest.main()
@@ -18,11 +18,10 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard


 class TestImperativePartitialBackward(unittest.TestCase):
-    def func_partitial_backward(self):
+    def test_partitial_backward(self):
         with fluid.dygraph.guard():
             x = np.random.randn(2, 4, 5).astype("float32")
             x = fluid.dygraph.to_variable(x)
@@ -53,11 +52,6 @@ class TestImperativePartitialBackward(unittest.TestCase):
             linear1.clear_gradients()
             linear2.clear_gradients()

-    def test_partitial_backward(self):
-        with _test_eager_guard():
-            self.func_partitial_backward()
-        self.func_partitial_backward()
-

 if __name__ == '__main__':
     unittest.main()
@@ -23,7 +23,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer
 from paddle.nn import Embedding
@@ -238,15 +237,10 @@ class PtbModel(fluid.Layer):


 class TestDygraphPtbRnn(unittest.TestCase):
-    def func_test_ptb_rnn(self):
+    def test_ptb_rnn(self):
         for is_sparse in [True, False]:
             self.ptb_rnn_cpu_float32(is_sparse)

-    def test_ptb_rnn(self):
-        with _test_eager_guard():
-            self.func_test_ptb_rnn()
-        self.func_test_ptb_rnn()
-
     def ptb_rnn_cpu_float32(self, is_sparse):
         seed = 90
         hidden_size = 10
......
@@ -23,12 +23,11 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer


 class TestDygraphPtbRnnSortGradient(unittest.TestCase):
-    def func_ptb_rnn_sort_gradient(self):
+    def test_ptb_rnn_sort_gradient(self):
         for is_sparse in [True, False]:
             self.ptb_rnn_sort_gradient_cpu_float32(is_sparse)
@@ -192,11 +191,6 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
         for key, value in static_param_updated.items():
             np.testing.assert_array_equal(value, dy_param_updated[key])

-    def test_ptb_rnn_sort_gradient(self):
-        with _test_eager_guard():
-            self.func_ptb_rnn_sort_gradient()
-        self.func_ptb_rnn_sort_gradient()
-

 if __name__ == '__main__':
     unittest.main()
@@ -21,7 +21,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard


 class RecurrentTest(fluid.Layer):
@@ -62,23 +61,22 @@ class TestRecurrentFeed(unittest.TestCase):
         with fluid.dygraph.guard():
             fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-            with _test_eager_guard():
-                fluid.default_startup_program().random_seed = seed
-                fluid.default_main_program().random_seed = seed
-                original_in1 = to_variable(original_np1)
-                original_in2 = to_variable(original_np2)
-                original_in1.stop_gradient = False
-                original_in2.stop_gradient = False
-                rt = RecurrentTest("RecurrentTest")
-
-                for i in range(3):
-                    sum_out, out = rt(original_in1, original_in2)
-                    original_in1 = out
-                    eager_sum_out_value = sum_out.numpy()
-                    sum_out.backward()
-                    eager_dyout = out.gradient()
-                    original_in1.stop_gradient = True
-                    rt.clear_gradients()
+            fluid.default_startup_program().random_seed = seed
+            fluid.default_main_program().random_seed = seed
+            original_in1 = to_variable(original_np1)
+            original_in2 = to_variable(original_np2)
+            original_in1.stop_gradient = False
+            original_in2.stop_gradient = False
+            rt = RecurrentTest("RecurrentTest")
+
+            for i in range(3):
+                sum_out, out = rt(original_in1, original_in2)
+                original_in1 = out
+                eager_sum_out_value = sum_out.numpy()
+                sum_out.backward()
+                eager_dyout = out.gradient()
+                original_in1.stop_gradient = True
+                rt.clear_gradients()
             fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

         with new_program_scope():
......
@@ -20,7 +20,6 @@ from test_imperative_base import new_program_scope
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer
@@ -106,12 +105,11 @@ class TestImperativeMnist(unittest.TestCase):
         dy_out, dy_param_init_value, dy_param_value = run_dygraph()

         with fluid.dygraph.guard():
-            with _test_eager_guard():
-                (
-                    eager_out,
-                    eager_param_init_value,
-                    eager_param_value,
-                ) = run_dygraph()
+            (
+                eager_out,
+                eager_param_init_value,
+                eager_param_value,
+            ) = run_dygraph()

         with new_program_scope():
             paddle.seed(seed)
......
@@ -22,7 +22,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.nn import BatchNorm
@@ -253,7 +252,7 @@ class TestDygraphResnet(unittest.TestCase):
         return _reader_imple

-    def func_test_resnet_float32(self):
+    def test_resnet_float32(self):
         seed = 90
         batch_size = train_parameters["batch_size"]
@@ -462,11 +461,6 @@ class TestDygraphResnet(unittest.TestCase):
         self.assertTrue(np.isfinite(value.all()))
         self.assertFalse(np.isnan(value.any()))

-    def test_resnet_float32(self):
-        with _test_eager_guard():
-            self.func_test_resnet_float32()
-        self.func_test_resnet_float32()
-

 if __name__ == '__main__':
     paddle.enable_static()
......
@@ -22,7 +22,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard

 batch_size = 8
 train_parameters = {
@@ -73,7 +72,7 @@ def optimizer_setting(params, parameter_list=None):

 class TestDygraphResnetSortGradient(unittest.TestCase):
-    def func_test_resnet_sort_gradient_float32(self):
+    def test_resnet_sort_gradient_float32(self):
         seed = 90
         batch_size = train_parameters["batch_size"]
@@ -266,11 +265,6 @@ class TestDygraphResnetSortGradient(unittest.TestCase):
         self.assertTrue(np.isfinite(value.all()))
         self.assertFalse(np.isnan(value.any()))

-    def test_resnet_sort_gradient_float32(self):
-        with _test_eager_guard():
-            self.func_test_resnet_sort_gradient_float32()
-        self.func_test_resnet_sort_gradient_float32()
-

 if __name__ == '__main__':
     unittest.main()