From 7fc25f2292c2d5c6c9c842b13e67ca5539bcf0e5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=A7=9C=E6=B0=B8=E4=B9=85?= <34344716+yjjiang11@users.noreply.github.com>
Date: Mon, 19 Dec 2022 17:00:44 +0800
Subject: [PATCH] Yj/rm imperative dygraph eager tests (#48782)

* rm imperative op eager guard tests

* modify varbase patch eager guard doc
---
 .../slim/tests/test_imperative_out_scale.py   |  9 +---
 .../contrib/slim/tests/test_imperative_ptq.py | 15 +-----
 .../contrib/slim/tests/test_imperative_qat.py |  8 +---
 .../slim/tests/test_imperative_qat_amp.py     |  8 +---
 .../slim/tests/test_imperative_qat_lsq.py     |  1 -
 .../tests/test_imperative_qat_user_defined.py |  8 +---
 .../slim/tests/test_imperative_skip_op.py     |  8 +---
 .../fluid/dygraph/varbase_patch_methods.py    | 46 ++++++++-----------
 8 files changed, 28 insertions(+), 75 deletions(-)

diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
index ccfd85bf89..45e96e7cb4 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
@@ -25,7 +25,7 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid import core
 from paddle.fluid.optimizer import AdamOptimizer
-from paddle.fluid.framework import IrGraph, _test_eager_guard
+from paddle.fluid.framework import IrGraph
 from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
 from paddle.nn import Sequential
 from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
@@ -139,7 +139,7 @@ class TestImperativeOutSclae(unittest.TestCase):
     def tearDown(self):
         self.root_path.cleanup()
 
-    def func_out_scale_acc(self):
+    def test_out_scale_acc(self):
         seed = 1000
         lr = 0.001
 
@@ -208,11 +208,6 @@ class TestImperativeOutSclae(unittest.TestCase):
             msg='Failed to do the imperative qat.',
         )
 
-    def test_out_scale_acc(self):
-        with _test_eager_guard():
-            self.func_out_scale_acc()
-        self.func_out_scale_acc()
-
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py
index df182f6c9c..161700cb2f 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py
@@ -28,7 +28,6 @@ import paddle.fluid as fluid
 from paddle.fluid.contrib.slim.quantization import *
 from paddle.fluid.log_helper import get_logger
 from paddle.dataset.common import download
-from paddle.fluid.framework import _test_eager_guard
 
 from imperative_test_utils import (
     fix_model_dict,
@@ -208,7 +207,7 @@ class TestImperativePTQ(unittest.TestCase):
                 break
         return top1_correct_num / total_num
 
-    def func_ptq(self):
+    def test_ptq(self):
         start_time = time.time()
 
         self.set_vars()
@@ -266,14 +265,9 @@ class TestImperativePTQ(unittest.TestCase):
         end_time = time.time()
         print("total time: %ss \n" % (end_time - start_time))
 
-    def test_ptq(self):
-        with _test_eager_guard():
-            self.func_ptq()
-        self.func_ptq()
-
 
 class TestImperativePTQfuse(TestImperativePTQ):
-    def func_ptq(self):
+    def test_ptq(self):
         start_time = time.time()
 
         self.set_vars()
@@ -342,11 +336,6 @@ class TestImperativePTQfuse(TestImperativePTQ):
         end_time = time.time()
         print("total time: %ss \n" % (end_time - start_time))
 
-    def test_ptq(self):
-        with _test_eager_guard():
-            self.func_ptq()
-        self.func_ptq()
-
 
 class TestImperativePTQHist(TestImperativePTQ):
     def set_vars(self):
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
index 6169d1db1e..43e4f06863 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
@@ -33,7 +33,6 @@ from paddle.nn.quant.quant_layers import (
     QuantizedConv2D,
     QuantizedConv2DTranspose,
 )
-from paddle.fluid.framework import _test_eager_guard
 from imperative_test_utils import fix_model_dict, ImperativeLenet
 
 paddle.enable_static()
@@ -63,7 +62,7 @@ class TestImperativeQat(unittest.TestCase):
         self.diff_threshold = 0.03125
         self.fuse_conv_bn = False
 
-    def func_qat(self):
+    def test_qat(self):
         self.set_vars()
 
         imperative_qat = ImperativeQuantAware(
@@ -244,11 +243,6 @@ class TestImperativeQat(unittest.TestCase):
         delta_value = fp32_acc - quant_acc
         self.assertLessEqual(delta_value, self.diff_threshold)
 
-    def test_qat(self):
-        with _test_eager_guard():
-            self.func_qat()
-        self.func_qat()
-
 
 class TestImperativeQatONNXFormat(unittest.TestCase):
     def set_vars(self):
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py
index ea0a38ea55..0f76089270 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py
@@ -26,7 +26,6 @@ import paddle.fluid as fluid
 from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
 from paddle.fluid.log_helper import get_logger
 from paddle.dataset.common import download
-from paddle.fluid.framework import _test_eager_guard
 from imperative_test_utils import fix_model_dict, ImperativeLenet
 
 os.environ["CPU_NUM"] = "1"
@@ -188,7 +187,7 @@ class TestImperativeQatAmp(unittest.TestCase):
         acc_top1 = sum(acc_top1_list) / len(acc_top1_list)
         return acc_top1
 
-    def ptq(self):
+    def test_ptq(self):
         start_time = time.time()
 
         self.set_vars()
@@ -239,11 +238,6 @@ class TestImperativeQatAmp(unittest.TestCase):
         end_time = time.time()
         print("total time: %ss" % (end_time - start_time))
 
-    def test_ptq(self):
-        self.ptq()
-        with _test_eager_guard():
-            self.ptq()
-
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py
index f56c90f5f4..7cf3e9ad2b 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py
@@ -38,7 +38,6 @@ from paddle.nn.quant.quant_layers import (
     QuantizedConv2D,
     QuantizedConv2DTranspose,
 )
-from paddle.fluid.framework import _test_eager_guard
 from imperative_test_utils import fix_model_dict
 
 paddle.enable_static()
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py
index 650a0afdee..ae18f4a4f2 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py
@@ -26,7 +26,6 @@ from paddle.nn import Sequential
 from paddle.nn import Linear
 from paddle.nn.quant.quant_layers import QuantizedConv2DTranspose
 from paddle.fluid.log_helper import get_logger
-from paddle.fluid.framework import _test_eager_guard
 
 os.environ["CPU_NUM"] = "1"
 
@@ -161,7 +160,7 @@ class TestUserDefinedActPreprocess(unittest.TestCase):
         _logger.info("test act_preprocess")
         self.imperative_qat = ImperativeQuantAware(act_preprocess_layer=PACT)
 
-    def func_quant_aware_training(self):
+    def test_quant_aware_training(self):
         imperative_qat = self.imperative_qat
         seed = 1
         np.random.seed(seed)
@@ -263,11 +262,6 @@ class TestUserDefinedActPreprocess(unittest.TestCase):
         train(lenet)
         test(lenet)
 
-    def test_quant_aware_training(self):
-        with _test_eager_guard():
-            self.func_quant_aware_training()
-        self.func_quant_aware_training()
-
 
 class TestUserDefinedWeightPreprocess(TestUserDefinedActPreprocess):
     def setUp(self):
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py
index db014aef2d..d38cbd88fe 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py
@@ -33,7 +33,6 @@ from imperative_test_utils import (
     train_lenet,
     ImperativeLenetWithSkipQuant,
 )
-from paddle.fluid.framework import _test_eager_guard
 
 os.environ["CPU_NUM"] = "1"
 if core.is_compiled_with_cuda():
@@ -45,7 +44,7 @@ _logger = get_logger(
 
 
 class TestImperativeOutSclae(unittest.TestCase):
-    def func_out_scale_acc(self):
+    def test_out_scale_acc(self):
         paddle.disable_static()
         seed = 1000
         lr = 0.1
@@ -141,11 +140,6 @@ class TestImperativeOutSclae(unittest.TestCase):
         if find_matmul:
             self.assertTrue(matmul_skip_count == 1)
 
-    def test_out_scale_acc(self):
-        with _test_eager_guard():
-            self.func_out_scale_acc()
-        self.func_out_scale_acc()
-
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index 9ddebbab76..3a94b51219 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -965,14 +965,12 @@ def monkey_patch_varbase():
             .. code-block:: python
 
                 import paddle
-                from paddle.fluid.framework import _test_eager_guard
-                with _test_eager_guard():
-                    indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
-                    values = [1, 2, 3, 4, 5]
-                    dense_shape = [3, 4]
-                    sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
-                    print(sparse_x.values())
-                    #[1, 2, 3, 4, 5]
+                indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
+                values = [1, 2, 3, 4, 5]
+                dense_shape = [3, 4]
+                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
+                print(sparse_x.values())
+                #[1, 2, 3, 4, 5]
 
         """
         return _C_ops.sparse_values(self)
@@ -990,16 +988,14 @@ def monkey_patch_varbase():
             .. code-block:: python
 
                 import paddle
-                from paddle.fluid.framework import _test_eager_guard
-                with _test_eager_guard():
-                    indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
-                    values = [1, 2, 3, 4, 5]
-                    dense_shape = [3, 4]
-                    sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
-                    dense_x = sparse_x.to_dense()
-                    #[[0., 1., 0., 2.],
-                    # [0., 0., 3., 0.],
-                    # [4., 5., 0., 0.]]
+                indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
+                values = [1, 2, 3, 4, 5]
+                dense_shape = [3, 4]
+                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
+                dense_x = sparse_x.to_dense()
+                #[[0., 1., 0., 2.],
+                # [0., 0., 3., 0.],
+                # [4., 5., 0., 0.]]
 
         """
         return _C_ops.sparse_to_dense(self)
@@ -1018,14 +1014,12 @@ def monkey_patch_varbase():
             .. code-block:: python
 
                 import paddle
-                from paddle.fluid.framework import _test_eager_guard
-                with _test_eager_guard():
-                    dense_x = [[0, 1, 0, 2], [0, 0, 3, 4]]
-                    dense_x = paddle.to_tensor(dense_x, dtype='float32')
-                    sparse_x = dense_x.to_sparse_coo(sparse_dim=2)
-                    #indices=[[0, 0, 1, 1],
-                    # [1, 3, 2, 3]],
-                    #values=[1., 2., 3., 4.]
+                dense_x = [[0, 1, 0, 2], [0, 0, 3, 4]]
+                dense_x = paddle.to_tensor(dense_x, dtype='float32')
+                sparse_x = dense_x.to_sparse_coo(sparse_dim=2)
+                #indices=[[0, 0, 1, 1],
+                # [1, 3, 2, 3]],
+                #values=[1., 2., 3., 4.]
 
         """
        return _C_ops.sparse_to_sparse_coo(self, sparse_dim)
-- 
GitLab
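
Note: the test-file hunks above all remove the same wrapper pattern. Each test previously kept its body in a func_* method and exposed a test_* entry point that ran the body twice, once inside _test_eager_guard() and once without; this patch drops the wrapper and renames the body to test_*, so each case now runs once under the default dygraph mode. The following is a minimal before/after sketch of that pattern only; the TestExample classes are hypothetical and are not taken from this patch.

    import unittest

    # Guard whose usages this patch removes from the slim tests.
    from paddle.fluid.framework import _test_eager_guard


    class TestExampleBefore(unittest.TestCase):
        # Pattern removed by the patch: the real body lives in func_*,
        # and test_* runs it under the eager guard and then again without it.
        def func_check(self):
            self.assertEqual(1 + 1, 2)  # stands in for the real test body

        def test_check(self):
            with _test_eager_guard():
                self.func_check()
            self.func_check()


    class TestExampleAfter(unittest.TestCase):
        # Pattern after the patch: the body itself is the test_* method.
        def test_check(self):
            self.assertEqual(1 + 1, 2)  # stands in for the real test body


    if __name__ == '__main__':
        unittest.main()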