diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
index ccfd85bf89ed4b7fc8ca7e02f830aca061088adf..45e96e7cb4ad09a7d9f200af8aa168137087f580 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
@@ -25,7 +25,7 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid import core
 from paddle.fluid.optimizer import AdamOptimizer
-from paddle.fluid.framework import IrGraph, _test_eager_guard
+from paddle.fluid.framework import IrGraph
 from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
 from paddle.nn import Sequential
 from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
@@ -139,7 +139,7 @@ class TestImperativeOutSclae(unittest.TestCase):
     def tearDown(self):
         self.root_path.cleanup()
 
-    def func_out_scale_acc(self):
+    def test_out_scale_acc(self):
         seed = 1000
         lr = 0.001
 
@@ -208,11 +208,6 @@ class TestImperativeOutSclae(unittest.TestCase):
             msg='Failed to do the imperative qat.',
         )
 
-    def test_out_scale_acc(self):
-        with _test_eager_guard():
-            self.func_out_scale_acc()
-        self.func_out_scale_acc()
-
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py
index df182f6c9c8c4651aa6caec46e39a11ad199dbfb..161700cb2f085359c69f1d02a20ecddc7cb70a56 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py
@@ -28,7 +28,6 @@ import paddle.fluid as fluid
 from paddle.fluid.contrib.slim.quantization import *
 from paddle.fluid.log_helper import get_logger
 from paddle.dataset.common import download
-from paddle.fluid.framework import _test_eager_guard
 
 from imperative_test_utils import (
     fix_model_dict,
@@ -208,7 +207,7 @@ class TestImperativePTQ(unittest.TestCase):
                 break
         return top1_correct_num / total_num
 
-    def func_ptq(self):
+    def test_ptq(self):
         start_time = time.time()
 
         self.set_vars()
@@ -266,14 +265,9 @@ class TestImperativePTQ(unittest.TestCase):
         end_time = time.time()
         print("total time: %ss \n" % (end_time - start_time))
 
-    def test_ptq(self):
-        with _test_eager_guard():
-            self.func_ptq()
-        self.func_ptq()
-
 
 class TestImperativePTQfuse(TestImperativePTQ):
-    def func_ptq(self):
+    def test_ptq(self):
         start_time = time.time()
 
         self.set_vars()
@@ -342,11 +336,6 @@ class TestImperativePTQfuse(TestImperativePTQ):
         end_time = time.time()
         print("total time: %ss \n" % (end_time - start_time))
 
-    def test_ptq(self):
-        with _test_eager_guard():
-            self.func_ptq()
-        self.func_ptq()
-
 
 class TestImperativePTQHist(TestImperativePTQ):
     def set_vars(self):
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
index 6169d1db1e8b900ca64760e0c78a6f6cd9a5b30c..43e4f0686346bbbe3ef21a330978e8f7ad7d4cb6 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
@@ -33,7 +33,6 @@ from paddle.nn.quant.quant_layers import (
     QuantizedConv2D,
     QuantizedConv2DTranspose,
 )
-from paddle.fluid.framework import _test_eager_guard
 from imperative_test_utils import fix_model_dict, ImperativeLenet
 
 paddle.enable_static()
@@ -63,7 +62,7 @@ class TestImperativeQat(unittest.TestCase):
         self.diff_threshold = 0.03125
         self.fuse_conv_bn = False
 
-    def func_qat(self):
+    def test_qat(self):
         self.set_vars()
 
         imperative_qat = ImperativeQuantAware(
@@ -244,11 +243,6 @@ class TestImperativeQat(unittest.TestCase):
         delta_value = fp32_acc - quant_acc
         self.assertLessEqual(delta_value, self.diff_threshold)
 
-    def test_qat(self):
-        with _test_eager_guard():
-            self.func_qat()
-        self.func_qat()
-
 
 class TestImperativeQatONNXFormat(unittest.TestCase):
     def set_vars(self):
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py
index ea0a38ea5519a287f78e1091d03a58ee021578c1..0f7608927007adee7fcb6d30ca5dae753846ae48 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py
@@ -26,7 +26,6 @@ import paddle.fluid as fluid
 from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
 from paddle.fluid.log_helper import get_logger
 from paddle.dataset.common import download
-from paddle.fluid.framework import _test_eager_guard
 from imperative_test_utils import fix_model_dict, ImperativeLenet
 
 os.environ["CPU_NUM"] = "1"
@@ -188,7 +187,7 @@ class TestImperativeQatAmp(unittest.TestCase):
         acc_top1 = sum(acc_top1_list) / len(acc_top1_list)
         return acc_top1
 
-    def ptq(self):
+    def test_ptq(self):
         start_time = time.time()
 
         self.set_vars()
@@ -239,11 +238,6 @@ class TestImperativeQatAmp(unittest.TestCase):
         end_time = time.time()
         print("total time: %ss" % (end_time - start_time))
 
-    def test_ptq(self):
-        self.ptq()
-        with _test_eager_guard():
-            self.ptq()
-
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py
index f56c90f5f492d8fc3d70319ca65958807db23182..7cf3e9ad2b0b3ec84de0584239283a3b65577a49 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py
@@ -38,7 +38,6 @@ from paddle.nn.quant.quant_layers import (
     QuantizedConv2D,
     QuantizedConv2DTranspose,
 )
-from paddle.fluid.framework import _test_eager_guard
 from imperative_test_utils import fix_model_dict
 
 paddle.enable_static()
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py
index 650a0afdeea8a84b282e51c7c7ebcb8766ced19b..ae18f4a4f24bb581a881d2910ccfb6117c3137f0 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py
@@ -26,7 +26,6 @@ from paddle.nn import Sequential
 from paddle.nn import Linear
 from paddle.nn.quant.quant_layers import QuantizedConv2DTranspose
 from paddle.fluid.log_helper import get_logger
-from paddle.fluid.framework import _test_eager_guard
 
 os.environ["CPU_NUM"] = "1"
 
@@ -161,7 +160,7 @@ class TestUserDefinedActPreprocess(unittest.TestCase):
         _logger.info("test act_preprocess")
         self.imperative_qat = ImperativeQuantAware(act_preprocess_layer=PACT)
 
-    def func_quant_aware_training(self):
+    def test_quant_aware_training(self):
         imperative_qat = self.imperative_qat
         seed = 1
         np.random.seed(seed)
@@ -263,11 +262,6 @@ class TestUserDefinedActPreprocess(unittest.TestCase):
         train(lenet)
         test(lenet)
 
-    def test_quant_aware_training(self):
-        with _test_eager_guard():
-            self.func_quant_aware_training()
-        self.func_quant_aware_training()
-
 
 class TestUserDefinedWeightPreprocess(TestUserDefinedActPreprocess):
     def setUp(self):
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py
index db014aef2df1b51bd5c0595006ae12266bd0ccad..d38cbd88fe02ac4cc3830ac7466c23b5f34ebdf2 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py
@@ -33,7 +33,6 @@ from imperative_test_utils import (
     train_lenet,
     ImperativeLenetWithSkipQuant,
 )
-from paddle.fluid.framework import _test_eager_guard
 
 os.environ["CPU_NUM"] = "1"
 if core.is_compiled_with_cuda():
@@ -45,7 +44,7 @@ _logger = get_logger(
 
 
 class TestImperativeOutSclae(unittest.TestCase):
-    def func_out_scale_acc(self):
+    def test_out_scale_acc(self):
         paddle.disable_static()
         seed = 1000
         lr = 0.1
@@ -141,11 +140,6 @@ class TestImperativeOutSclae(unittest.TestCase):
         if find_matmul:
             self.assertTrue(matmul_skip_count == 1)
 
-    def test_out_scale_acc(self):
-        with _test_eager_guard():
-            self.func_out_scale_acc()
-        self.func_out_scale_acc()
-
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index 9ddebbab7670eb4b353bceef6d4eec0af795414f..3a94b51219e361517a61ae5046a1dae98c885d1a 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -965,14 +965,12 @@ def monkey_patch_varbase():
             .. code-block:: python
 
                 import paddle
-                from paddle.fluid.framework import _test_eager_guard
-                with _test_eager_guard():
-                    indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
-                    values = [1, 2, 3, 4, 5]
-                    dense_shape = [3, 4]
-                    sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
-                    print(sparse_x.values())
-                    #[1, 2, 3, 4, 5]
+                indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
+                values = [1, 2, 3, 4, 5]
+                dense_shape = [3, 4]
+                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
+                print(sparse_x.values())
+                #[1, 2, 3, 4, 5]
 
         """
        return _C_ops.sparse_values(self)
@@ -990,16 +988,14 @@ def monkey_patch_varbase():
             .. code-block:: python
 
                 import paddle
-                from paddle.fluid.framework import _test_eager_guard
-                with _test_eager_guard():
-                    indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
-                    values = [1, 2, 3, 4, 5]
-                    dense_shape = [3, 4]
-                    sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
-                    dense_x = sparse_x.to_dense()
-                    #[[0., 1., 0., 2.],
-                    # [0., 0., 3., 0.],
-                    # [4., 5., 0., 0.]]
+                indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
+                values = [1, 2, 3, 4, 5]
+                dense_shape = [3, 4]
+                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
+                dense_x = sparse_x.to_dense()
+                #[[0., 1., 0., 2.],
+                # [0., 0., 3., 0.],
+                # [4., 5., 0., 0.]]
 
         """
         return _C_ops.sparse_to_dense(self)
@@ -1018,14 +1014,12 @@
             .. code-block:: python
 
                 import paddle
-                from paddle.fluid.framework import _test_eager_guard
-                with _test_eager_guard():
-                    dense_x = [[0, 1, 0, 2], [0, 0, 3, 4]]
-                    dense_x = paddle.to_tensor(dense_x, dtype='float32')
-                    sparse_x = dense_x.to_sparse_coo(sparse_dim=2)
-                    #indices=[[0, 0, 1, 1],
-                    # [1, 3, 2, 3]],
-                    #values=[1., 2., 3., 4.]
+                dense_x = [[0, 1, 0, 2], [0, 0, 3, 4]]
+                dense_x = paddle.to_tensor(dense_x, dtype='float32')
+                sparse_x = dense_x.to_sparse_coo(sparse_dim=2)
+                #indices=[[0, 0, 1, 1],
+                # [1, 3, 2, 3]],
+                #values=[1., 2., 3., 4.]
 
         """
         return _C_ops.sparse_to_sparse_coo(self, sparse_dim)