Unverified commit 7fc25f22 authored by 姜永久, committed by GitHub

Yj/rm imperative dygraph eager tests (#48782)

* rm imperative op eager guard tests

* modify varbase patch eager guard doc
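
The pattern removed throughout these tests ran every case twice, once under `_test_eager_guard()` and once in legacy dygraph mode, by splitting the body into a `func_*` helper and a `test_*` wrapper. With eager mode now the default, the wrapper goes away and the helper is renamed to `test_*`. A minimal sketch of the before/after shape (the class name `TestQatExample` is illustrative, not from this diff):

```python
import unittest

# Old shape (removed by this PR): the real work lived in a func_* helper
# and the test_* wrapper ran it twice, once under the eager guard:
#
#     def func_qat(self):
#         ...           # actual checks
#
#     def test_qat(self):
#         with _test_eager_guard():
#             self.func_qat()
#         self.func_qat()

# New shape: eager mode is the default, so the helper body simply
# becomes the test itself and runs once.
class TestQatExample(unittest.TestCase):  # illustrative name only
    def test_qat(self):
        self.assertTrue(True)  # stands in for the real checks

if __name__ == '__main__':
    unittest.main()
```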
Parent afeb394e
@@ -25,7 +25,7 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid import core
 from paddle.fluid.optimizer import AdamOptimizer
-from paddle.fluid.framework import IrGraph, _test_eager_guard
+from paddle.fluid.framework import IrGraph
 from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
 from paddle.nn import Sequential
 from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
@@ -139,7 +139,7 @@ class TestImperativeOutSclae(unittest.TestCase):
     def tearDown(self):
         self.root_path.cleanup()
 
-    def func_out_scale_acc(self):
+    def test_out_scale_acc(self):
         seed = 1000
         lr = 0.001
@@ -208,11 +208,6 @@ class TestImperativeOutSclae(unittest.TestCase):
             msg='Failed to do the imperative qat.',
         )
 
-    def test_out_scale_acc(self):
-        with _test_eager_guard():
-            self.func_out_scale_acc()
-        self.func_out_scale_acc()
-
 
 if __name__ == '__main__':
     unittest.main()
@@ -28,7 +28,6 @@ import paddle.fluid as fluid
 from paddle.fluid.contrib.slim.quantization import *
 from paddle.fluid.log_helper import get_logger
 from paddle.dataset.common import download
-from paddle.fluid.framework import _test_eager_guard
 from imperative_test_utils import (
     fix_model_dict,
@@ -208,7 +207,7 @@ class TestImperativePTQ(unittest.TestCase):
                 break
         return top1_correct_num / total_num
 
-    def func_ptq(self):
+    def test_ptq(self):
         start_time = time.time()
         self.set_vars()
@@ -266,14 +265,9 @@ class TestImperativePTQ(unittest.TestCase):
         end_time = time.time()
         print("total time: %ss \n" % (end_time - start_time))
 
-    def test_ptq(self):
-        with _test_eager_guard():
-            self.func_ptq()
-        self.func_ptq()
-
 
 class TestImperativePTQfuse(TestImperativePTQ):
-    def func_ptq(self):
+    def test_ptq(self):
         start_time = time.time()
         self.set_vars()
@@ -342,11 +336,6 @@ class TestImperativePTQfuse(TestImperativePTQ):
         end_time = time.time()
         print("total time: %ss \n" % (end_time - start_time))
 
-    def test_ptq(self):
-        with _test_eager_guard():
-            self.func_ptq()
-        self.func_ptq()
-
 
 class TestImperativePTQHist(TestImperativePTQ):
     def set_vars(self):
......
@@ -33,7 +33,6 @@ from paddle.nn.quant.quant_layers import (
     QuantizedConv2D,
     QuantizedConv2DTranspose,
 )
-from paddle.fluid.framework import _test_eager_guard
 from imperative_test_utils import fix_model_dict, ImperativeLenet
 
 paddle.enable_static()
@@ -63,7 +62,7 @@ class TestImperativeQat(unittest.TestCase):
         self.diff_threshold = 0.03125
         self.fuse_conv_bn = False
 
-    def func_qat(self):
+    def test_qat(self):
         self.set_vars()
 
         imperative_qat = ImperativeQuantAware(
@@ -244,11 +243,6 @@ class TestImperativeQat(unittest.TestCase):
         delta_value = fp32_acc - quant_acc
         self.assertLessEqual(delta_value, self.diff_threshold)
 
-    def test_qat(self):
-        with _test_eager_guard():
-            self.func_qat()
-        self.func_qat()
-
 
 class TestImperativeQatONNXFormat(unittest.TestCase):
     def set_vars(self):
......
@@ -26,7 +26,6 @@ import paddle.fluid as fluid
 from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
 from paddle.fluid.log_helper import get_logger
 from paddle.dataset.common import download
-from paddle.fluid.framework import _test_eager_guard
 from imperative_test_utils import fix_model_dict, ImperativeLenet
 
 os.environ["CPU_NUM"] = "1"
@@ -188,7 +187,7 @@ class TestImperativeQatAmp(unittest.TestCase):
         acc_top1 = sum(acc_top1_list) / len(acc_top1_list)
         return acc_top1
 
-    def ptq(self):
+    def test_ptq(self):
         start_time = time.time()
         self.set_vars()
@@ -239,11 +238,6 @@ class TestImperativeQatAmp(unittest.TestCase):
         end_time = time.time()
         print("total time: %ss" % (end_time - start_time))
 
-    def test_ptq(self):
-        self.ptq()
-        with _test_eager_guard():
-            self.ptq()
-
 
 if __name__ == '__main__':
     unittest.main()
@@ -38,7 +38,6 @@ from paddle.nn.quant.quant_layers import (
     QuantizedConv2D,
     QuantizedConv2DTranspose,
 )
-from paddle.fluid.framework import _test_eager_guard
 from imperative_test_utils import fix_model_dict
 
 paddle.enable_static()
......
@@ -26,7 +26,6 @@ from paddle.nn import Sequential
 from paddle.nn import Linear
 from paddle.nn.quant.quant_layers import QuantizedConv2DTranspose
 from paddle.fluid.log_helper import get_logger
-from paddle.fluid.framework import _test_eager_guard
 
 os.environ["CPU_NUM"] = "1"
@@ -161,7 +160,7 @@ class TestUserDefinedActPreprocess(unittest.TestCase):
         _logger.info("test act_preprocess")
         self.imperative_qat = ImperativeQuantAware(act_preprocess_layer=PACT)
 
-    def func_quant_aware_training(self):
+    def test_quant_aware_training(self):
         imperative_qat = self.imperative_qat
         seed = 1
         np.random.seed(seed)
@@ -263,11 +262,6 @@ class TestUserDefinedActPreprocess(unittest.TestCase):
         train(lenet)
         test(lenet)
 
-    def test_quant_aware_training(self):
-        with _test_eager_guard():
-            self.func_quant_aware_training()
-        self.func_quant_aware_training()
-
 
 class TestUserDefinedWeightPreprocess(TestUserDefinedActPreprocess):
     def setUp(self):
......
@@ -33,7 +33,6 @@ from imperative_test_utils import (
     train_lenet,
     ImperativeLenetWithSkipQuant,
 )
-from paddle.fluid.framework import _test_eager_guard
 
 os.environ["CPU_NUM"] = "1"
 if core.is_compiled_with_cuda():
@@ -45,7 +44,7 @@ _logger = get_logger(
 
 
 class TestImperativeOutSclae(unittest.TestCase):
-    def func_out_scale_acc(self):
+    def test_out_scale_acc(self):
         paddle.disable_static()
         seed = 1000
         lr = 0.1
@@ -141,11 +140,6 @@ class TestImperativeOutSclae(unittest.TestCase):
         if find_matmul:
             self.assertTrue(matmul_skip_count == 1)
 
-    def test_out_scale_acc(self):
-        with _test_eager_guard():
-            self.func_out_scale_acc()
-        self.func_out_scale_acc()
-
 
 if __name__ == '__main__':
     unittest.main()
@@ -965,14 +965,12 @@ def monkey_patch_varbase():
            .. code-block:: python

                import paddle
-                from paddle.fluid.framework import _test_eager_guard
-                with _test_eager_guard():
-                    indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
-                    values = [1, 2, 3, 4, 5]
-                    dense_shape = [3, 4]
-                    sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
-                    print(sparse_x.values())
-                    #[1, 2, 3, 4, 5]
+                indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
+                values = [1, 2, 3, 4, 5]
+                dense_shape = [3, 4]
+                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
+                print(sparse_x.values())
+                #[1, 2, 3, 4, 5]
        """
        return _C_ops.sparse_values(self)
@@ -990,16 +988,14 @@ def monkey_patch_varbase():
            .. code-block:: python

                import paddle
-                from paddle.fluid.framework import _test_eager_guard
-                with _test_eager_guard():
-                    indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
-                    values = [1, 2, 3, 4, 5]
-                    dense_shape = [3, 4]
-                    sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
-                    dense_x = sparse_x.to_dense()
-                    #[[0., 1., 0., 2.],
-                    # [0., 0., 3., 0.],
-                    # [4., 5., 0., 0.]]
+                indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
+                values = [1, 2, 3, 4, 5]
+                dense_shape = [3, 4]
+                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
+                dense_x = sparse_x.to_dense()
+                #[[0., 1., 0., 2.],
+                # [0., 0., 3., 0.],
+                # [4., 5., 0., 0.]]
        """
        return _C_ops.sparse_to_dense(self)
@@ -1018,14 +1014,12 @@ def monkey_patch_varbase():
            .. code-block:: python

                import paddle
-                from paddle.fluid.framework import _test_eager_guard
-                with _test_eager_guard():
-                    dense_x = [[0, 1, 0, 2], [0, 0, 3, 4]]
-                    dense_x = paddle.to_tensor(dense_x, dtype='float32')
-                    sparse_x = dense_x.to_sparse_coo(sparse_dim=2)
-                    #indices=[[0, 0, 1, 1],
-                    #         [1, 3, 2, 3]],
-                    #values=[1., 2., 3., 4.]
+                dense_x = [[0, 1, 0, 2], [0, 0, 3, 4]]
+                dense_x = paddle.to_tensor(dense_x, dtype='float32')
+                sparse_x = dense_x.to_sparse_coo(sparse_dim=2)
+                #indices=[[0, 0, 1, 1],
+                #         [1, 3, 2, 3]],
+                #values=[1., 2., 3., 4.]
        """
        return _C_ops.sparse_to_sparse_coo(self, sparse_dim)
......
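
The updated docstrings rely on eager mode being the default, so the examples no longer need a guard. For reference, a combined, runnable sketch of those examples (assumes a Paddle 2.x build with `paddle.sparse` available):

```python
import paddle

# Build a sparse COO tensor from indices/values, as in the updated docs.
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
sparse_x = paddle.sparse.sparse_coo_tensor(
    paddle.to_tensor(indices, dtype='int64'),
    paddle.to_tensor(values, dtype='float32'),
    shape=dense_shape,
)
print(sparse_x.values())    # [1., 2., 3., 4., 5.]
print(sparse_x.to_dense())  # the 3x4 dense tensor shown in the docstring

# Round-trip a dense tensor back to sparse COO form.
dense_x = paddle.to_tensor([[0, 1, 0, 2], [0, 0, 3, 4]], dtype='float32')
print(dense_x.to_sparse_coo(sparse_dim=2))
```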