diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
index 96b3b67103b81a503e36db622d648d26ca832a50..99a23525409f3746982db402132db5d04f936bd4 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
@@ -17,6 +17,8 @@ from __future__ import print_function
 import os
 import numpy as np
 import random
+import shutil
+import time
 import unittest
 import logging
 import paddle
@@ -157,6 +159,20 @@ class TestImperativeQat(unittest.TestCase):
     QAT = quantization-aware training
     """
 
+    @classmethod
+    def setUpClass(cls):
+        timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
+        cls.root_path = os.path.join(os.getcwd(), "imperative_qat_" + timestamp)
+        cls.save_path = os.path.join(cls.root_path, "lenet")
+        cls.dynamic_root_path = os.path.join(os.getcwd(),
+                                             "dynamic_mnist_" + timestamp)
+        cls.dynamic_save_path = os.path.join(cls.dynamic_root_path, "model")
+
+    @classmethod
+    def tearDownClass(cls):
+        shutil.rmtree(cls.root_path)
+        shutil.rmtree(cls.dynamic_root_path)
+
     def test_qat_save(self):
         imperative_qat = ImperativeQuantAware(
             weight_quantize_type='abs_max',
@@ -206,6 +222,8 @@ class TestImperativeQat(unittest.TestCase):
                         "Train | At epoch {} step {}: loss = {:}, acc= {:}".
                         format(epoch, batch_id,
                                avg_loss.numpy(), acc.numpy()))
+                if batch_id == 500:  # For shortening CI time
+                    break
 
             lenet.eval()
             for batch_id, data in enumerate(test_reader()):
@@ -242,11 +260,9 @@ class TestImperativeQat(unittest.TestCase):
             before_save = lenet(test_img)
 
         # save inference quantized model
-        path = "./qat_infer_model/lenet"
-        save_dir = "./qat_infer_model"
         paddle.jit.save(
             layer=lenet,
-            path=path,
+            path=TestImperativeQat.save_path,
             input_spec=[
                 paddle.static.InputSpec(
                     shape=[None, 1, 28, 28], dtype='float32')
@@ -259,7 +275,7 @@ class TestImperativeQat(unittest.TestCase):
         exe = fluid.Executor(place)
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(
-             dirname=save_dir,
+             dirname=TestImperativeQat.root_path,
              executor=exe,
              model_filename="lenet" + INFER_MODEL_SUFFIX,
              params_filename="lenet" + INFER_PARAMS_SUFFIX)
@@ -351,7 +367,7 @@ class TestImperativeQat(unittest.TestCase):
 
         paddle.jit.save(
             layer=lenet,
-            path="./dynamic_mnist/model",
+            path=TestImperativeQat.dynamic_save_path,
             input_spec=[
                 paddle.static.InputSpec(
                     shape=[None, 1, 28, 28], dtype='float32')
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_addquantdequant.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_addquantdequant.py
index d76e4825e0d622c8e249b25c5f77c3238ca9739c..f5b3e89ef415c113add9b04f65d3f27cd16244a1 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_addquantdequant.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_addquantdequant.py
@@ -17,6 +17,8 @@ from __future__ import print_function
 import os
 import numpy as np
 import random
+import shutil
+import time
 import unittest
 import logging
 import paddle
@@ -185,6 +187,21 @@ class ImperativeLenet(fluid.dygraph.Layer):
 
 
 class TestImperativeAddQuantDequant(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
+        cls.root_path = os.path.join(os.getcwd(),
+                                     "imperative_qat_aqd_" + timestamp)
+        cls.save_path = os.path.join(cls.root_path, "lenet")
+        cls.dynamic_root_path = os.path.join(os.getcwd(),
+                                             "dynamic_mnist_aqd_" + timestamp)
+        cls.dynamic_save_path = os.path.join(cls.dynamic_root_path, "model")
+
+    @classmethod
+    def tearDownClass(cls):
+        shutil.rmtree(cls.root_path)
+        shutil.rmtree(cls.dynamic_root_path)
+
     def test_qat_save(self):
 
         imperative_qat = ImperativeQuantAware(
@@ -228,6 +245,8 @@ class TestImperativeAddQuantDequant(unittest.TestCase):
                         "Train | At epoch {} step {}: loss = {:}, acc= {:}".
                         format(epoch, batch_id,
                                avg_loss.numpy(), acc.numpy()))
+                if batch_id == 500:  # For shortening CI time
+                    break
 
             lenet.eval()
             for batch_id, data in enumerate(test_reader()):
@@ -264,11 +283,9 @@ class TestImperativeAddQuantDequant(unittest.TestCase):
             before_save = lenet(test_img)
 
         # save inference quantized model
-        path = "./qat_infer_model/lenet"
-        save_dir = "./qat_infer_model"
         paddle.jit.save(
             layer=lenet,
-            path=path,
+            path=TestImperativeAddQuantDequant.save_path,
             input_spec=[
                 paddle.static.InputSpec(
                     shape=[None, 1, 28, 28], dtype='float32')
@@ -280,7 +297,7 @@ class TestImperativeAddQuantDequant(unittest.TestCase):
         exe = fluid.Executor(place)
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(
-             dirname=save_dir,
+             dirname=TestImperativeAddQuantDequant.root_path,
              executor=exe,
              model_filename="lenet" + INFER_MODEL_SUFFIX,
              params_filename="lenet" + INFER_PARAMS_SUFFIX)
@@ -378,7 +395,7 @@ class TestImperativeAddQuantDequant(unittest.TestCase):
         lenet.eval()
         paddle.jit.save(
             layer=lenet,
-            path="./dynamic_mnist/model",
+            path=TestImperativeAddQuantDequant.dynamic_save_path,
             input_spec=[
                 paddle.static.InputSpec(
                     shape=[None, 1, 28, 28], dtype='float32')
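Below is a minimal, standalone sketch (not part of the patch) of the pattern both test files adopt: each test class builds a uniquely named, timestamped output directory in setUpClass and removes it in tearDownClass, so parallel CI runs no longer collide on shared paths such as "./qat_infer_model" or "./dynamic_mnist". The class name, directory prefix, and the dummy artifact written here are hypothetical; the real tests save models with paddle.jit.save, which creates the directory itself.

import os
import shutil
import time
import unittest


class TempDirPatternTest(unittest.TestCase):
    """Illustrative only: per-class timestamped output directory."""

    @classmethod
    def setUpClass(cls):
        # Same scheme as the patch: a timestamp keeps concurrent runs apart.
        timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
        cls.root_path = os.path.join(os.getcwd(), "demo_output_" + timestamp)
        cls.save_path = os.path.join(cls.root_path, "model")
        # The real tests rely on paddle.jit.save to create root_path;
        # here it is created explicitly for the stand-in artifact below.
        os.makedirs(cls.root_path, exist_ok=True)

    @classmethod
    def tearDownClass(cls):
        # Remove everything the tests wrote under the unique root.
        shutil.rmtree(cls.root_path, ignore_errors=True)

    def test_write_artifact(self):
        # Stand-in for saving a quantized model under save_path.
        artifact = self.save_path + ".txt"
        with open(artifact, "w") as f:
            f.write("saved")
        self.assertTrue(os.path.exists(artifact))


if __name__ == '__main__':
    unittest.main()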