From 2a82c5651ea8789c7997643c3840ed934324c4a1 Mon Sep 17 00:00:00 2001
From: Haihao Shen
Date: Fri, 25 Jan 2019 19:48:06 +0800
Subject: [PATCH] Refine INT8 calibration API; shorten the iteration number to
 reduce test time; test=develop

---
 .../fluid/contrib/int8_inference/utility.py  | 34 +++++++++++++++---
 .../fluid/contrib/tests/test_calibration.py  | 35 +++++++------------
 2 files changed, 42 insertions(+), 27 deletions(-)

diff --git a/python/paddle/fluid/contrib/int8_inference/utility.py b/python/paddle/fluid/contrib/int8_inference/utility.py
index 197fc5f2d..40de038f2 100644
--- a/python/paddle/fluid/contrib/int8_inference/utility.py
+++ b/python/paddle/fluid/contrib/int8_inference/utility.py
@@ -32,10 +32,13 @@ class Calibrator(object):
 
     def __init__(self, *args, **kwargs):
         self.program = kwargs['program']
-        self.iterations = kwargs['iterations']
         self.pretrained_model = kwargs['pretrained_model']
-        self.debug = kwargs['debug']
+        self.debug = kwargs['debug'] if 'debug' in kwargs else False
         self.algo = kwargs['algo']
+        self.output = kwargs['output']
+        self.feed_var_names = kwargs['feed_var_names']
+        self.fetch_list = kwargs['fetch_list']
+        self.exe = kwargs['exe']
 
         self._conv_input_var_name = []
         self._conv_output_var_name = []
@@ -54,17 +57,38 @@ class Calibrator(object):
         self._u8_output_var = []
         self._s8_output_var = []
         self._persistable_vars = []
+        self._sampling_data = {}
 
-    def generate_sampling_program(self):
         self.__init_analysis()
         self.__generate_output_program()
 
-    def generate_quantized_data(self, sampling_data):
-        self.__sampling(sampling_data)
+    def save_int8_model(self):
+        self.__sampling(self._sampling_data)
         self.__save_scale()
         self.__update_program()
         self.__update_output_program_attr()
         self.__display_debug()
+        self.__save_offline_model()
+
+    def sample_data(self):
+        '''
+        Sampling the tensor data of variable.
+        '''
+        for i in self.sampling_program.list_vars():
+            if i.name in self.sampling_vars:
+                np_data = np.array(fluid.global_scope().find_var(i.name)
+                                   .get_tensor())
+                if i.name not in self._sampling_data:
+                    self._sampling_data[i.name] = []
+                self._sampling_data[i.name].append(np_data)
+
+    def __save_offline_model(self):
+        '''
+        Save the quantized model to the disk.
+        '''
+        fluid.io.save_inference_model(self.output, self.feed_var_names,
+                                      self.fetch_list, self.exe,
+                                      self.sampling_program)
 
     def __display_debug(self):
         if self.debug:
diff --git a/python/paddle/fluid/contrib/tests/test_calibration.py b/python/paddle/fluid/contrib/tests/test_calibration.py
index 17e4eb8b8..ed5ea7026 100644
--- a/python/paddle/fluid/contrib/tests/test_calibration.py
+++ b/python/paddle/fluid/contrib/tests/test_calibration.py
@@ -26,7 +26,7 @@ import paddle.fluid.profiler as profiler
 from PIL import Image, ImageEnhance
 import math
 sys.path.append('..')
-import int8_inference.utility as ut
+import int8_inference.utility as int8_utility
 
 random.seed(0)
 np.random.seed(0)
@@ -120,13 +120,13 @@ class TestCalibration(unittest.TestCase):
     def setUp(self):
         # TODO(guomingz): Put the download process in the cmake.
         # Download and unzip test data set
-        imagenet_dl_url = 'http://paddle-inference-dist.bj.bcebos.com/int8/calibration_test_data.tar.gz'
+        imagenet_dl_url = 'http://paddle-inference-dist.cdn.bcebos.com/int8/calibration_test_data.tar.gz'
         zip_file_name = imagenet_dl_url.split('/')[-1]
         cmd = 'rm -rf data {} && mkdir data && wget {} && tar xvf {} -C data'.format(
             zip_file_name, imagenet_dl_url, zip_file_name)
         os.system(cmd)
         # resnet50 fp32 data
-        resnet50_fp32_model_url = 'http://paddle-inference-dist.bj.bcebos.com/int8/resnet50_int8_model.tar.gz'
+        resnet50_fp32_model_url = 'http://paddle-inference-dist.cdn.bcebos.com/int8/resnet50_int8_model.tar.gz'
         resnet50_zip_name = resnet50_fp32_model_url.split('/')[-1]
         resnet50_unzip_folder_name = 'resnet50_fp32'
         cmd = 'rm -rf {} {} && mkdir {} && wget {} && tar xvf {} -C {}'.format(
@@ -135,8 +135,7 @@ class TestCalibration(unittest.TestCase):
             resnet50_zip_name, resnet50_unzip_folder_name)
         os.system(cmd)
 
-        self.iterations = 100
-        self.skip_batch_num = 5
+        self.iterations = 50
 
     def run_program(self, model_path, generate_int8=False, algo='direct'):
         image_shape = [3, 224, 224]
@@ -163,16 +162,15 @@
 
             print("Start calibration ...")
 
-            calibrator = ut.Calibrator(
+            calibrator = int8_utility.Calibrator(
                 program=infer_program,
                 pretrained_model=model_path,
-                iterations=100,
-                debug=False,
-                algo=algo)
-
-            sampling_data = {}
+                algo=algo,
+                exe=exe,
+                output=int8_model,
+                feed_var_names=feed_dict,
+                fetch_list=fetch_targets)
 
-            calibrator.generate_sampling_program()
         test_info = []
         cnt = 0
         for batch_id, data in enumerate(val_reader()):
@@ -192,13 +190,7 @@
                               feed_dict[1]: label},
                         fetch_list=fetch_targets)
             if generate_int8:
-                for i in calibrator.sampling_program.list_vars():
-                    if i.name in calibrator.sampling_vars:
-                        np_data = np.array(fluid.global_scope().find_var(i.name)
-                                           .get_tensor())
-                        if i.name not in sampling_data:
-                            sampling_data[i.name] = []
-                        sampling_data[i.name].append(np_data)
+                calibrator.sample_data()
 
             test_info.append(np.mean(acc1) * len(data))
             cnt += len(data)
@@ -209,9 +201,8 @@
                 break
 
         if generate_int8:
-            calibrator.generate_quantized_data(sampling_data)
-            fluid.io.save_inference_model(int8_model, feed_dict, fetch_targets,
-                                          exe, calibrator.sampling_program)
+            calibrator.save_int8_model()
+
         print(
             "Calibration is done and the corresponding files were generated at {}".
             format(os.path.abspath("calibration_out")))
-- 
GitLab
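For readers who want to drive the refined API outside the unit test, the sketch below mirrors the calibration flow this patch sets up in test_calibration.py: construct a Calibrator with the executor, output path, and feed/fetch information, call sample_data() after every calibration batch, and finish with save_int8_model(). The model path, output directory, dummy input batches, and the choice to run calibrator.sampling_program each iteration are illustrative assumptions; only the Calibrator keyword arguments and the two method calls are taken from the patch itself.

import numpy as np
import paddle.fluid as fluid
import int8_inference.utility as int8_utility

exe = fluid.Executor(fluid.CPUPlace())

# Hypothetical locations: an FP32 inference model on disk and the directory
# that will receive the generated INT8 model.
fp32_model_path = 'resnet50_fp32/model'
int8_output_path = 'calibration_out'

[infer_program, feed_var_names, fetch_targets] = fluid.io.load_inference_model(
    fp32_model_path, exe)

calibrator = int8_utility.Calibrator(
    program=infer_program,
    pretrained_model=fp32_model_path,
    algo='direct',              # calibration algorithm, as used in the test
    exe=exe,
    output=int8_output_path,
    feed_var_names=feed_var_names,
    fetch_list=fetch_targets)

# Run a few calibration batches. Random tensors stand in for the ImageNet
# reader that the real test downloads; the (image, label) feed layout is an
# assumption borrowed from the test code.
for _ in range(50):
    image = np.random.random([1, 3, 224, 224]).astype('float32')
    label = np.array([[0]]).astype('int64')
    exe.run(calibrator.sampling_program,
            feed={feed_var_names[0]: image,
                  feed_var_names[1]: label},
            fetch_list=fetch_targets)
    calibrator.sample_data()    # record the tensors needed for scale estimation

# Compute scales, rewrite the program, and save the INT8 model to disk.
calibrator.save_int8_model()

Compared with the old flow, the test no longer keeps its own sampling_data dictionary or calls fluid.io.save_inference_model directly; the Calibrator now owns both the per-batch sampling and the final model export.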