未验证 提交 54be8d9b 编写于 作者: T Tao Luo 提交者: GitHub

Merge pull request #16747 from chuanqi129/reduce_test_calibration_time

Reduce test_calibration test time by using a larger batch size
...@@ -147,10 +147,11 @@ class TestCalibrationForResnet50(unittest.TestCase): ...@@ -147,10 +147,11 @@ class TestCalibrationForResnet50(unittest.TestCase):
self.data_cache_folder) self.data_cache_folder)
os.system(cmd) os.system(cmd)
self.batch_size = 1 self.batch_size = 1 if os.environ.get('DATASET') == 'full' else 50
self.sample_iterations = 50 self.sample_iterations = 50 if os.environ.get(
'DATASET') == 'full' else 1
self.infer_iterations = 50000 if os.environ.get( self.infer_iterations = 50000 if os.environ.get(
'DATASET') == 'full' else 50 'DATASET') == 'full' else 1
def cache_unzipping(self, target_folder, zip_path): def cache_unzipping(self, target_folder, zip_path):
if not os.path.exists(target_folder): if not os.path.exists(target_folder):
...@@ -279,15 +280,15 @@ class TestCalibrationForResnet50(unittest.TestCase): ...@@ -279,15 +280,15 @@ class TestCalibrationForResnet50(unittest.TestCase):
def test_calibration(self): def test_calibration(self):
self.download_model() self.download_model()
print("Start FP32 inference for {0} on {1} images ...").format( print("Start FP32 inference for {0} on {1} images ...").format(
self.model, self.infer_iterations) self.model, self.infer_iterations * self.batch_size)
(fp32_throughput, fp32_latency, (fp32_throughput, fp32_latency,
fp32_acc1) = self.run_program(self.model_cache_folder + "/model") fp32_acc1) = self.run_program(self.model_cache_folder + "/model")
print("Start INT8 calibration for {0} on {1} images ...").format( print("Start INT8 calibration for {0} on {1} images ...").format(
self.model, self.sample_iterations) self.model, self.sample_iterations * self.batch_size)
self.run_program( self.run_program(
self.model_cache_folder + "/model", True, algo=self.algo) self.model_cache_folder + "/model", True, algo=self.algo)
print("Start INT8 inference for {0} on {1} images ...").format( print("Start INT8 inference for {0} on {1} images ...").format(
self.model, self.infer_iterations) self.model, self.infer_iterations * self.batch_size)
(int8_throughput, int8_latency, (int8_throughput, int8_latency,
int8_acc1) = self.run_program("calibration_out") int8_acc1) = self.run_program("calibration_out")
delta_value = fp32_acc1 - int8_acc1 delta_value = fp32_acc1 - int8_acc1
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册