#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
from test_calibration_resnet50 import TestCalibration
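# This test exercises post-training INT8 calibration on MobileNet-V1:
# it downloads the FP32 model, measures FP32 accuracy, generates an INT8
# model with KL calibration, runs INT8 inference, and checks that the
# top-1 accuracy drop stays below 0.01.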


class TestCalibrationForMobilenetv1(TestCalibration):
    def download_model(self):
        # MobileNet-V1 FP32 model data used for FP32 inference and INT8 calibration
        data_urls = [
            'http://paddle-inference-dist.bj.bcebos.com/int8/mobilenetv1_int8_model.tar.gz'
        ]
        data_md5s = ['13892b0716d26443a8cdea15b3c6438b']
        self.model_cache_folder = self.download_data(data_urls, data_md5s,
                                                     "mobilenetv1_fp32")
        self.model = "MobileNet-V1"
        # calibration algorithm: "KL" (KL-divergence based)
        self.algo = "KL"

    def test_calibration(self):
        self.download_model()
        print("Start FP32 inference for {0} on {1} images ...".format(
            self.model, self.infer_iterations * self.batch_size))
        (fp32_throughput, fp32_latency,
         fp32_acc1) = self.run_program(self.model_cache_folder + "/model")
        print("Start INT8 calibration for {0} on {1} images ...".format(
            self.model, self.sample_iterations * self.batch_size))
        self.run_program(
            self.model_cache_folder + "/model", True, algo=self.algo)
        print("Start INT8 inference for {0} on {1} images ...".format(
            self.model, self.infer_iterations * self.batch_size))
        (int8_throughput, int8_latency,
         int8_acc1) = self.run_program(self.int8_model)
        # the INT8 top-1 accuracy drop relative to FP32 must stay below 0.01
        delta_value = fp32_acc1 - int8_acc1
        self.assertLess(delta_value, 0.01)
        print(
            "FP32 {0}: batch_size {1}, throughput {2} images/second, latency {3} seconds, accuracy {4}".
            format(self.model, self.batch_size, fp32_throughput, fp32_latency,
                   fp32_acc1))
        print(
            "INT8 {0}: batch_size {1}, throughput {2} images/second, latency {3} seconds, accuracy {4}".
            format(self.model, self.batch_size, int8_throughput, int8_latency,
                   int8_acc1))
        sys.stdout.flush()


if __name__ == '__main__':
    unittest.main()