From 38a591d63f656a8562099707be2e3ad5195f72cb Mon Sep 17 00:00:00 2001
From: czhu15 <41610754+czhu15@users.noreply.github.com>
Date: Wed, 19 Jun 2019 15:35:56 +0800
Subject: [PATCH] Merge PR 18160 and PR 18177 into release 1.5 branch (#18195)

* [Cherry-pick] change dirname mobilenet to mobilenetv1 (#18160)

test=release/1.5

* [Cherry-pick] update mkldnn int8v2 doc (#18177)

test=release/1.5
---
 .../paddle/fluid/contrib/int8_inference/README.md | 15 ++++++++-------
 .../fluid/contrib/slim/tests/CMakeLists.txt       |  6 +++---
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/python/paddle/fluid/contrib/int8_inference/README.md b/python/paddle/fluid/contrib/int8_inference/README.md
index 3228610f96..7dc7c8d2a3 100644
--- a/python/paddle/fluid/contrib/int8_inference/README.md
+++ b/python/paddle/fluid/contrib/int8_inference/README.md
@@ -6,7 +6,7 @@ PaddlePaddle supports offline INT8 calibration to accelerate the inference speed
 You need to install at least PaddlePaddle-1.3 python package `pip install paddlepaddle==1.3`.
 
 ## 1. How to generate INT8 model
-You can refer to the unit test in [test_calibration.py](../tests/test_calibration.py). Basically, there are three steps:
+You can refer to the unit test in [test_calibration_resnet50.py](../tests/test_calibration_resnet50.py). Basically, there are three steps:
 * Construct calibration object.
 
 ```python
@@ -68,18 +68,19 @@ Notes:
 * The INT8 theoretical speedup is 4X on Intel® Xeon® Cascadelake Server (please refer to `The theoretical peak compute gains are 4x int8 OPS over fp32 OPS.` in [Reference](https://software.intel.com/en-us/articles/lower-numerical-precision-deep-learning-inference-and-training "Reference")). Therefore, op-level gain is 4X and topology-level is smaller.
 
 ## 4. How to reproduce the results
-* Small dataset (Single core)
+* Small dataset for ResNet-50 (Single core)
 ```bash
-FLAGS_use_mkldnn=true python python/paddle/fluid/contrib/tests/test_calibration.py
+FLAGS_use_mkldnn=true python python/paddle/fluid/contrib/tests/test_calibration_resnet50.py
 ```
+> Note: Change `test_calibration_resnet50.py` to `test_calibration_mobilenetv1.py` for MobileNet-V1. The same applies to the following commands.
 
-* Full dataset (Single core)
+* Full dataset for ResNet-50 (Single core)
 ```bash
-FLAGS_use_mkldnn=true DATASET=full python python/paddle/fluid/contrib/tests/test_calibration.py
+FLAGS_use_mkldnn=true DATASET=full python python/paddle/fluid/contrib/tests/test_calibration_resnet50.py
 ```
 
-* Full dataset (Multi-core)
+* Full dataset for ResNet-50 (Multi-core)
 ```bash
-FLAGS_use_mkldnn=true OMP_NUM_THREADS=20 DATASET=full python python/paddle/fluid/contrib/tests/test_calibration.py
+FLAGS_use_mkldnn=true OMP_NUM_THREADS=20 DATASET=full python python/paddle/fluid/contrib/tests/test_calibration_resnet50.py
 ```
 > Notes: This is an example command with 20 cores by using set `OMP_NUM_THREADS` value.
diff --git a/python/paddle/fluid/contrib/slim/tests/CMakeLists.txt b/python/paddle/fluid/contrib/slim/tests/CMakeLists.txt
index e61e93da3f..c59df49f62 100644
--- a/python/paddle/fluid/contrib/slim/tests/CMakeLists.txt
+++ b/python/paddle/fluid/contrib/slim/tests/CMakeLists.txt
@@ -42,10 +42,10 @@ if(LINUX AND WITH_MKLDNN)
     inference_analysis_python_api_int8_test(test_slim_int8_googlenet ${INT8_GOOGLENET_MODEL_DIR} ${INT8_DATA_DIR} ${MKLDNN_INT8_TEST_FILE})
 
     # mobilenet int8
-    set(INT8_MOBILENET_MODEL_DIR "${INT8_DATA_DIR}/mobilenet")
+    set(INT8_MOBILENET_MODEL_DIR "${INT8_DATA_DIR}/mobilenetv1")
     inference_analysis_python_api_int8_test(test_slim_int8_mobilenet ${INT8_MOBILENET_MODEL_DIR} ${INT8_DATA_DIR} ${MKLDNN_INT8_TEST_FILE})
 
-    # temporarily adding WITH_SLIM_MKLDNN_FULL_TEST FLAG for QA testing the following UTs locally, 
+    # temporarily adding WITH_SLIM_MKLDNN_FULL_TEST FLAG for QA testing the following UTs locally,
     # since the following UTs cost too much time on CI test.
     if (WITH_SLIM_MKLDNN_FULL_TEST)
         # resnet50 int8
@@ -70,7 +70,7 @@ if(LINUX AND WITH_MKLDNN)
     endif()
 endif()
 
-# Since test_mkldnn_int8_quantization_strategy only supports testing on Linux 
+# Since test_mkldnn_int8_quantization_strategy only supports testing on Linux
 # with MKL-DNN, we remove it here for not repeating test, or not testing on other systems.
 list(REMOVE_ITEM TEST_OPS test_mkldnn_int8_quantization_strategy)
 
-- 
GitLab
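
For convenience, here is a sketch of the MobileNet-V1 reproduce commands, obtained by applying the filename substitution that the note added in this patch describes. It assumes `test_calibration_mobilenetv1.py` sits alongside `test_calibration_resnet50.py` under `python/paddle/fluid/contrib/tests/`, per the README's directory layout:

```bash
# Small dataset for MobileNet-V1 (single core); only the script name differs
# from the ResNet-50 command in the patched README.
FLAGS_use_mkldnn=true python python/paddle/fluid/contrib/tests/test_calibration_mobilenetv1.py

# Full dataset for MobileNet-V1 (single core).
FLAGS_use_mkldnn=true DATASET=full python python/paddle/fluid/contrib/tests/test_calibration_mobilenetv1.py

# Full dataset for MobileNet-V1 (multi-core); 20 threads is an example value,
# as the README's closing note says, so tune OMP_NUM_THREADS to your machine.
FLAGS_use_mkldnn=true OMP_NUM_THREADS=20 DATASET=full python python/paddle/fluid/contrib/tests/test_calibration_mobilenetv1.py
```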