From 18d3c686dcc015088f6c01ae126575beef794c96 Mon Sep 17 00:00:00 2001
From: hangq
Date: Sun, 30 Aug 2020 20:22:10 +0800
Subject: [PATCH] remove unused benchmark option

---
 mindspore/lite/README.md                   | 6 +++---
 mindspore/lite/README_CN.md                | 2 +-
 mindspore/lite/tools/benchmark/benchmark.h | 9 ++-------
 3 files changed, 6 insertions(+), 11 deletions(-)

diff --git a/mindspore/lite/README.md b/mindspore/lite/README.md
index 25a21b744..abd29dcab 100644
--- a/mindspore/lite/README.md
+++ b/mindspore/lite/README.md
@@ -37,11 +37,11 @@ For more details please check out our [MindSpore Lite Architecture Guide](https:
 
    The pre-trained models provided by MindSpore include: [Image Classification](https://download.mindspore.cn/model_zoo/official/lite/) and [Object Detection](https://download.mindspore.cn/model_zoo/official/lite/). More models will be provided in the feature.
 
-   MindSpore allows you to retrain pre-trained models to perform other tasks. For example: using a pre-trained image classification model, it can be retrained to recognize new image types. See [Retraining](https://www.mindspore.cn/lite/tutorial/zh-CN/master/advanced_use/retraining_of_quantized_network.html).
+   MindSpore allows you to retrain pre-trained models to perform other tasks. For example: using a pre-trained image classification model, it can be retrained to recognize new image types.
 
 2. Model converter and optimization
 
-   If you use MindSpore or a third-party model, you need to use [MindSpore Lite Model Converter Tool](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/converter_tool.html) to convert the model into MindSpore Lite model. The MindSpore Lite model converter tool provides the converter of TensorFlow Lite, Caffe, ONNX to MindSpore Lite model, fusion and quantization could be introduced during convert procedure.
+   If you use MindSpore or a third-party model, you need to use [MindSpore Lite Model Converter Tool](https://www.mindspore.cn/lite/tutorial/en/master/use/converter_tool.html) to convert the model into MindSpore Lite model. The MindSpore Lite model converter tool provides the converter of TensorFlow Lite, Caffe, ONNX to MindSpore Lite model, fusion and quantization could be introduced during convert procedure.
 
    MindSpore also provides a tool to convert models running on IoT devices .
 
@@ -51,6 +51,6 @@ For more details please check out our [MindSpore Lite Architecture Guide](https:
 
 4. Inference
 
-   Load the model and perform inference. [Inference](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/runtime.html) is the process of running input data through the model to get output.
+   Load the model and perform inference. [Inference](https://www.mindspore.cn/lite/tutorial/en/master/use/runtime.html) is the process of running input data through the model to get output.
 
    MindSpore provides a series of pre-trained models that can be deployed on mobile device [example](#TODO).
diff --git a/mindspore/lite/README_CN.md b/mindspore/lite/README_CN.md
index d2051cae3..75b880580 100644
--- a/mindspore/lite/README_CN.md
+++ b/mindspore/lite/README_CN.md
@@ -45,7 +45,7 @@ MindSpore Lite是MindSpore推出的端云协同的、轻量化、高性能AI推
 
    MindSpore提供的预训练模型包括:[图像分类(Image Classification)](https://download.mindspore.cn/model_zoo/official/lite/)和[目标检测(Object Detection)](https://download.mindspore.cn/model_zoo/official/lite/)。后续MindSpore团队会增加更多的预置模型。
 
-   MindSpore允许您重新训练预训练模型,以执行其他任务。比如:使用预训练的图像分类模型,可以重新训练来识别新的图像类型。参见[重训练](https://www.mindspore.cn/lite/tutorial/zh-CN/master/advanced_use/retraining_of_quantized_network.html)。
+   MindSpore允许您重新训练预训练模型,以执行其他任务。比如:使用预训练的图像分类模型,可以重新训练来识别新的图像类型。
 
 2. 模型转换/优化
 
diff --git a/mindspore/lite/tools/benchmark/benchmark.h b/mindspore/lite/tools/benchmark/benchmark.h
index 0df9f5424..f8dfdd1e4 100644
--- a/mindspore/lite/tools/benchmark/benchmark.h
+++ b/mindspore/lite/tools/benchmark/benchmark.h
@@ -54,8 +54,6 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
     // common
     AddFlag(&BenchmarkFlags::modelPath, "modelPath", "Input model path", "");
     AddFlag(&BenchmarkFlags::inDataPath, "inDataPath", "Input data path, if not set, use random input", "");
-    AddFlag(&BenchmarkFlags::inDataTypeIn, "inDataType", "Input data type. img | bin", "bin");
-    AddFlag(&BenchmarkFlags::omModelPath, "omModelPath", "OM model path, only required when device is NPU", "");
     AddFlag(&BenchmarkFlags::device, "device", "CPU | GPU", "CPU");
     AddFlag(&BenchmarkFlags::cpuBindMode, "cpuBindMode",
             "Input -1 for MID_CPU, 1 for HIGHER_CPU, 0 for NO_BIND, defalut value: 1", 1);
@@ -67,8 +65,6 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
     // MarkAccuracy
     AddFlag(&BenchmarkFlags::calibDataPath, "calibDataPath", "Calibration data file path", "");
     AddFlag(&BenchmarkFlags::accuracyThreshold, "accuracyThreshold", "Threshold of accuracy", 0.5);
-    // Resize
-    AddFlag(&BenchmarkFlags::resizeDimsIn, "resizeDims", "Dims to resize to", "");
   }
 
   ~BenchmarkFlags() override = default;
@@ -83,7 +79,7 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
   std::string inDataPath;
   std::vector<std::string> input_data_list;
   InDataType inDataType;
-  std::string inDataTypeIn;
+  std::string inDataTypeIn = "bin";
   int cpuBindMode = 1;
   // MarkPerformance
   int loopCount;
@@ -94,10 +90,9 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
   std::string calibDataPath;
   float accuracyThreshold;
   // Resize
-  std::string resizeDimsIn;
+  std::string resizeDimsIn = "";
   std::vector<std::vector<int64_t>> resizeDims;
 
-  std::string omModelPath;
   std::string device;
 };
 
-- 
GitLab
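
Note on the benchmark.h hunks above: the two members that survive, inDataTypeIn and resizeDimsIn, previously received their defaults ("bin" and "") through the removed AddFlag registrations, so the patch moves those defaults into in-class member initializers. The following is a minimal sketch of that pattern under stated assumptions: the Flags struct and main() here are hypothetical stand-ins for illustration, not the real BenchmarkFlags or FlagParser machinery.

#include <iostream>
#include <string>

// Hypothetical stand-in for BenchmarkFlags: the CLI options are gone, but each
// member keeps its former default through an in-class initializer, so code
// paths that still read these members observe the same values as before.
struct Flags {
  std::string inDataTypeIn = "bin";  // default previously passed to AddFlag(..., "bin")
  std::string resizeDimsIn = "";     // default previously passed to AddFlag(..., "")
};

int main() {
  Flags flags;  // no command-line parsing involved for the removed options
  std::cout << "inDataTypeIn = " << flags.inDataTypeIn << "\n";    // prints: bin
  std::cout << "resizeDimsIn = [" << flags.resizeDimsIn << "]\n";  // prints: []
  return 0;
}

Keeping the members with defaults, rather than deleting them outright, means any remaining code that reads inDataTypeIn or resizeDimsIn behaves exactly as if the removed flags had been left unset on the command line.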