diff --git a/mindspore/lite/README.md b/mindspore/lite/README.md
index 25a21b744391a421deb3861b2b8e8dde0b360a38..abd29dcab10e51a1f829071c77fc4e40949528e5 100644
--- a/mindspore/lite/README.md
+++ b/mindspore/lite/README.md
@@ -37,11 +37,11 @@ For more details please check out our [MindSpore Lite Architecture Guide](https:
 
     The pre-trained models provided by MindSpore include: [Image Classification](https://download.mindspore.cn/model_zoo/official/lite/) and [Object Detection](https://download.mindspore.cn/model_zoo/official/lite/). More models will be provided in the feature.
 
-    MindSpore allows you to retrain pre-trained models to perform other tasks. For example: using a pre-trained image classification model, it can be retrained to recognize new image types. See [Retraining](https://www.mindspore.cn/lite/tutorial/zh-CN/master/advanced_use/retraining_of_quantized_network.html).
+    MindSpore allows you to retrain pre-trained models to perform other tasks. For example: using a pre-trained image classification model, it can be retrained to recognize new image types.
 
 2. Model converter and optimization
 
-    If you use MindSpore or a third-party model, you need to use [MindSpore Lite Model Converter Tool](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/converter_tool.html) to convert the model into MindSpore Lite model. The MindSpore Lite model converter tool provides the converter of TensorFlow Lite, Caffe, ONNX to MindSpore Lite model, fusion and quantization could be introduced during convert procedure.
+    If you use MindSpore or a third-party model, you need to use [MindSpore Lite Model Converter Tool](https://www.mindspore.cn/lite/tutorial/en/master/use/converter_tool.html) to convert the model into MindSpore Lite model. The MindSpore Lite model converter tool provides the converter of TensorFlow Lite, Caffe, ONNX to MindSpore Lite model, fusion and quantization could be introduced during convert procedure.
 
     MindSpore also provides a tool to convert models running on IoT devices .
 
@@ -51,6 +51,6 @@ For more details please check out our [MindSpore Lite Architecture Guide](https:
 
 4. Inference
 
-    Load the model and perform inference. [Inference](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/runtime.html) is the process of running input data through the model to get output.
+    Load the model and perform inference. [Inference](https://www.mindspore.cn/lite/tutorial/en/master/use/runtime.html) is the process of running input data through the model to get output.
 
     MindSpore provides a series of pre-trained models that can be deployed on mobile device [example](#TODO).
diff --git a/mindspore/lite/README_CN.md b/mindspore/lite/README_CN.md
index d2051cae3be462ed6af46b44fe38ea1fc096b548..75b880580aee2719ba56b40630c301e7b3b8a25f 100644
--- a/mindspore/lite/README_CN.md
+++ b/mindspore/lite/README_CN.md
@@ -45,7 +45,7 @@ MindSpore Lite是MindSpore推出的端云协同的、轻量化、高性能AI推
 
     MindSpore提供的预训练模型包括:[图像分类(Image Classification)](https://download.mindspore.cn/model_zoo/official/lite/)和[目标检测(Object Detection)](https://download.mindspore.cn/model_zoo/official/lite/)。后续MindSpore团队会增加更多的预置模型。
 
-    MindSpore允许您重新训练预训练模型,以执行其他任务。比如:使用预训练的图像分类模型,可以重新训练来识别新的图像类型。参见[重训练](https://www.mindspore.cn/lite/tutorial/zh-CN/master/advanced_use/retraining_of_quantized_network.html)。
+    MindSpore允许您重新训练预训练模型,以执行其他任务。比如:使用预训练的图像分类模型,可以重新训练来识别新的图像类型。
 
 2. 模型转换/优化
 
diff --git a/mindspore/lite/tools/benchmark/benchmark.h b/mindspore/lite/tools/benchmark/benchmark.h
index 0df9f5424bcac520ee48f8bef91633c0d0d2df67..f8dfdd1e44e84ffa828c6aee50677fa9258b4595 100644
--- a/mindspore/lite/tools/benchmark/benchmark.h
+++ b/mindspore/lite/tools/benchmark/benchmark.h
@@ -54,8 +54,6 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
     // common
     AddFlag(&BenchmarkFlags::modelPath, "modelPath", "Input model path", "");
     AddFlag(&BenchmarkFlags::inDataPath, "inDataPath", "Input data path, if not set, use random input", "");
-    AddFlag(&BenchmarkFlags::inDataTypeIn, "inDataType", "Input data type. img | bin", "bin");
-    AddFlag(&BenchmarkFlags::omModelPath, "omModelPath", "OM model path, only required when device is NPU", "");
     AddFlag(&BenchmarkFlags::device, "device", "CPU | GPU", "CPU");
     AddFlag(&BenchmarkFlags::cpuBindMode, "cpuBindMode",
             "Input -1 for MID_CPU, 1 for HIGHER_CPU, 0 for NO_BIND, defalut value: 1", 1);
@@ -67,8 +65,6 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
     // MarkAccuracy
     AddFlag(&BenchmarkFlags::calibDataPath, "calibDataPath", "Calibration data file path", "");
     AddFlag(&BenchmarkFlags::accuracyThreshold, "accuracyThreshold", "Threshold of accuracy", 0.5);
-    // Resize
-    AddFlag(&BenchmarkFlags::resizeDimsIn, "resizeDims", "Dims to resize to", "");
   }
 
   ~BenchmarkFlags() override = default;
@@ -83,7 +79,7 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
   std::string inDataPath;
   std::vector<std::string> input_data_list;
   InDataType inDataType;
-  std::string inDataTypeIn;
+  std::string inDataTypeIn = "bin";
   int cpuBindMode = 1;
   // MarkPerformance
   int loopCount;
@@ -94,10 +90,9 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
   std::string calibDataPath;
   float accuracyThreshold;
   // Resize
-  std::string resizeDimsIn;
+  std::string resizeDimsIn = "";
   std::vector<std::vector<int>> resizeDims;
 
-  std::string omModelPath;
   std::string device;
 };
 
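The benchmark.h hunks above all apply one pattern: the `inDataType`, `omModelPath`, and `resizeDims` command-line flags are removed from the parser, while the backing members stay and the defaults that `AddFlag` used to supply (`"bin"`, `""`) move into default member initializers, so code that still reads `inDataTypeIn` or `resizeDimsIn` observes the same values as before. A minimal sketch of that pattern, using a hypothetical reduced struct rather than the real `BenchmarkFlags`:

```cpp
#include <iostream>
#include <string>

// Hypothetical stand-in for BenchmarkFlags, illustrating the patch's pattern:
// a field that used to be registered via AddFlag keeps its old default
// through a default member initializer once the flag is removed.
struct FlagsSketch {
  // Was: AddFlag(&BenchmarkFlags::inDataTypeIn, "inDataType", ..., "bin");
  std::string inDataTypeIn = "bin";  // same default, no longer user-settable
  // Was: AddFlag(&BenchmarkFlags::resizeDimsIn, "resizeDims", ..., "");
  std::string resizeDimsIn = "";     // empty default, resize path disabled
};

int main() {
  FlagsSketch flags;
  // Downstream code that inspects flags.inDataTypeIn behaves as if the user
  // had passed the old default on the command line.
  std::cout << "inDataType: " << flags.inDataTypeIn << '\n';
  return 0;
}
```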
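For the README's inference step ("load the model and perform inference"), the runtime tutorial linked in the patch describes an import → compile → run flow. The sketch below is a rough reconstruction of that flow against the MindSpore Lite C++ API of this period; the header paths, namespaces, and return-code conventions are assumptions to verify against the linked tutorial, not an authoritative implementation.

```cpp
#include <cstring>
#include <vector>

#include "include/context.h"
#include "include/lite_session.h"
#include "include/model.h"

// Sketch of load -> compile -> run, assuming a .ms model produced by the
// converter tool is already read into model_buf. Error handling is minimal.
int RunInference(const char *model_buf, size_t model_size,
                 const float *input_data, size_t input_bytes) {
  // Deserialize the converted MindSpore Lite model from a memory buffer.
  auto *model = mindspore::lite::Model::Import(model_buf, model_size);
  if (model == nullptr) return -1;

  // Create a session with a default (CPU) context and compile the graph.
  mindspore::lite::Context context;
  auto *session = mindspore::session::LiteSession::CreateSession(&context);
  if (session == nullptr) return -1;
  if (session->CompileGraph(model) != 0) return -1;

  // Copy user data into the first input tensor, then run the graph.
  auto inputs = session->GetInputs();
  std::memcpy(inputs.front()->MutableData(), input_data, input_bytes);
  if (session->RunGraph() != 0) return -1;

  // Output tensors now hold the inference result.
  auto outputs = session->GetOutputs();
  (void)outputs;

  delete session;
  return 0;
}
```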