Commit 0c5f7377 authored by mindspore-ci-bot, committed by Gitee

!5505 update README

Merge pull request !5505 from hangq/master
......@@ -37,11 +37,11 @@ For more details please check out our [MindSpore Lite Architecture Guide](https:
The pre-trained models provided by MindSpore include: [Image Classification](https://download.mindspore.cn/model_zoo/official/lite/) and [Object Detection](https://download.mindspore.cn/model_zoo/official/lite/). More models will be provided in the future.
-MindSpore allows you to retrain pre-trained models to perform other tasks. For example, a pre-trained image classification model can be retrained to recognize new image types. See [Retraining](https://www.mindspore.cn/lite/tutorial/zh-CN/master/advanced_use/retraining_of_quantized_network.html).
+MindSpore allows you to retrain pre-trained models to perform other tasks.
2. Model converter and optimization
-If you use MindSpore or a third-party model, you need to use the [MindSpore Lite Model Converter Tool](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/converter_tool.html) to convert the model into a MindSpore Lite model. The converter tool supports TensorFlow Lite, Caffe, and ONNX models; fusion and quantization can be introduced during the conversion procedure.
+If you use MindSpore or a third-party model, you need to use the [MindSpore Lite Model Converter Tool](https://www.mindspore.cn/lite/tutorial/en/master/use/converter_tool.html) to convert the model into a MindSpore Lite model. The converter tool supports TensorFlow Lite, Caffe, and ONNX models; fusion and quantization can be introduced during the conversion procedure.
MindSpore also provides a tool to convert models running on IoT devices.
......@@ -51,6 +51,6 @@ For more details please check out our [MindSpore Lite Architecture Guide](https:
4. Inference
-Load the model and perform inference. [Inference](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/runtime.html) is the process of running input data through the model to get output.
+Load the model and perform inference. [Inference](https://www.mindspore.cn/lite/tutorial/en/master/use/runtime.html) is the process of running input data through the model to get output.
MindSpore provides a series of pre-trained models that can be deployed on mobile device [example](#TODO).
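As a rough sketch of that load-and-infer flow, the C++ below follows the runtime API described in the linked tutorial (`Model::Import`, `LiteSession::CreateSession`, `CompileGraph`, `RunGraph`); the header paths, output accessor, and exact signatures are assumptions based on this release's docs, so treat it as an illustration rather than the canonical example:

```cpp
// Minimal sketch, assuming the MindSpore Lite C++ runtime API of this era;
// see the runtime tutorial linked above for the authoritative flow.
#include <fstream>
#include <vector>
#include "include/context.h"
#include "include/lite_session.h"
#include "include/model.h"

int main() {
  // Read the converted .ms model into memory ("model.ms" is a placeholder).
  std::ifstream ifs("model.ms", std::ios::binary | std::ios::ate);
  size_t size = ifs.tellg();
  std::vector<char> buf(size);
  ifs.seekg(0);
  ifs.read(buf.data(), size);

  // Import the flatbuffer model and create a session bound to a context.
  auto *model = mindspore::lite::Model::Import(buf.data(), size);
  mindspore::lite::Context context;
  auto *session = mindspore::session::LiteSession::CreateSession(&context);
  if (model == nullptr || session == nullptr) {
    return -1;
  }
  session->CompileGraph(model);

  // Fill input tensors, run the graph, then read outputs.
  auto inputs = session->GetInputs();
  // ... copy input data into inputs[0]->MutableData() ...
  session->RunGraph();
  auto outputs = session->GetOutputs();  // accessor name per the tutorial
  (void)outputs;
  delete session;
  return 0;
}
```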
......@@ -45,7 +45,7 @@ MindSpore Lite is MindSpore's device-cloud collaborative, lightweight, high-performance AI inference
The pre-trained models provided by MindSpore include [Image Classification](https://download.mindspore.cn/model_zoo/official/lite/) and [Object Detection](https://download.mindspore.cn/model_zoo/official/lite/). The MindSpore team will add more preset models in the future.
-MindSpore allows you to retrain pre-trained models to perform other tasks. For example, a pre-trained image classification model can be retrained to recognize new image types. See [Retraining](https://www.mindspore.cn/lite/tutorial/zh-CN/master/advanced_use/retraining_of_quantized_network.html).
+MindSpore allows you to retrain pre-trained models to perform other tasks. For example, a pre-trained image classification model can be retrained to recognize new image types.
2. Model conversion/optimization
......
......@@ -33,9 +33,6 @@ class ModelImpl {
void FreeMetaGraph();
int BuildOps();
-protected:
-PrimitiveC *CopyPrimitive(const schema::Primitive *src_prim);
protected:
const char *model_buf_;
size_t buf_size_;
......
......@@ -156,23 +156,50 @@ void PrimitiveC::SetQuantType(schema::QuantType quant_type) { this->quant_type_
schema::QuantType PrimitiveC::GetQuantType() const { return quant_type_; }
std::shared_ptr<PrimitiveC> GetReturnPrim() {
-auto return_primitiveT = new schema::PrimitiveT;
+auto return_primitiveT = new (std::nothrow) schema::PrimitiveT;
+if (return_primitiveT == nullptr) {
+  MS_LOG(ERROR) << "new PrimitiveT failed";
+  return nullptr;
+}
return_primitiveT->value.type = schema::PrimitiveType_Return;
return_primitiveT->value.value = new schema::ReturnT;
+if (return_primitiveT->value.value == nullptr) {
+  MS_LOG(ERROR) << "new ReturnT failed";
+  delete (return_primitiveT);
+  return nullptr;
+}
return std::make_shared<Return>(return_primitiveT);
}
std::shared_ptr<PrimitiveC> GetMakeTuplePrim() {
auto make_tuple_primitiveT = new schema::PrimitiveT;
+if (make_tuple_primitiveT == nullptr) {
+  MS_LOG(ERROR) << "new PrimitiveT failed";
+  return nullptr;
+}
make_tuple_primitiveT->value.type = schema::PrimitiveType_MakeTuple;
make_tuple_primitiveT->value.value = new schema::MakeTupleT;
+if (make_tuple_primitiveT->value.value == nullptr) {
+  MS_LOG(ERROR) << "new MakeTupleT failed";
+  delete (make_tuple_primitiveT);
+  return nullptr;
+}
return std::make_shared<MakeTuple>(make_tuple_primitiveT);
}
std::shared_ptr<PrimitiveC> GetTupleGetItemPrim() {
auto tuple_get_item_primitiveT = new schema::PrimitiveT();
+if (tuple_get_item_primitiveT == nullptr) {
+  MS_LOG(ERROR) << "new PrimitiveT failed";
+  return nullptr;
+}
tuple_get_item_primitiveT->value.type = schema::PrimitiveType_TupleGetItem;
tuple_get_item_primitiveT->value.value = new schema::TupleGetItemT;
+if (tuple_get_item_primitiveT->value.value == nullptr) {
+  MS_LOG(ERROR) << "new TupleGetItemT failed";
+  delete (tuple_get_item_primitiveT);
+  return nullptr;
+}
return std::make_shared<TupleGetItem>(tuple_get_item_primitiveT);
}
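The hunk above applies one defensive pattern throughout: allocate with `new (std::nothrow)` (or at least null-check the result) so an out-of-memory condition surfaces as a `nullptr` return instead of an uncaught `std::bad_alloc`. A minimal self-contained sketch of the pattern, with a hypothetical stand-in type:

```cpp
#include <iostream>
#include <memory>
#include <new>

// Hypothetical stand-in for schema::PrimitiveT; for illustration only.
struct PrimitiveT {
  int type = 0;
};

std::shared_ptr<PrimitiveT> MakePrim() {
  // new (std::nothrow) returns nullptr on allocation failure instead of
  // throwing, so the factory can report failure through its return value.
  auto *prim = new (std::nothrow) PrimitiveT;
  if (prim == nullptr) {
    std::cerr << "new PrimitiveT failed" << std::endl;
    return nullptr;  // callers must check, as the importer code now does
  }
  return std::shared_ptr<PrimitiveT>(prim);
}

int main() {
  auto prim = MakePrim();
  return prim == nullptr ? 1 : 0;
}
```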
......
......@@ -112,8 +112,8 @@ abstract::AbstractTensorPtr AnfImporterFromMetaGraphT::ConvertTensorToAbstractTe
return std::make_shared<abstract::AbstractTensor>(type_ptr, shape);
}
-void AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode,
-                                                const CNodePtr &dst_cnode) {
+int AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode,
+                                               const CNodePtr &dst_cnode) {
MS_ASSERT(nullptr != meta_graph_);
MS_ASSERT(nullptr != src_cnode);
MS_ASSERT(nullptr != dst_cnode);
......@@ -133,7 +133,12 @@ void AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CN
auto &tensor = meta_graph_->allTensors.at(out_tensor_id);
MS_ASSERT(nullptr != tensor);
abstract_list.emplace_back(ConvertTensorToAbstractTensor(tensor));
-auto tuple_get_item_prim = NewValueNode(GetTupleGetItemPrim());
+auto tuple_get_item_prim_ptr = GetTupleGetItemPrim();
+if (tuple_get_item_prim_ptr == nullptr) {
+  MS_LOG(ERROR) << "GetTupleGetItemPrim return nullptr";
+  return RET_ERROR;
+}
+auto tuple_get_item_prim = NewValueNode(tuple_get_item_prim_ptr);
auto get_item_value = NewValueNode(MakeValue<int>(i));
std::vector<AnfNodePtr> inputs{tuple_get_item_prim, dst_cnode, get_item_value};
CNodePtr get_item_cnode = func_graph_->NewCNode(inputs);
......@@ -142,6 +147,7 @@ void AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CN
}
dst_cnode->set_abstract(std::make_shared<abstract::AbstractTuple>(abstract_list));
}
+return RET_OK;
}
int AnfImporterFromMetaGraphT::ConverterCNode() {
......@@ -161,7 +167,11 @@ int AnfImporterFromMetaGraphT::ConverterCNode() {
}
auto new_cnode = func_graph_->NewCNode(op_inputs);
new_cnode->set_fullname_with_scope(cNode->name);
-ConvertAbstract(cNode, new_cnode);
+auto ret = ConvertAbstract(cNode, new_cnode);
+if (ret != RET_OK) {
+  MS_LOG(ERROR) << "ConvertAbstract failed.";
+  return RET_ERROR;
+}
}
return RET_OK;
}
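These hunks show the second theme of the patch: `ConvertAbstract` changes from `void` to an `int` status so its caller can stop on failure instead of continuing with a half-built graph. A generic sketch of that status-propagation shape (`RET_OK`/`RET_ERROR` mirror the codes in the diff; everything else is illustrative):

```cpp
#include <iostream>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;

// Stand-in for a conversion step that can fail (hypothetical).
int ConvertOne(bool ok) {
  if (!ok) {
    std::cerr << "convert step failed" << std::endl;
    return RET_ERROR;
  }
  return RET_OK;
}

int ConvertAll() {
  for (bool ok : {true, true, false}) {
    auto ret = ConvertOne(ok);
    if (ret != RET_OK) {
      std::cerr << "ConvertOne failed." << std::endl;
      return RET_ERROR;  // propagate instead of silently continuing
    }
  }
  return RET_OK;
}

int main() { return ConvertAll() == RET_OK ? 0 : 1; }
```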
......@@ -171,7 +181,12 @@ int AnfImporterFromMetaGraphT::AddReturnCNode() {
MS_EXCEPTION_IF_NULL(func_graph_);
if (meta_graph_->outputIndex.size() > 1) {
std::vector<AnfNodePtr> make_tuple_inputs;
-auto make_tuple_prim = NewValueNode(GetMakeTuplePrim());
+auto make_tuple_prim_ptr = GetMakeTuplePrim();
+if (make_tuple_prim_ptr == nullptr) {
+  MS_LOG(ERROR) << "GetMakeTuplePrim return nullptr";
+  return RET_ERROR;
+}
+auto make_tuple_prim = NewValueNode(make_tuple_prim_ptr);
make_tuple_inputs.emplace_back(make_tuple_prim);
for (auto tensor_id : meta_graph_->outputIndex) {
auto cNode = GetNode(tensor_id);
......@@ -185,14 +200,24 @@ int AnfImporterFromMetaGraphT::AddReturnCNode() {
make_tuple_cnode->set_fullname_with_scope("return tuple");
std::vector<AnfNodePtr> op_inputs;
-auto value_node = NewValueNode(GetReturnPrim());
+auto return_prim_ptr = GetReturnPrim();
+if (return_prim_ptr == nullptr) {
+  MS_LOG(ERROR) << "GetReturnPrim return nullptr";
+  return RET_ERROR;
+}
+auto value_node = NewValueNode(return_prim_ptr);
op_inputs.emplace_back(value_node);
op_inputs.emplace_back(make_tuple_cnode);
auto cnode = func_graph_->NewCNode(op_inputs);
cnode->set_fullname_with_scope("return");
func_graph_->set_return(cnode);
} else {
-auto value_node = NewValueNode(GetReturnPrim());
+auto return_prim_ptr = GetReturnPrim();
+if (return_prim_ptr == nullptr) {
+  MS_LOG(ERROR) << "GetReturnPrim return nullptr";
+  return RET_ERROR;
+}
+auto value_node = NewValueNode(return_prim_ptr);
std::vector<AnfNodePtr> op_inputs{value_node};
auto cnode = GetNode(meta_graph_->outputIndex.front());
if (nullptr == cnode) {
......
......@@ -41,7 +41,7 @@ class AnfImporterFromMetaGraphT : public AnfImporter {
ValueNodePtr ConvertPrimitive(const std::unique_ptr<schema::CNodeT> &cNode);
abstract::AbstractTensorPtr ConvertTensorToAbstractTensor(const std::unique_ptr<schema::TensorT> &tensor);
-void ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode, const CNodePtr &dst_cnode);
+int ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode, const CNodePtr &dst_cnode);
int AddReturnCNode() override;
......
......@@ -54,8 +54,6 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
// common
AddFlag(&BenchmarkFlags::modelPath, "modelPath", "Input model path", "");
AddFlag(&BenchmarkFlags::inDataPath, "inDataPath", "Input data path, if not set, use random input", "");
-AddFlag(&BenchmarkFlags::inDataTypeIn, "inDataType", "Input data type. img | bin", "bin");
-AddFlag(&BenchmarkFlags::omModelPath, "omModelPath", "OM model path, only required when device is NPU", "");
AddFlag(&BenchmarkFlags::device, "device", "CPU | GPU", "CPU");
AddFlag(&BenchmarkFlags::cpuBindMode, "cpuBindMode",
"Input -1 for MID_CPU, 1 for HIGHER_CPU, 0 for NO_BIND, defalut value: 1", 1);
......@@ -67,8 +65,6 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
// MarkAccuracy
AddFlag(&BenchmarkFlags::calibDataPath, "calibDataPath", "Calibration data file path", "");
AddFlag(&BenchmarkFlags::accuracyThreshold, "accuracyThreshold", "Threshold of accuracy", 0.5);
-// Resize
-AddFlag(&BenchmarkFlags::resizeDimsIn, "resizeDims", "Dims to resize to", "");
}
~BenchmarkFlags() override = default;
......@@ -83,7 +79,7 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
std::string inDataPath;
std::vector<std::string> input_data_list;
InDataType inDataType;
-std::string inDataTypeIn;
+std::string inDataTypeIn = "bin";
int cpuBindMode = 1;
// MarkPerformance
int loopCount;
......@@ -94,10 +90,9 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
std::string calibDataPath;
float accuracyThreshold;
// Resize
-std::string resizeDimsIn;
+std::string resizeDimsIn = "";
std::vector<std::vector<int64_t>> resizeDims;
-std::string omModelPath;
std::string device;
};
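A small design note on this last hunk: the defaults that used to live in the removed `AddFlag(...)` registrations now live as in-class member initializers, so `inDataTypeIn` and `resizeDimsIn` keep sensible values even though they are no longer registered as command-line flags. A tiny sketch of the idiom (field names echo the diff; the surrounding struct is hypothetical):

```cpp
#include <cstdint>
#include <string>
#include <vector>

// In-class member initializers guarantee a default even when no parser
// or constructor ever assigns the field.
struct BenchmarkFlagsSketch {
  std::string inDataTypeIn = "bin";  // default moved out of AddFlag(...)
  std::string resizeDimsIn = "";     // ditto
  std::vector<std::vector<int64_t>> resizeDims;
};

int main() {
  BenchmarkFlagsSketch flags;
  return flags.inDataTypeIn == "bin" ? 0 : 1;
}
```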
......