diff --git a/mindspore/core/abstract/abstract_value.h b/mindspore/core/abstract/abstract_value.h
index 3bdbd4b5ee6d3173b17024f8f5f702c06a33adc4..fa768addc67f523de39f9533d9f23611acf86674 100644
--- a/mindspore/core/abstract/abstract_value.h
+++ b/mindspore/core/abstract/abstract_value.h
@@ -285,8 +285,6 @@ class AbstractTensor : public AbstractUndetermined {
   AbstractBasePtr Broaden(uint8_t config = 0) const override;
   AbstractBasePtr BroadenWithShape() const;
   AbstractBasePtr Join(const AbstractBasePtr &other) final;
-  int format() const { return this->format_; }
-  void set_format(int format) { this->format_ = format; }
   bool operator==(const AbstractTensor &other) const;
   bool operator==(const AbstractBase &other) const override;
@@ -303,9 +301,6 @@ class AbstractTensor : public AbstractUndetermined {
     }
     return hash_sum;
   }
-
- protected:
-  int format_ = 0;
 };
 using AbstractTensorPtr = std::shared_ptr<AbstractTensor>;
 using AbstractTensorPtrList = std::vector<AbstractTensorPtr>;
diff --git a/mindspore/lite/CMakeLists.txt b/mindspore/lite/CMakeLists.txt
index 2b2c26a0756a951a7eb36adb5211c555c2d303bf..d4fa0e62f6e259545e248033b9ede7b5bb75d324 100644
--- a/mindspore/lite/CMakeLists.txt
+++ b/mindspore/lite/CMakeLists.txt
@@ -60,6 +60,7 @@ add_compile_definitions(NO_DLIB)
 add_compile_options(-fPIC)
 if("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
     #set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden")
+    string(REPLACE "-g" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
 endif()
 
 if (BUILD_DEVICE)
diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt
index b3e7f96f70bdfee2eaf633d8b50582bd6ecd0283..34751013b2efc84d3b0f419e43755969ca5c421c 100644
--- a/mindspore/lite/src/CMakeLists.txt
+++ b/mindspore/lite/src/CMakeLists.txt
@@ -1,37 +1,30 @@
 set(LITE_SRC
-        ${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/common/ms_tensor_utils.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/allocator.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/runtime_api.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/thread_pool.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/workspace_pool.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/ir/tensor.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/context.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/executor.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/kernel_registry.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/populate_parameter.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/scheduler.cc
-        )
-
-if (SUPPORT_GPU)
-    list(APPEND LITE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/subgraph_opencl_kernel.cc)
-    list(APPEND LITE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/utils.cc)
-endif ()
-
-set(LITE_SRC
-        ${LITE_SRC}
-        ${CMAKE_CURRENT_SOURCE_DIR}/lite_session.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/model.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/common/ms_tensor_utils.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/runtime/allocator.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/runtime/runtime_api.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/runtime/thread_pool.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/runtime/workspace_pool.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/ir/tensor.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/context.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/executor.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/kernel_registry.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/populate_parameter.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/scheduler.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/lite_session.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/model.cc
 )
 
 if (SUPPORT_GPU)
     set(LITE_SRC
         ${LITE_SRC}
-        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_executor.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_allocator.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_runtime.cc
-        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_wrapper.cc
+        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/subgraph_opencl_kernel.cc
+        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/utils.cc
+        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_executor.cc
+        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_allocator.cc
+        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_runtime.cc
+        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_wrapper.cc
     )
 endif ()
diff --git a/mindspore/lite/src/ir/tensor.h b/mindspore/lite/src/ir/tensor.h
index 74bf07ec9864df3d8d23e6f4c3e403186684bbcc..1fe6b9702e6a8ef6f51f256c22344a974f36516f 100644
--- a/mindspore/lite/src/ir/tensor.h
+++ b/mindspore/lite/src/ir/tensor.h
@@ -147,6 +147,7 @@ class Tensor : public mindspore::tensor::MetaTensor {
     }
     if (nullptr == allocator_) {
       free(this->data_);
+      this->data_ = nullptr;
     } else {
       allocator_->Free(this->data_);
       this->data_ = nullptr;
diff --git a/mindspore/lite/src/param_value_lite.h b/mindspore/lite/src/param_value_lite.h
index 907e70a2d0cb9f3c91bb45161b7c483202135c6c..72d027a338b9fa7360a7a99efc01d5c22e7df777 100644
--- a/mindspore/lite/src/param_value_lite.h
+++ b/mindspore/lite/src/param_value_lite.h
@@ -23,6 +23,7 @@
 #include <memory>
 #include "ir/dtype/type_id.h"
+#include "schema/inner/model_generated.h"
 
 namespace mindspore {
 class ParamValueLite : public Value {
@@ -50,18 +51,20 @@ class ParamValueLite : public Value {
     return size;
   }
 
-  bool operator==(const Value &other) const override {
-    return this == &other;
-  }
+  bool operator==(const Value &other) const override { return this == &other; }
+
+  int format() const { return this->format_; }
+
+  void set_format(int format) { this->format_ = format; }
 
  private:
-  void *tensor_addr_;
-  size_t tensor_size_;
-  std::vector<int> tensor_shape_;
-  TypeId type_id_;
+  void *tensor_addr_ = nullptr;
+  size_t tensor_size_ = 0;
+  int format_ = schema::Format_KCHW;
+  std::vector<int> tensor_shape_{};
+  TypeId type_id_ = TypeId::kNumberTypeFloat32;
 };
 
 using ParamValueLitePtr = std::shared_ptr<ParamValueLite>;
 }  // namespace mindspore
 
 #endif  // MINDSPORE_LITE_SRC_PARAM_VALUE_LITE_H_
-
diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.cc b/mindspore/lite/tools/anf_exporter/anf_exporter.cc
index faf717b7ef6b4b5eb19d57db8bb62782de1a201f..32a24792be77734cbe60fa5c86d8889bcea17e0e 100644
--- a/mindspore/lite/tools/anf_exporter/anf_exporter.cc
+++ b/mindspore/lite/tools/anf_exporter/anf_exporter.cc
@@ -35,12 +35,12 @@ void AnfExporter::RemoveIfMakeTuple(const CNodePtr &cnode) {
   inputs.emplace_back(cnode->input(0));
   for (size_t i = 1; i < cnode->inputs().size(); ++i) {
-    AnfNodePtr inputNode = cnode->input(i);
-    if (!inputNode->isa<CNode>()) {
+    AnfNodePtr input_node = cnode->input(i);
+    if (!input_node->isa<CNode>()) {
       inputs.emplace_back(cnode->input(i));
       continue;
     }
-    auto make_tuple_node = utils::cast<CNodePtr>(inputNode);
+    auto make_tuple_node = utils::cast<CNodePtr>(input_node);
     if (IsPrimitiveCNode(make_tuple_node, schema::PrimitiveType_MakeTuple)) {
       has_make_tuple = true;
       for (size_t j = 1; j < make_tuple_node->inputs().size(); ++j) {
@@ -62,12 +62,12 @@ bool AnfExporter::RemoveIfTupleGetItem(const CNodePtr &cnode) {
   inputs.clear();
   inputs.emplace_back(cnode->input(0));
   for (size_t i = 1; i < cnode->inputs().size(); ++i) {
-    AnfNodePtr inputNode = cnode->input(i);
-    if (!inputNode->isa<CNode>()) {
+    AnfNodePtr input_node = cnode->input(i);
+    if (!input_node->isa<CNode>()) {
       inputs.emplace_back(cnode->input(i));
       continue;
     }
-    auto tuple_get_item_node = utils::cast<CNodePtr>(inputNode);
+    auto tuple_get_item_node = utils::cast<CNodePtr>(input_node);
     if (IsPrimitiveCNode(tuple_get_item_node, schema::PrimitiveType_TupleGetItem)) {
       has_tuple_get_item = true;
       inputs.emplace_back(tuple_get_item_node->input(1));
@@ -76,7 +76,7 @@ bool AnfExporter::RemoveIfTupleGetItem(const CNodePtr &cnode) {
         MS_LOG(ERROR) << "TupleGetItem's input 2 is not valuenode";
         return false;
       }
-      ValueNodePtr value_node = utils::cast<ValueNodePtr>(indexNode);
+      auto value_node = utils::cast<ValueNodePtr>(indexNode);
       map_remove_get_item_[tuple_get_item_node->input(1)->fullname_with_scope()] = GetValue<int>(value_node->value());
     } else {
       inputs.emplace_back(cnode->input(i));
@@ -92,15 +92,20 @@ bool AnfExporter::AddOutPutIfReturn(const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
   MS_ASSERT(meta_graphT != nullptr);
   MS_ASSERT(cnode != nullptr);
   for (size_t i = 1; i < cnode->inputs().size(); ++i) {
-    auto inputNode = cnode->input(i);
-    if (!inputNode->isa<CNode>()) {
+    auto input_anode = cnode->input(i);
+    if (!input_anode->isa<CNode>()) {
       MS_LOG(ERROR) << "Node of Return's input is not CNode";
       return false;
     }
-    auto inputCNode = utils::cast<CNodePtr>(inputNode);
-    std::string inputName = inputNode->fullname_with_scope();
-    auto graphOutput = node_id_map_[inputName];
-    meta_graphT->outputIndex.emplace_back(graphOutput);
+    auto input_cnode = utils::cast<CNodePtr>(input_anode);
+    std::string input_name = input_anode->fullname_with_scope();
+    auto iter = node_id_map_.find(input_name);
+    if (iter == node_id_map_.end()) {
+      MS_LOG(ERROR) << "Could not find output node";
+      return false;
+    }
+    auto graph_output = iter->second;
+    meta_graphT->outputIndex.emplace_back(graph_output);
   }
   return true;
 }
@@ -198,10 +203,10 @@ schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph) {
     }
     map_remove_get_item_.clear();
    RemoveIfMakeTuple(cnode);
-    if (!RemoveIfTupleGetItem(cnode)) {
-      MS_LOG(ERROR) << "RemoveIfTupleGetItem failed";
-      return nullptr;
-    }
+    // if (!RemoveIfTupleGetItem(cnode)) {
+    //   MS_LOG(ERROR) << "RemoveIfTupleGetItem failed";
+    //   return nullptr;
+    // }
 
     if (primT->value.type == schema::PrimitiveType_Return) {
       AddOutPutIfReturn(meta_graphT, cnode);
@@ -234,33 +239,54 @@ schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph) {
   return meta_graphT.release();
 }
 
-void AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode) {
+int AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode) {
   std::string input_name = input_anode->fullname_with_scope();
-  if (!map_remove_get_item_.empty()) {
-    for (auto name : map_remove_get_item_) {
-      if (name.first == input_name) {
-        input_name = input_name + "_o:" + std::to_string(name.second);
-      }
+  auto input_cnode = utils::cast<CNodePtr>(input_anode);
+  if (!IsPrimitiveCNode(input_cnode, schema::PrimitiveType_TupleGetItem)) {
+    if (node_id_map_.find(input_name) != node_id_map_.end()) {
+      output_cnode->inputIndex.emplace_back(node_id_map_[input_name]);
     }
+  } else {
+    auto inputs = input_cnode->inputs();
+    if (inputs.size() != 3) {
+      MS_LOG(ERROR) << "TupleGetItem should have 3 inputs, got " << inputs.size();
+      return RET_ERROR;
+    }
+    auto get_item_input_cnode = inputs.at(1);
+    auto index_vnode = inputs.at(2);
+    if (!utils::isa<ValueNode>(index_vnode)) {
+      MS_LOG(ERROR) << "TupleGetItem's input 2 is not valuenode";
+      return RET_ERROR;
+    }
+    auto value_node = utils::cast<ValueNodePtr>(index_vnode);
+    if (value_node == nullptr) {
+      MS_LOG(ERROR) << "cast to ValueNode failed";
+      return RET_ERROR;
+    }
+    auto input_index_key =
+      get_item_input_cnode->fullname_with_scope() + "_o:" + std::to_string(GetValue<int>(value_node->value()));
+    auto iter = node_id_map_.find(input_index_key);
+    if (iter == node_id_map_.end()) {
+      MS_LOG(ERROR) << "Can not find get_item output tensor";
+      return RET_ERROR;
+    }
+    output_cnode->inputIndex.emplace_back(iter->second);
   }
-  if (node_id_map_.find(input_name) != node_id_map_.end()) {
-    output_cnode->inputIndex.emplace_back(node_id_map_[input_name]);
-  }
+  return RET_OK;
 }
 
-int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode, size_t anode_index,
+int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
                                        const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
                                        schema::CNodeT *output_cnode) {
-  std::string input_name = input_anode->fullname_with_scope();
   auto paramNode = input_anode->cast<ParameterPtr>();
-  if (paramNode->name().empty()) {
-    paramNode->set_name(input_name + "_i:" + std::to_string(anode_index - 1));
-  }
-  if (node_id_map_.find(paramNode->name()) != node_id_map_.end()) {
+  std::string input_name = paramNode->fullname_with_scope();
+  if (node_id_map_.find(input_name) != node_id_map_.end()) {
     output_cnode->inputIndex.emplace_back(node_id_map_[paramNode->name()]);
     return RET_OK;
   }
   auto paramTensor = std::make_unique<schema::TensorT>();
+  paramTensor->nodeType = schema::NodeType_ValueNode;
+  paramTensor->format = schema::Format_NHWC;
   auto abstractBase = paramNode->abstract();
   if (abstractBase == nullptr) {
     MS_LOG(ERROR) << "Abstract of parameter is nullptr, " << paramNode->name();
@@ -274,7 +300,6 @@ int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
   auto typePtr = abstractTensor->element()->GetTypeTrack();
   MS_ASSERT(typePtr != nullptr);
   paramTensor->dataType = typePtr->type_id();
-  paramTensor->format = schema::Format(abstractTensor->format());
   if (!utils::isa<abstract::ShapePtr>(abstractTensor->BuildShape())) {
     MS_LOG(ERROR) << "Shape of Abstract of parameter should be ShapePtr, " << paramNode->name();
     return RET_ERROR;
@@ -282,11 +307,11 @@ int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
   paramTensor->dims = utils::cast<abstract::ShapePtr>(abstractTensor->BuildShape())->shape();
   auto paramValue = std::dynamic_pointer_cast<ParamValueLite>(paramNode->default_param());
   if (paramValue != nullptr) {
-    paramTensor->nodeType = schema::NodeType_ValueNode;
     paramTensor->data.resize(paramValue->tensor_size());
+    paramTensor->format = schema::Format(paramValue->format());
     memcpy(paramTensor->data.data(), paramValue->tensor_addr(), paramValue->tensor_size());
   }
-  node_id_map_[paramNode->fullname_with_scope()] = meta_graphT->allTensors.size();
+  node_id_map_[input_name] = meta_graphT->allTensors.size();
   output_cnode->inputIndex.emplace_back(meta_graphT->allTensors.size());
   meta_graphT->allTensors.emplace_back(std::move(paramTensor));
   return RET_OK;
@@ -345,9 +370,13 @@ int AnfExporter::SetOpInputNode(const CNodePtr &cnode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
     auto input_node = cnode->input(i);
     if (input_node->isa<CNode>()) {
       is_graph_input = false;
-      ConvertInputCNode(input_node, fb_node);
+      auto ret = ConvertInputCNode(input_node, fb_node);
+      if (ret != RET_OK) {
+        MS_LOG(ERROR) << "ConvertInputCNode failed";
+        return RET_ERROR;
+      }
     } else if (input_node->isa<Parameter>()) {
-      auto ret = ConvertInputParameter(input_node, i, meta_graphT, fb_node);
+      auto ret = ConvertInputParameter(input_node, meta_graphT, fb_node);
       if (ret != RET_OK) {
         MS_LOG(ERROR) << "ConvertInputParameter failed";
         return RET_ERROR;
diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.h b/mindspore/lite/tools/anf_exporter/anf_exporter.h
index cb05852232bb7bdb768326d23ad946301e28ce6f..6fe0d1067cf8bb5ac4824e9ec703c49a62b0ba1f 100644
--- a/mindspore/lite/tools/anf_exporter/anf_exporter.h
+++ b/mindspore/lite/tools/anf_exporter/anf_exporter.h
@@ -1,7 +1,5 @@
 /**
- * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
- *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -42,8 +40,8 @@ class AnfExporter {
   bool AddOutPutIfReturn(const std::unique_ptr<schema::MetaGraphT> &meta_graphT, const CNodePtr &cnode);
 
  protected:
-  void ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode);
-  int ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode, size_t anode_index,
+  int ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode);
+  int ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
                             const std::unique_ptr<schema::MetaGraphT> &meta_graphT, schema::CNodeT *output_cnode);
   int ConvertInputValueNode(std::shared_ptr<AnfNode> input_anode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
                             schema::CNodeT *output_cnode);
diff --git a/mindspore/lite/tools/anf_importer/anf_populater/anf_concat_populater.cc b/mindspore/lite/tools/anf_importer/anf_populater/anf_concat_populater.cc
index 30f964cb1c70fb8340be530d71eb2d237b6d9ad0..0294865bd261f4bc440acf422ea197d355bfe4f0 100644
--- a/mindspore/lite/tools/anf_importer/anf_populater/anf_concat_populater.cc
+++ b/mindspore/lite/tools/anf_importer/anf_populater/anf_concat_populater.cc
@@ -1,6 +1,4 @@
 /**
- * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
- *
  * Copyright 2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/mindspore/lite/tools/anf_importer/anf_populater/anf_concat_populater.h b/mindspore/lite/tools/anf_importer/anf_populater/anf_concat_populater.h
index c9af84fdadd1b74484c63e6a5dc2447d6f28340a..b10845bbfb74e412a1b9ccdf657c9705716dcac3 100644
--- a/mindspore/lite/tools/anf_importer/anf_populater/anf_concat_populater.h
+++ b/mindspore/lite/tools/anf_importer/anf_populater/anf_concat_populater.h
@@ -1,6 +1,4 @@
 /**
- * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
- *
  * Copyright 2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/mindspore/lite/tools/anf_importer/anf_populater/anf_conv_populater.cc b/mindspore/lite/tools/anf_importer/anf_populater/anf_conv_populater.cc
index 523c73501b51532fcefc392a3da33aae33ce8740..6e4a50e29c7002b86e931709d5dfff616b46cbd9 100644
--- a/mindspore/lite/tools/anf_importer/anf_populater/anf_conv_populater.cc
+++ b/mindspore/lite/tools/anf_importer/anf_populater/anf_conv_populater.cc
@@ -1,8 +1,5 @@
 /**
- * This is the C++ adaptation and derivative work of Myia
- * (https://github.com/mila-iqia/myia/).
- *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,14 +19,12 @@
 #include <string>
 #include <memory>
 #include "tools/anf_importer/anf_populater/anf_node_populater_registry.h"
-#include "ir/func_graph.h"
-#include "src/ir/tensor.h"
 #include "tools/converter/quantizer/quantize_util.h"
 
 namespace mindspore::lite {
 void AnfConvPopulater::PopulaterConv2DMultiGroup(const PrimitivePtr &prim,
-                                                 const std::unique_ptr<schema::PrimitiveT> &primitive,
-                                                 const int &group, const std::vector<AnfNodePtr> &inputs) {
+                                                 const std::unique_ptr<schema::PrimitiveT> &primitive, const int &group,
+                                                 const std::vector<AnfNodePtr> &inputs) {
   auto attr = std::make_unique<schema::DepthwiseConv2DT>();
   auto format = GetValue<std::string>(prim->GetAttr("data_format"));
   if (format == "NCHW") {
@@ -73,19 +68,11 @@ void AnfConvPopulater::PopulaterConv2DMultiGroup(const PrimitivePtr &prim,
   attr->channelMultiplier = channel_mutiplier;
 
   MS_ASSERT(inputs.size() == kAnfPopulaterTwo);
-  auto inputNode = inputs[kAnfPopulaterOne];
-  MS_ASSERT(inputNode != nullptr);
-  if (inputNode->isa<Parameter>()) {
-    auto paramNode = inputNode->cast<ParameterPtr>();
-    auto abstractBase = paramNode->abstract();
-    MS_ASSERT(abstractBase != nullptr);
-    if (utils::isa<abstract::AbstractTensorPtr>(abstractBase)) {
-      auto abstractTensor = utils::cast<abstract::AbstractTensorPtr>(abstractBase);
-      MS_ASSERT(abstractTensor != nullptr);
-      if (abstractTensor->format() == schema::Format_NCHW) {
-        abstractTensor->set_format(schema::Format_KCHW);
-      }
-    }
+  auto input_node = inputs[kAnfPopulaterOne];
+  MS_ASSERT(input_node != nullptr);
+  if (input_node->isa<Parameter>()) {
+    auto param_node = input_node->cast<ParameterPtr>();
+    ConvertConvWeight<float>(param_node);
   }
 
   primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
@@ -144,10 +131,9 @@ void AnfConvPopulater::CalQuantParam(const double &mean, const double &stdDev, float *mMin,
   *mMax = static_cast<float>((qmax - mean) / stdDev);
 }
 
-void AnfConvPopulater::PopulaterQuantParam(
-    const PrimitivePtr &prim,
-    std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
-    std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam) {
+void AnfConvPopulater::PopulaterQuantParam(const PrimitivePtr &prim,
+                                           std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
+                                           std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam) {
   auto narrow_range = prim->GetAttr("narrow_range");
   bool narrowRangeQuantParam = GetValue<bool>(narrow_range);
   auto num_bits = prim->GetAttr("num_bits");
@@ -206,8 +192,7 @@ void AnfConvPopulater::PopulaterQuantParam(
       quantParam.max = 0.0;
       quantParam.zeroPoint = 0;
 
-      quantParam.scale =
-          vecInputQuantParam->at(0).at(0).scale * vecInputQuantParam->at(1).at(i).scale;
+      quantParam.scale = vecInputQuantParam->at(0).at(0).scale * vecInputQuantParam->at(1).at(i).scale;
       quants.emplace_back(quantParam);
     }
     vecInputQuantParam->emplace_back(quants);
diff --git a/mindspore/lite/tools/anf_importer/anf_populater/anf_conv_populater.h b/mindspore/lite/tools/anf_importer/anf_populater/anf_conv_populater.h
index 7386b24afe20eb74c3defaa08499d49cb4ec109f..3e32d013d2af4f03f3acbb7b32f4beb6f2439c95 100644
--- a/mindspore/lite/tools/anf_importer/anf_populater/anf_conv_populater.h
+++ b/mindspore/lite/tools/anf_importer/anf_populater/anf_conv_populater.h
@@ -1,8 +1,5 @@
 /**
- * This is the C++ adaptation and derivative work of Myia
- * (https://github.com/mila-iqia/myia/).
- *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -20,9 +17,13 @@
 #ifndef MINDSPORE_ANF_CONV_PARSER_H
 #define MINDSPORE_ANF_CONV_PARSER_H
 
+#include "tools/anf_importer/anf_populater/anf_node_populater.h"
 #include <vector>
 #include <memory>
-#include "tools/anf_importer/anf_populater/anf_node_populater.h"
+#include "base/base_ref.h"
+#include "abstract/abstract_value.h"
+#include "src/param_value_lite.h"
+#include "src/ir/tensor.h"
 
 namespace mindspore::lite {
 class AnfConvPopulater : public AnfNodePopulater {
@@ -33,18 +34,65 @@ class AnfConvPopulater : public AnfNodePopulater {
                 const std::vector<AnfNodePtr> &inputs) override;
 
  private:
-  void PopulaterConv2DMultiGroup(
-      const PrimitivePtr &prim,
-      const std::unique_ptr<schema::PrimitiveT> &primitive, const int &group, const std::vector<AnfNodePtr> &inputs);
-  void PopulaterConv2DSingleGroup(
-      const PrimitivePtr &prim,
-      const std::unique_ptr<schema::PrimitiveT> &primitive, const int &group);
-  void PopulaterQuantParam(
-      const PrimitivePtr &prim,
-      std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
-      std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam);
-  void CalQuantParam(const double &mean, const double &stdDev, float *mMin,
-                     float *mMax);
+  template <typename T>
+  void ConvertConvWeight(const ParameterPtr &param_node) {
+    MS_ASSERT(param_node != nullptr);
+    auto param = param_node->default_param();
+    auto weight = std::dynamic_pointer_cast<ParamValueLite>(param);
+    MS_ASSERT(weight != nullptr);
+
+    std::unique_ptr<T[]> buf(new (std::nothrow) T[weight->tensor_shape_size()]);
+    if (buf == nullptr) {
+      MS_LOG(ERROR) << "new buf failed";
+      return;
+    }
+
+    size_t filter_k = weight->tensor_shape()[0];
+    size_t filter_c = weight->tensor_shape()[1];
+    size_t filter_h = weight->tensor_shape()[2];
+    size_t filter_w = weight->tensor_shape()[3];
+    T *p1Buff = nullptr;
+    T *p2Buff = nullptr;
+    for (size_t k = 0; k < filter_k; ++k) {
+      for (size_t c = 0; c < filter_c; ++c) {
+        for (size_t h = 0; h < filter_h; ++h) {
+          for (size_t w = 0; w < filter_w; ++w) {
+            p1Buff = reinterpret_cast<T *>(weight->tensor_addr()) +
+                     ((k * filter_c * filter_h * filter_w) + (c * filter_h * filter_w) + (h * filter_w) + (w));
+            p2Buff =
+              buf.get() + ((c * filter_k * filter_h * filter_w) + (k * filter_h * filter_w) + (h * filter_w) + (w));
+            *p2Buff = *p1Buff;
+          }
+        }
+      }
+    }
+
+    auto ret = ::memcpy_s(weight->tensor_addr(), weight->tensor_shape_size() * sizeof(T), buf.get(),
+                          weight->tensor_shape_size() * sizeof(T));
+    if (ret != EOK) {
+      MS_LOG(ERROR) << "memcpy_s failed: " << ret;
+      return;
+    }
+
+    auto abstract_base = param_node->abstract();
+    MS_ASSERT(abstract_base != nullptr);
+    if (utils::isa<abstract::AbstractTensorPtr>(abstract_base)) {
+      auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(abstract_base);
+      utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[0] = filter_c;
+      utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[1] = filter_k;
+      utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[2] = filter_h;
+      utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[3] = filter_w;
+    }
+    return;
+  }
+
+  void PopulaterConv2DMultiGroup(const PrimitivePtr &prim, const std::unique_ptr<schema::PrimitiveT> &primitive,
+                                 const int &group, const std::vector<AnfNodePtr> &inputs);
+  void PopulaterConv2DSingleGroup(const PrimitivePtr &prim, const std::unique_ptr<schema::PrimitiveT> &primitive,
+                                  const int &group);
+  void PopulaterQuantParam(const PrimitivePtr &prim, std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
+                           std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam);
+  void CalQuantParam(const double &mean, const double &stdDev, float *mMin, float *mMax);
 };
 }  // namespace mindspore::lite
diff --git a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc
index 39e6a3ab1eb78f97ccb7f4892b2fca505a223db7..3f2ec6fcd5a96f97b47e8044797c54a5e62fd0d1 100644
--- a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc
+++ b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc
@@ -42,14 +42,14 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() {
     auto type_id = static_cast<TypeId>(tensor->dataType);
     auto type_ptr = TypeIdToType(type_id);
     auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape);
-    abstract_tensor->set_format(tensor->format);
     parameter->set_abstract(abstract_tensor);
-    parameter->set_name("const_" + std::to_string(i));
+    parameter->set_name("const_" + std::to_string(i) + "_parameter");
 
     ParamValueLitePtr param_value = std::make_shared<ParamValueLite>();
     MS_ASSERT(param_value != nullptr);
     param_value->set_tensor_shape(shape);
     param_value->set_tensor_type(type_id);
+    param_value->set_format(tensor->format);
     if (!tensor->data.empty()) {
       auto size = tensor->data.size();
       char *tensor_data = new (std::nothrow) char[size];
@@ -138,6 +138,7 @@ void AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode,
       auto get_item_value = NewValueNode(MakeValue<int>(i));
       std::vector<AnfNodePtr> inputs{tuple_get_item_prim, dst_cnode, get_item_value};
       CNodePtr get_item_cnode = func_graph_->NewCNode(inputs);
+      get_item_cnode->set_fullname_with_scope(src_cnode->name + "_getitem_" + std::to_string(i));
       AddNode(out_tensor_id, get_item_cnode);
     }
     dst_cnode->set_abstract(std::make_shared<abstract::AbstractTuple>(abstract_list));
@@ -170,27 +171,41 @@ int AnfImporterFromMetaGraphT::ConverterCNode() {
 int AnfImporterFromMetaGraphT::AddReturnCNode() {
   MS_EXCEPTION_IF_NULL(meta_graph_);
   MS_EXCEPTION_IF_NULL(func_graph_);
-  std::vector<AnfNodePtr> make_tuple_inputs;
-  auto make_tuple_prim = NewValueNode(GetMakeTuplePrim());
-  make_tuple_inputs.emplace_back(make_tuple_prim);
-  for (auto tensor_id : meta_graph_->outputIndex) {
-    auto cNode = GetNode(tensor_id);
-    if (nullptr == cNode) {
+  if (meta_graph_->outputIndex.size() > 1) {
+    std::vector<AnfNodePtr> make_tuple_inputs;
+    auto make_tuple_prim = NewValueNode(GetMakeTuplePrim());
+    make_tuple_inputs.emplace_back(make_tuple_prim);
+    for (auto tensor_id : meta_graph_->outputIndex) {
+      auto cNode = GetNode(tensor_id);
+      if (nullptr == cNode) {
+        MS_LOG(ERROR) << "Can't find input node.";
+        return RET_ERROR;
+      }
+      make_tuple_inputs.emplace_back(cNode);
+    }
+    auto make_tuple_cnode = func_graph_->NewCNode(make_tuple_inputs);
+    make_tuple_cnode->set_fullname_with_scope("return tuple");
+
+    std::vector<AnfNodePtr> op_inputs;
+    auto value_node = NewValueNode(GetReturnPrim());
+    op_inputs.emplace_back(value_node);
+    op_inputs.emplace_back(make_tuple_cnode);
+    auto cnode = func_graph_->NewCNode(op_inputs);
+    cnode->set_fullname_with_scope("return");
+    func_graph_->set_return(cnode);
+  } else {
+    auto value_node = NewValueNode(GetReturnPrim());
+    std::vector<AnfNodePtr> op_inputs{value_node};
+    auto cnode = GetNode(meta_graph_->outputIndex.front());
+    if (nullptr == cnode) {
       MS_LOG(ERROR) << "Can't find input node.";
       return RET_ERROR;
     }
-    make_tuple_inputs.emplace_back(cNode);
+    op_inputs.emplace_back(cnode);
+    auto return_cnode = func_graph_->NewCNode(op_inputs);
+    return_cnode->set_fullname_with_scope("return");
+    func_graph_->set_return(return_cnode);
   }
-  auto make_tuple_cnode = func_graph_->NewCNode(make_tuple_inputs);
-  make_tuple_cnode->set_fullname_with_scope("return tuple");
-
-  std::vector<AnfNodePtr> op_inputs;
-  auto value_node = NewValueNode(GetReturnPrim());
-  op_inputs.emplace_back(value_node);
-  op_inputs.emplace_back(make_tuple_cnode);
-  auto cnode = func_graph_->NewCNode(op_inputs);
-  cnode->set_fullname_with_scope("return");
-  func_graph_->set_return(cnode);
   return RET_OK;
 }
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc
index 5c25e73a6f5a1444375149c86ebe3d7b9e8a0c05..95c64a57069b01c104d291685f2be7e6e310bc62 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc
@@ -167,9 +167,6 @@ STATUS WeightFormatHardCodePass::HardCodeMS(const std::unique_ptr<CNodeT> &node,
   if (opType == PrimitiveType_Conv2D) {
     weightTensor->format = Format_KCHW;
   } else if (opType == PrimitiveType_DepthwiseConv2D) {
-    if (weightTensor->format == Format_KCHW) {
-      TransFilterFormat(weightTensor.get(), kKCHW2CKHW);
-    }
     weightTensor->format = Format_CKHW;
   } else {
     MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(opType) << ", node: " << node->name;