Commit a27e6f57 authored by mindspore-ci-bot, committed by Gitee

!4664 fix bug in anf_exporter and anf_importer

Merge pull request !4664 from hangq/master
@@ -285,8 +285,6 @@ class AbstractTensor : public AbstractUndetermined {
AbstractBasePtr Broaden(uint8_t config = 0) const override;
AbstractBasePtr BroadenWithShape() const;
AbstractBasePtr Join(const AbstractBasePtr &other) final;
int format() const { return this->format_; }
void set_format(int format) { this->format_ = format; }
bool operator==(const AbstractTensor &other) const;
bool operator==(const AbstractBase &other) const override;
@@ -303,9 +301,6 @@ class AbstractTensor : public AbstractUndetermined {
}
return hash_sum;
}
protected:
int format_ = 0;
};
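// Note: the two hunks above delete the format_ field and its accessors from
// AbstractTensor; in this commit the tensor format is tracked on
// ParamValueLite instead (see the param_value_lite.h hunk below).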
using AbstractTensorPtr = std::shared_ptr<AbstractTensor>;
using AbstractTensorPtrList = std::vector<AbstractTensorPtr>;
......
@@ -60,6 +60,7 @@ add_compile_definitions(NO_DLIB)
add_compile_options(-fPIC)
if("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden")
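    # Strip the "-g" debug flag from the Release C++ flags so release
    # binaries are built without debug symbols (the line below is the one
    # line this hunk adds).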
string(REPLACE "-g" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
if (BUILD_DEVICE)
......
set(LITE_SRC
${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc
${CMAKE_CURRENT_SOURCE_DIR}/common/ms_tensor_utils.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/allocator.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/runtime_api.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/thread_pool.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/workspace_pool.cc
${CMAKE_CURRENT_SOURCE_DIR}/ir/tensor.cc
${CMAKE_CURRENT_SOURCE_DIR}/context.cc
${CMAKE_CURRENT_SOURCE_DIR}/executor.cc
${CMAKE_CURRENT_SOURCE_DIR}/kernel_registry.cc
${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel.cc
${CMAKE_CURRENT_SOURCE_DIR}/populate_parameter.cc
${CMAKE_CURRENT_SOURCE_DIR}/scheduler.cc
)
if (SUPPORT_GPU)
list(APPEND LITE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/subgraph_opencl_kernel.cc)
list(APPEND LITE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/utils.cc)
endif ()
set(LITE_SRC
${LITE_SRC}
${CMAKE_CURRENT_SOURCE_DIR}/lite_session.cc
${CMAKE_CURRENT_SOURCE_DIR}/model.cc
${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc
${CMAKE_CURRENT_SOURCE_DIR}/common/ms_tensor_utils.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/allocator.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/runtime_api.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/thread_pool.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/workspace_pool.cc
${CMAKE_CURRENT_SOURCE_DIR}/ir/tensor.cc
${CMAKE_CURRENT_SOURCE_DIR}/context.cc
${CMAKE_CURRENT_SOURCE_DIR}/executor.cc
${CMAKE_CURRENT_SOURCE_DIR}/kernel_registry.cc
${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel.cc
${CMAKE_CURRENT_SOURCE_DIR}/populate_parameter.cc
${CMAKE_CURRENT_SOURCE_DIR}/scheduler.cc
${CMAKE_CURRENT_SOURCE_DIR}/lite_session.cc
${CMAKE_CURRENT_SOURCE_DIR}/model.cc
)
if (SUPPORT_GPU)
set(LITE_SRC
${LITE_SRC}
${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_executor.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_allocator.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_runtime.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_wrapper.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/subgraph_opencl_kernel.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/utils.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_executor.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_allocator.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_runtime.cc
${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_wrapper.cc
)
endif ()
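# The source lists are consolidated here: lite_session.cc and model.cc move into
# the main LITE_SRC list, and all GPU-only OpenCL sources are grouped in a single
# SUPPORT_GPU block instead of a separate list(APPEND ...) call.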
......
@@ -147,6 +147,7 @@ class Tensor : public mindspore::tensor::MetaTensor {
}
if (nullptr == allocator_) {
free(this->data_);
this->data_ = nullptr;
} else {
allocator_->Free(this->data_);
this->data_ = nullptr;
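      // Resetting data_ to nullptr after both free() and allocator_->Free()
      // makes a second release of this tensor a harmless no-op instead of a
      // double free.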
......
@@ -23,6 +23,7 @@
#include <utility>
#include "ir/dtype/type_id.h"
#include "schema/inner/model_generated.h"
namespace mindspore {
class ParamValueLite : public Value {
@@ -50,18 +51,20 @@ class ParamValueLite : public Value {
return size;
}
bool operator==(const Value &other) const override {
return this == &other;
}
bool operator==(const Value &other) const override { return this == &other; }
int format() const { return this->format_; }
void set_format(int format) { this->format_ = format; }
private:
void *tensor_addr_;
size_t tensor_size_;
std::vector<int> tensor_shape_;
TypeId type_id_;
void *tensor_addr_ = nullptr;
size_t tensor_size_ = 0;
int format_ = schema::Format_KCHW;
std::vector<int> tensor_shape_{};
TypeId type_id_ = TypeId::kNumberTypeFloat32;
};
using ParamValueLitePtr = std::shared_ptr<ParamValueLite>;
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_PARAM_VALUE_LITE_H_
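// With the in-class initializers above, a default-constructed ParamValueLite
// has well-defined members. A minimal usage sketch (hypothetical values):
//   auto param = std::make_shared<ParamValueLite>();
//   param->set_format(schema::Format_KCHW);  // record the weight layout
//   MS_ASSERT(param->format() == schema::Format_KCHW);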
@@ -35,12 +35,12 @@ void AnfExporter::RemoveIfMakeTuple(const CNodePtr &cnode) {
inputs.emplace_back(cnode->input(0));
for (size_t i = 1; i < cnode->inputs().size(); ++i) {
AnfNodePtr inputNode = cnode->input(i);
if (!inputNode->isa<CNode>()) {
AnfNodePtr input_node = cnode->input(i);
if (!input_node->isa<CNode>()) {
inputs.emplace_back(cnode->input(i));
continue;
}
auto make_tuple_node = utils::cast<CNodePtr>(inputNode);
auto make_tuple_node = utils::cast<CNodePtr>(input_node);
if (IsPrimitiveCNode(make_tuple_node, schema::PrimitiveType_MakeTuple)) {
has_make_tuple = true;
for (size_t j = 1; j < make_tuple_node->inputs().size(); ++j) {
@@ -62,12 +62,12 @@ bool AnfExporter::RemoveIfTupleGetItem(const CNodePtr &cnode) {
inputs.clear();
inputs.emplace_back(cnode->input(0));
for (size_t i = 1; i < cnode->inputs().size(); ++i) {
AnfNodePtr inputNode = cnode->input(i);
if (!inputNode->isa<CNode>()) {
AnfNodePtr input_node = cnode->input(i);
if (!input_node->isa<CNode>()) {
inputs.emplace_back(cnode->input(i));
continue;
}
auto tuple_get_item_node = utils::cast<CNodePtr>(inputNode);
auto tuple_get_item_node = utils::cast<CNodePtr>(input_node);
if (IsPrimitiveCNode(tuple_get_item_node, schema::PrimitiveType_TupleGetItem)) {
has_tuple_get_item = true;
inputs.emplace_back(tuple_get_item_node->input(1));
@@ -76,7 +76,7 @@ bool AnfExporter::RemoveIfTupleGetItem(const CNodePtr &cnode) {
MS_LOG(ERROR) << "TupleGetItem's input 2 is not valuenode";
return false;
}
ValueNodePtr value_node = utils::cast<ValueNodePtr>(indexNode);
auto value_node = utils::cast<ValueNodePtr>(indexNode);
map_remove_get_item_[tuple_get_item_node->input(1)->fullname_with_scope()] = GetValue<int>(value_node->value());
} else {
inputs.emplace_back(cnode->input(i));
@@ -92,15 +92,20 @@ bool AnfExporter::AddOutPutIfReturn(const std::unique_ptr<schema::MetaGraphT> &m
MS_ASSERT(meta_graphT != nullptr);
MS_ASSERT(cnode != nullptr);
for (size_t i = 1; i < cnode->inputs().size(); ++i) {
auto inputNode = cnode->input(i);
if (!inputNode->isa<CNode>()) {
auto input_anode = cnode->input(i);
if (!input_anode->isa<CNode>()) {
MS_LOG(ERROR) << "Node of Return's input is not CNode";
return false;
}
auto inputCNode = utils::cast<CNodePtr>(inputNode);
std::string inputName = inputNode->fullname_with_scope();
auto graphOutput = node_id_map_[inputName];
meta_graphT->outputIndex.emplace_back(graphOutput);
auto input_cnode = utils::cast<CNodePtr>(input_anode);
std::string input_name = input_anode->fullname_with_scope();
auto iter = node_id_map_.find(input_name);
if (iter == node_id_map_.end()) {
MS_LOG(ERROR) << "Could not find output node";
return false;
}
auto graph_output = iter->second;
meta_graphT->outputIndex.emplace_back(graph_output);
}
return true;
}
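// AddOutPutIfReturn now looks the output name up with find() and fails loudly,
// rather than using operator[], which would silently insert index 0 for a name
// that was never registered in node_id_map_.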
@@ -198,10 +203,10 @@ schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph) {
}
map_remove_get_item_.clear();
RemoveIfMakeTuple(cnode);
if (!RemoveIfTupleGetItem(cnode)) {
MS_LOG(ERROR) << "RemoveIfTupleGetItem failed";
return nullptr;
}
// if (!RemoveIfTupleGetItem(cnode)) {
// MS_LOG(ERROR) << "RemoveIfTupleGetItem failed";
// return nullptr;
// }
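    // The RemoveIfTupleGetItem pass is disabled here because ConvertInputCNode
    // (below) now resolves TupleGetItem inputs directly against node_id_map_.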
if (primT->value.type == schema::PrimitiveType_Return) {
AddOutPutIfReturn(meta_graphT, cnode);
@@ -234,33 +239,54 @@ schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph) {
return meta_graphT.release();
}
void AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode) {
int AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode) {
std::string input_name = input_anode->fullname_with_scope();
if (!map_remove_get_item_.empty()) {
for (auto name : map_remove_get_item_) {
if (name.first == input_name) {
input_name = input_name + "_o:" + std::to_string(name.second);
}
auto input_cnode = utils::cast<CNodePtr>(input_anode);
if (!IsPrimitiveCNode(input_cnode, schema::PrimitiveType_TupleGetItem)) {
if (node_id_map_.find(input_name) != node_id_map_.end()) {
output_cnode->inputIndex.emplace_back(node_id_map_[input_name]);
}
} else {
auto inputs = input_cnode->inputs();
if (inputs.size() != 3) {
MS_LOG(ERROR) << "TupleGetItem should have 3 inputs, got " << inputs.size();
return RET_ERROR;
}
auto get_item_input_cnode = inputs.at(1);
auto index_vnode = inputs.at(2);
if (!utils::isa<ValueNode>(index_vnode)) {
MS_LOG(ERROR) << "TupleGetItem's input 2 is not valuenode";
return RET_ERROR;
}
auto value_node = utils::cast<ValueNodePtr>(index_vnode);
if (value_node == nullptr) {
MS_LOG(ERROR) << "cast to ValueNode failed";
return RET_ERROR;
}
auto input_index_key =
get_item_input_cnode->fullname_with_scope() + "_o:" + std::to_string(GetValue<int>(value_node->value()));
auto iter = node_id_map_.find(input_index_key);
if (iter == node_id_map_.end()) {
MS_LOG(ERROR) << "Can not find get_item output tensor";
return RET_ERROR;
}
output_cnode->inputIndex.emplace_back(iter->second);
}
if (node_id_map_.find(input_name) != node_id_map_.end()) {
output_cnode->inputIndex.emplace_back(node_id_map_[input_name]);
}
return RET_OK;
}
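// A TupleGetItem input is mapped to the producer's k-th output tensor via the
// key "<producer-fullname>_o:<k>". For example (hypothetical node name), the
// input TupleGetItem(Split-op1, 2) resolves through node_id_map_["Split-op1_o:2"].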
int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode, size_t anode_index,
int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
schema::CNodeT *output_cnode) {
std::string input_name = input_anode->fullname_with_scope();
auto paramNode = input_anode->cast<ParameterPtr>();
if (paramNode->name().empty()) {
paramNode->set_name(input_name + "_i:" + std::to_string(anode_index - 1));
}
if (node_id_map_.find(paramNode->name()) != node_id_map_.end()) {
std::string input_name = paramNode->fullname_with_scope();
if (node_id_map_.find(input_name) != node_id_map_.end()) {
output_cnode->inputIndex.emplace_back(node_id_map_[paramNode->name()]);
return RET_OK;
}
auto paramTensor = std::make_unique<schema::TensorT>();
paramTensor->nodeType = schema::NodeType_ValueNode;
paramTensor->format = schema::Format_NHWC;
auto abstractBase = paramNode->abstract();
if (abstractBase == nullptr) {
MS_LOG(ERROR) << "Abstract of parameter is nullptr, " << paramNode->name();
@@ -274,7 +300,6 @@ int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anod
auto typePtr = abstractTensor->element()->GetTypeTrack();
MS_ASSERT(typePtr != nullptr);
paramTensor->dataType = typePtr->type_id();
paramTensor->format = schema::Format(abstractTensor->format());
if (!utils::isa<abstract::ShapePtr>(abstractTensor->BuildShape())) {
MS_LOG(ERROR) << "Shape of Abstract of parameter should be ShapePtr, " << paramNode->name();
return RET_ERROR;
@@ -282,11 +307,11 @@ int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anod
paramTensor->dims = utils::cast<abstract::ShapePtr>(abstractTensor->BuildShape())->shape();
auto paramValue = std::dynamic_pointer_cast<ParamValueLite>(paramNode->default_param());
if (paramValue != nullptr) {
paramTensor->nodeType = schema::NodeType_ValueNode;
paramTensor->data.resize(paramValue->tensor_size());
paramTensor->format = schema::Format(paramValue->format());
memcpy(paramTensor->data.data(), paramValue->tensor_addr(), paramValue->tensor_size());
}
node_id_map_[paramNode->fullname_with_scope()] = meta_graphT->allTensors.size();
node_id_map_[input_name] = meta_graphT->allTensors.size();
output_cnode->inputIndex.emplace_back(meta_graphT->allTensors.size());
meta_graphT->allTensors.emplace_back(std::move(paramTensor));
return RET_OK;
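  // ConvertInputParameter no longer takes an anode_index or renames anonymous
  // parameters; tensors are keyed by fullname_with_scope(), and the tensor's
  // format now comes from the ParamValueLite rather than the abstract tensor.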
@@ -345,9 +370,13 @@ int AnfExporter::SetOpInputNode(const CNodePtr &cnode, const std::unique_ptr<sch
auto input_node = cnode->input(i);
if (input_node->isa<CNode>()) {
is_graph_input = false;
ConvertInputCNode(input_node, fb_node);
auto ret = ConvertInputCNode(input_node, fb_node);
if (ret != RET_OK) {
MS_LOG(ERROR) << "ConvertInputCNode failed";
return RET_ERROR;
}
} else if (input_node->isa<Parameter>()) {
auto ret = ConvertInputParameter(input_node, i, meta_graphT, fb_node);
auto ret = ConvertInputParameter(input_node, meta_graphT, fb_node);
if (ret != RET_OK) {
MS_LOG(ERROR) << "ConvertInputParameter failed";
return RET_ERROR;
......
/**
* This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
*
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,8 +40,8 @@ class AnfExporter {
bool AddOutPutIfReturn(const std::unique_ptr<schema::MetaGraphT> &meta_graphT, const CNodePtr &cnode);
protected:
void ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode);
int ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode, size_t anode_index,
int ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode);
int ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
const std::unique_ptr<schema::MetaGraphT> &meta_graphT, schema::CNodeT *output_cnode);
int ConvertInputValueNode(std::shared_ptr<AnfNode> input_anode,
const std::unique_ptr<schema::MetaGraphT> &meta_graphT, schema::CNodeT *output_cnode);
......
/**
* This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
*
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
......
/**
* This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
*
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
......
/**
* This is the C++ adaptation and derivative work of Myia
* (https://github.com/mila-iqia/myia/).
*
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,14 +19,12 @@
#include <vector>
#include <memory>
#include "tools/anf_importer/anf_populater/anf_node_populater_registry.h"
#include "ir/func_graph.h"
#include "src/ir/tensor.h"
#include "tools/converter/quantizer/quantize_util.h"
namespace mindspore::lite {
void AnfConvPopulater::PopulaterConv2DMultiGroup(const PrimitivePtr &prim,
const std::unique_ptr<schema::PrimitiveT> &primitive,
const int &group, const std::vector<AnfNodePtr> &inputs) {
const std::unique_ptr<schema::PrimitiveT> &primitive, const int &group,
const std::vector<AnfNodePtr> &inputs) {
auto attr = std::make_unique<schema::DepthwiseConv2DT>();
auto format = GetValue<std::string>(prim->GetAttr("data_format"));
if (format == "NCHW") {
@@ -73,19 +68,11 @@ void AnfConvPopulater::PopulaterConv2DMultiGroup(const PrimitivePtr &prim,
attr->channelMultiplier = channel_mutiplier;
MS_ASSERT(inputs.size() == kAnfPopulaterTwo);
auto inputNode = inputs[kAnfPopulaterOne];
MS_ASSERT(inputNode != nullptr);
if (inputNode->isa<Parameter>()) {
auto paramNode = inputNode->cast<ParameterPtr>();
auto abstractBase = paramNode->abstract();
MS_ASSERT(abstractBase != nullptr);
if (utils::isa<abstract::AbstractTensorPtr>(abstractBase)) {
auto abstractTensor = utils::cast<abstract::AbstractTensorPtr>(abstractBase);
MS_ASSERT(abstractTensor != nullptr);
if (abstractTensor->format() == schema::Format_NCHW) {
abstractTensor->set_format(schema::Format_KCHW);
}
}
auto input_node = inputs[kAnfPopulaterOne];
MS_ASSERT(input_node != nullptr);
if (input_node->isa<Parameter>()) {
auto param_node = input_node->cast<ParameterPtr>();
ConvertConvWeight<float>(param_node);
}
primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
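  // Instead of only flipping the format flag on the parameter's abstract
  // tensor, the depthwise weight is now physically transposed with
  // ConvertConvWeight<float> (see the template in anf_conv_populater.h below).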
@@ -144,10 +131,9 @@ void AnfConvPopulater::CalQuantParam(const double &mean, const double &stdDev, f
*mMax = static_cast<float>((qmax - mean) / stdDev);
}
void AnfConvPopulater::PopulaterQuantParam(
const PrimitivePtr &prim,
std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam) {
void AnfConvPopulater::PopulaterQuantParam(const PrimitivePtr &prim,
std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam) {
auto narrow_range = prim->GetAttr("narrow_range");
bool narrowRangeQuantParam = GetValue<bool>(narrow_range);
auto num_bits = prim->GetAttr("num_bits");
@@ -206,8 +192,7 @@ void AnfConvPopulater::PopulaterQuantParam(
quantParam.max = 0.0;
quantParam.zeroPoint = 0;
quantParam.scale =
vecInputQuantParam->at(0).at(0).scale * vecInputQuantParam->at(1).at(i).scale;
quantParam.scale = vecInputQuantParam->at(0).at(0).scale * vecInputQuantParam->at(1).at(i).scale;
quants.emplace_back(quantParam);
}
vecInputQuantParam->emplace_back(quants);
......
/**
* This is the C++ adaptation and derivative work of Myia
* (https://github.com/mila-iqia/myia/).
*
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,9 +17,13 @@
#ifndef MINDSPORE_ANF_CONV_PARSER_H
#define MINDSPORE_ANF_CONV_PARSER_H
#include "tools/anf_importer/anf_populater/anf_node_populater.h"
#include <memory>
#include <vector>
#include "tools/anf_importer/anf_populater/anf_node_populater.h"
#include "base/base_ref.h"
#include "abstract/abstract_value.h"
#include "src/param_value_lite.h"
#include "src/ir/tensor.h"
namespace mindspore::lite {
class AnfConvPopulater : public AnfNodePopulater {
@@ -33,18 +34,65 @@ class AnfConvPopulater : public AnfNodePopulater {
const std::vector<AnfNodePtr> &inputs) override;
private:
void PopulaterConv2DMultiGroup(
const PrimitivePtr &prim,
const std::unique_ptr<schema::PrimitiveT> &primitive, const int &group, const std::vector<AnfNodePtr> &inputs);
void PopulaterConv2DSingleGroup(
const PrimitivePtr &prim,
const std::unique_ptr<schema::PrimitiveT> &primitive, const int &group);
void PopulaterQuantParam(
const PrimitivePtr &prim,
std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam);
void CalQuantParam(const double &mean, const double &stdDev, float *mMin,
float *mMax);
template <typename T>
void ConvertConvWeight(const ParameterPtr &param_node) {
MS_ASSERT(param_node != nullptr);
auto param = param_node->default_param();
auto weight = std::dynamic_pointer_cast<ParamValueLite>(param);
MS_ASSERT(weight != nullptr);
std::unique_ptr<T[]> buf(new (std::nothrow) T[weight->tensor_shape_size()]);
if (buf == nullptr) {
MS_LOG(ERROR) << "new buf failed";
return;
}
size_t filter_k = weight->tensor_shape()[0];
size_t filter_c = weight->tensor_shape()[1];
size_t filter_h = weight->tensor_shape()[2];
size_t filter_w = weight->tensor_shape()[3];
T *p1Buff = nullptr;
T *p2Buff = nullptr;
for (size_t k = 0; k < filter_k; ++k) {
for (size_t c = 0; c < filter_c; ++c) {
for (size_t h = 0; h < filter_h; ++h) {
for (size_t w = 0; w < filter_w; ++w) {
p1Buff = reinterpret_cast<T *>(weight->tensor_addr()) +
((k * filter_c * filter_h * filter_w) + (c * filter_h * filter_w) + (h * filter_w) + (w));
p2Buff =
buf.get() + ((c * filter_k * filter_h * filter_w) + (k * filter_h * filter_w) + (h * filter_w) + (w));
*p2Buff = *p1Buff;
}
}
}
}
auto ret = ::memcpy_s(weight->tensor_addr(), weight->tensor_shape_size() * sizeof(T), buf.get(),
weight->tensor_shape_size() * sizeof(T));
if (ret != EOK) {
MS_LOG(ERROR) << "memcpy_s failed: " << ret;
return;
}
auto abstract_base = param_node->abstract();
MS_ASSERT(abstract_base != nullptr);
if (utils::isa<abstract::AbstractTensorPtr>(abstract_base)) {
auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(abstract_base);
utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[0] = filter_c;
utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[1] = filter_k;
utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[2] = filter_h;
utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[3] = filter_w;
}
return;
}
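  // ConvertConvWeight<T> transposes a KCHW filter to CKHW in place: element
  // (k, c, h, w) at offset ((k*C + c)*H + h)*W + w moves to
  // ((c*K + k)*H + h)*W + w, and the first two dimensions of the parameter's
  // abstract shape are swapped to match.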
void PopulaterConv2DMultiGroup(const PrimitivePtr &prim, const std::unique_ptr<schema::PrimitiveT> &primitive,
const int &group, const std::vector<AnfNodePtr> &inputs);
void PopulaterConv2DSingleGroup(const PrimitivePtr &prim, const std::unique_ptr<schema::PrimitiveT> &primitive,
const int &group);
void PopulaterQuantParam(const PrimitivePtr &prim, std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam);
void CalQuantParam(const double &mean, const double &stdDev, float *mMin, float *mMax);
};
} // namespace mindspore::lite
......
@@ -42,14 +42,14 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() {
auto type_id = static_cast<TypeId>(tensor->dataType);
auto type_ptr = TypeIdToType(type_id);
auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape);
abstract_tensor->set_format(tensor->format);
parameter->set_abstract(abstract_tensor);
parameter->set_name("const_" + std::to_string(i));
parameter->set_name("const_" + std::to_string(i) + "_parameter");
ParamValueLitePtr param_value = std::make_shared<ParamValueLite>();
MS_ASSERT(param_value != nullptr);
param_value->set_tensor_shape(shape);
param_value->set_tensor_type(type_id);
param_value->set_format(tensor->format);
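    // The serialized tensor's format is now recorded on the ParamValueLite;
    // this replaces the abstract_tensor->set_format() call removed above.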
if (!tensor->data.empty()) {
auto size = tensor->data.size();
char *tensor_data = new (std::nothrow) char[size];
@@ -138,6 +138,7 @@ void AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CN
auto get_item_value = NewValueNode(MakeValue<int>(i));
std::vector<AnfNodePtr> inputs{tuple_get_item_prim, dst_cnode, get_item_value};
CNodePtr get_item_cnode = func_graph_->NewCNode(inputs);
get_item_cnode->set_fullname_with_scope(src_cnode->name + "_getitem_" + std::to_string(i));
AddNode(out_tensor_id, get_item_cnode);
}
dst_cnode->set_abstract(std::make_shared<abstract::AbstractTuple>(abstract_list));
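  // Naming each TupleGetItem node explicitly ("<src>_getitem_<i>") gives every
  // output of a multi-output node a stable fullname_with_scope for later lookup.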
@@ -170,27 +171,41 @@ int AnfImporterFromMetaGraphT::ConverterCNode() {
int AnfImporterFromMetaGraphT::AddReturnCNode() {
MS_EXCEPTION_IF_NULL(meta_graph_);
MS_EXCEPTION_IF_NULL(func_graph_);
std::vector<AnfNodePtr> make_tuple_inputs;
auto make_tuple_prim = NewValueNode(GetMakeTuplePrim());
make_tuple_inputs.emplace_back(make_tuple_prim);
for (auto tensor_id : meta_graph_->outputIndex) {
auto cNode = GetNode(tensor_id);
if (nullptr == cNode) {
if (meta_graph_->outputIndex.size() > 1) {
std::vector<AnfNodePtr> make_tuple_inputs;
auto make_tuple_prim = NewValueNode(GetMakeTuplePrim());
make_tuple_inputs.emplace_back(make_tuple_prim);
for (auto tensor_id : meta_graph_->outputIndex) {
auto cNode = GetNode(tensor_id);
if (nullptr == cNode) {
MS_LOG(ERROR) << "Can't find input node.";
return RET_ERROR;
}
make_tuple_inputs.emplace_back(cNode);
}
auto make_tuple_cnode = func_graph_->NewCNode(make_tuple_inputs);
make_tuple_cnode->set_fullname_with_scope("return tuple");
std::vector<AnfNodePtr> op_inputs;
auto value_node = NewValueNode(GetReturnPrim());
op_inputs.emplace_back(value_node);
op_inputs.emplace_back(make_tuple_cnode);
auto cnode = func_graph_->NewCNode(op_inputs);
cnode->set_fullname_with_scope("return");
func_graph_->set_return(cnode);
} else {
auto value_node = NewValueNode(GetReturnPrim());
std::vector<AnfNodePtr> op_inputs{value_node};
auto cnode = GetNode(meta_graph_->outputIndex.front());
if (nullptr == cnode) {
MS_LOG(ERROR) << "Can't find input node.";
return RET_ERROR;
}
make_tuple_inputs.emplace_back(cNode);
op_inputs.emplace_back(cnode);
auto return_cnode = func_graph_->NewCNode(op_inputs);
return_cnode->set_fullname_with_scope("return");
func_graph_->set_return(return_cnode);
}
auto make_tuple_cnode = func_graph_->NewCNode(make_tuple_inputs);
make_tuple_cnode->set_fullname_with_scope("return tuple");
std::vector<AnfNodePtr> op_inputs;
auto value_node = NewValueNode(GetReturnPrim());
op_inputs.emplace_back(value_node);
op_inputs.emplace_back(make_tuple_cnode);
auto cnode = func_graph_->NewCNode(op_inputs);
cnode->set_fullname_with_scope("return");
func_graph_->set_return(cnode);
return RET_OK;
}
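// Single-output graphs now feed the Return node directly; the MakeTuple
// wrapper is built only when outputIndex holds more than one tensor.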
......
@@ -167,9 +167,6 @@ STATUS WeightFormatHardCodePass::HardCodeMS(const std::unique_ptr<CNodeT> &node,
if (opType == PrimitiveType_Conv2D) {
weightTensor->format = Format_KCHW;
} else if (opType == PrimitiveType_DepthwiseConv2D) {
if (weightTensor->format == Format_KCHW) {
TransFilterFormat<float>(weightTensor.get(), kKCHW2CKHW);
}
weightTensor->format = Format_CKHW;
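    // With the importer now pre-transposing depthwise weights to CKHW
    // (ConvertConvWeight), the extra KCHW->CKHW TransFilterFormat call is
    // dropped here to avoid converting the filter twice.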
} else {
MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(opType) << ", node: " << node->name;
......