Commit 6d500c86 authored by mindspore-ci-bot, committed by Gitee

!5480 add infershape and trans op optimize

Merge pull request !5480 from zhengjun10/master
......@@ -52,6 +52,12 @@ static const std::vector<schema::PrimitiveType> int8OpList = {
schema::PrimitiveType_Squeeze, schema::PrimitiveType_Sub,
schema::PrimitiveType_TopK, schema::PrimitiveType_Unsqueeze};
static const std::vector<schema::PrimitiveType> needInsertOpList = {
schema::PrimitiveType_Eltwise, schema::PrimitiveType_Activation,
schema::PrimitiveType_Concat, schema::PrimitiveType_Power};
std::vector<schema::PrimitiveType> GetInsertOpList() { return needInsertOpList; }
std::vector<schema::PrimitiveType> Getfp32FullOpList() { return fp32FullOpList; }
std::vector<schema::PrimitiveType> GetNhwcOpList() { return nhwcOpList; }
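A hedged usage sketch (not part of this commit): downstream passes consult these accessors through the converter's existing IsContain helper, exactly as TransOpInsertPass::Run does later in this diff. Only that call pattern is assumed here; the function name is illustrative.
// Decide whether a node type needs trans-op insertion around it.
bool NeedTransOpInsert(schema::PrimitiveType type) {
  return IsContain(GetInsertOpList(), type);  // membership test already used by TransOpInsertPass::Run
}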
......
......@@ -30,6 +30,8 @@ namespace lite {
using STATUS = int;
STATUS BroadCastQuantParam(schema::MetaGraphT *graphT, const std::unique_ptr<schema::CNodeT> &node);
std::vector<schema::PrimitiveType> GetInsertOpList();
std::vector<schema::PrimitiveType> GetNhwcOpList();
std::vector<schema::PrimitiveType> Getfp32FullOpList();
......
......@@ -25,11 +25,13 @@
#include "tools/converter/legacy_optimizer/fusion/format_trans_transpose_fusion_pass.h"
#include "tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.h"
#include "tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h"
#include "tools/converter/legacy_optimizer/graph/trans_format_remove_pass.h"
#include "tools/converter/legacy_optimizer/graph/infershape_pass.h"
#include "tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.h"
#include "tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.h"
#include "tools/converter/legacy_optimizer/graph/weight_format_transform_pass.h"
#include "tools/converter/legacy_optimizer/graph/format_trans_pass.h"
#include "tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.h"
#include "tools/converter/legacy_optimizer/graph/trans_format_insert_pass.h"
#include "tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.h"
#include "tools/converter/legacy_optimizer/graph/unused_node_remove_pass.h"
#include "tools/converter/legacy_optimizer/graph/topological_sort_pass.h"
......@@ -145,11 +147,14 @@ int GraphDefTransform::Transform(const converter::Flags &ctx) {
formatTransPass->SetQuantType(ctx.quantType);
formatTransPass->SetFmk(ctx.fmk);
formatTransOptimizer.AddPass(formatTransPass);
formatTransOptimizer.AddPass(new EltwiseFormatTransPass());
formatTransOptimizer.AddPass(new (std::nothrow) TopologicalSortPass());
formatTransOptimizer.AddPass(new (std::nothrow) InferShapePass());
formatTransOptimizer.AddPass(new (std::nothrow) TransOpRemovePass());
formatTransOptimizer.AddPass(new (std::nothrow) TransOpInsertPass());
formatTransOptimizer.AddPass(new (std::nothrow) FormatTransFusionPass());
formatTransOptimizer.AddPass(new (std::nothrow) IsolatedNodeRemovePass());
status = formatTransOptimizer.Run(graphDefT);
if (status != RET_OK && status != RET_NO_CHANGE) {
if (status != RET_OK && status != RET_NO_CHANGE && status != RET_INFER_ERR) {
MS_LOG(ERROR) << "Run formatTransOptimizer graphPasses Failed";
return status;
}
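For readers extending this pipeline, a minimal GraphPass skeleton under the interface visible in this diff (STATUS Run(MetaGraphT *) override, registered via AddPass); the pass name and body are illustrative only, not part of the commit:
class ExampleNoopPass : public GraphPass {
 public:
  ExampleNoopPass() = default;
  ~ExampleNoopPass() = default;
  STATUS Run(schema::MetaGraphT *graph) override {
    MS_ASSERT(graph != nullptr);
    // walk graph->nodes here; report RET_NO_CHANGE when nothing was rewritten
    return RET_NO_CHANGE;
  }
};
// formatTransOptimizer.AddPass(new (std::nothrow) ExampleNoopPass());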
......
add_library(graph_pass_mid OBJECT
${CMAKE_CURRENT_SOURCE_DIR}/format_trans_pass.cc
${CMAKE_CURRENT_SOURCE_DIR}/eltwise_format_trans_pass.cc
${CMAKE_CURRENT_SOURCE_DIR}/trans_format_insert_pass.cc
${CMAKE_CURRENT_SOURCE_DIR}/dtype_trans_pass.cc
${CMAKE_CURRENT_SOURCE_DIR}/isolated_node_remove_pass.cc
${CMAKE_CURRENT_SOURCE_DIR}/model_input_format_preprocess_pass.cc
......@@ -9,4 +9,6 @@ add_library(graph_pass_mid OBJECT
${CMAKE_CURRENT_SOURCE_DIR}/topological_sort_pass.cc
${CMAKE_CURRENT_SOURCE_DIR}/unused_node_remove_pass.cc
${CMAKE_CURRENT_SOURCE_DIR}/batchnorm_convert_scale_pass.cc
${CMAKE_CURRENT_SOURCE_DIR}/trans_format_remove_pass.cc
${CMAKE_CURRENT_SOURCE_DIR}/infershape_pass.cc
)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tools/converter/legacy_optimizer/graph/infershape_pass.h"
#include <vector>
#include "utils/log_adapter.h"
#include "include/errorcode.h"
#include "src/ir/tensor.h"
#include "src/ops/primitive_c.h"
using mindspore::lite::tensor::Tensor;
using mindspore::lite::PrimitiveC;
namespace mindspore {
namespace lite {
namespace {
std::vector<tensor::Tensor *> ConvertTensorToLiteTensor(MetaGraphT *graph, const std::vector<uint32_t> &tensor_indexs,
const schema::PrimitiveType node_type) {
std::vector<tensor::Tensor *> lite_tensors;
for (size_t i = 0; i < tensor_indexs.size(); i++) {
auto &tensorT = graph->allTensors.at(tensor_indexs[i]);
auto tensor_shape = tensorT->dims;
auto lite_tensor =
new(std::nothrow) tensor::Tensor(TypeId(tensorT->dataType), tensor_shape, tensorT->format, tensorT->nodeType);
if (lite_tensor == nullptr) {
MS_LOG(ERROR) << "lite tensor is nullptr";
return std::vector<tensor::Tensor *>();
}
// the Reshape op needs its shape input's const data to infer the output shape
if (node_type == schema::PrimitiveType_Reshape && i == 1 && tensorT->nodeType == NodeType_ValueNode) {
auto lite_tensor_size = tensorT->data.size() * sizeof(uint8_t);
// the shape arrives as a non-const parameter input with no data; bail out so the caller skips inference
if (lite_tensor_size == 0) {
delete lite_tensor;
return std::vector<tensor::Tensor *>();
}
auto tensor_data = new(std::nothrow) char[lite_tensor_size / sizeof(char)];
if (tensor_data == nullptr) {
MS_LOG(ERROR) << "tensor_data is nullptr";
delete lite_tensor;
return std::vector<tensor::Tensor *>();
}
auto ret = memcpy_s(tensor_data, lite_tensor_size, tensorT->data.data(), lite_tensor_size);
if (ret != EOK) {
delete lite_tensor;
delete[] tensor_data;
MS_LOG(ERROR) << "memcpy error: " << ret;
return std::vector<tensor::Tensor *>();
}
lite_tensor->SetData(tensor_data);
lite_tensors.emplace_back(lite_tensor);
continue;
}
lite_tensors.emplace_back(lite_tensor);
}
return lite_tensors;
}
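ConvertTensorToLiteTensor returns raw owning pointers, and InferShapePass::Run below frees them by hand after inference. A hedged cleanup helper one could factor out (the name is hypothetical, not in this commit):
void FreeLiteTensors(std::vector<tensor::Tensor *> *tensors) {
  for (auto *t : *tensors) {
    delete t;  // Tensor's destructor is assumed to release any buffer attached via SetData
  }
  tensors->clear();
}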
} // namespace
STATUS InferShapePass::Run(MetaGraphT *graph) {
MS_ASSERT(graph != nullptr);
for (auto iter = graph->nodes.begin(); iter != graph->nodes.end(); iter++) {
auto &node = *iter;
auto input_tensors = ConvertTensorToLiteTensor(graph, node->inputIndex, node->primitive->value.type);
if (input_tensors.empty() || input_tensors.size() != node->inputIndex.size()) {
MS_LOG(ERROR) << "convert input lite tensor error";
return RET_INFER_ERR;
}
auto output_tensors = ConvertTensorToLiteTensor(graph, node->outputIndex, node->primitive->value.type);
if (output_tensors.empty() || output_tensors.size() != node->outputIndex.size()) {
MS_LOG(ERROR) << "convert output lite tensor error";
return RET_INFER_ERR;
}
std::unique_ptr<PrimitiveT> primitiveT(new (std::nothrow) PrimitiveT(*node->primitive));
if (primitiveT == nullptr) {
MS_LOG(ERROR) << "copy primitiveT error";
return RET_ERROR;
}
auto primitiveC = std::shared_ptr<PrimitiveC>(PrimitiveC::UnPackFromSchemaPrimitiveT(primitiveT.release()));
if (primitiveC == nullptr) {
MS_LOG(ERROR) << "unpack primitiveT error";
return RET_ERROR;
}
auto ret = primitiveC->InferShape(input_tensors, output_tensors);
if (ret == RET_INFER_INVALID) {
MS_LOG(INFO) << "InferShape shouldn't be done before runtime, name: " << node->name
<< ", type: " << schema::EnumNamePrimitiveType(node->primitive->value.type)
<< "flag set to false.";
} else if (ret != RET_OK) {
MS_LOG(WARNING) << "InferShape failed, name: " << node->name
<< ", type: " << schema::EnumNamePrimitiveType(node->primitive->value.type);
return RET_INFER_ERR;
}
// copy output shape to tensorT
for (size_t i = 0; i < output_tensors.size(); i++) {
auto output_dims = output_tensors[i]->shape();
auto &output_tensor = graph->allTensors.at(node->outputIndex[i]);
output_tensor->dims.swap(output_dims);
output_tensor->format = output_tensors[i]->GetFormat();
output_tensor->dataType = output_tensors[i]->data_type();
}
for (auto input_tensor : input_tensors) {
delete input_tensor;
}
for (auto output_tensor : output_tensors) {
delete output_tensor;
}
}
return RET_OK;
}
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_PREDICT_INFERSHAPE_PASS_H
#define MINDSPORE_PREDICT_INFERSHAPE_PASS_H
#include <unordered_map>
#include <memory>
#include <string>
#include <utility>
#include "tools/common/graph_util.h"
#include "tools/converter/optimizer.h"
using mindspore::schema::TensorT;
namespace mindspore {
namespace lite {
class InferShapePass : public GraphPass {
public:
InferShapePass() = default;
~InferShapePass() = default;
STATUS Run(MetaGraphT *graph) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_PREDICT_INFERSHAPE_PASS_H
......@@ -17,7 +17,7 @@
#include <string>
#include <memory>
#include <utility>
#include "tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.h"
#include "tools/converter/legacy_optimizer/graph/trans_format_insert_pass.h"
#include "tools/common/converter_op_utils.h"
#include "tools/common/node_util.h"
#include "utils/log_adapter.h"
......@@ -25,7 +25,7 @@
namespace mindspore {
namespace lite {
bool EltwiseFormatTransPass::CanFusion(schema::MetaGraphT *graph, const std::unique_ptr<CNodeT> &node) {
bool TransOpInsertPass::CanFusion(schema::MetaGraphT *graph, const std::unique_ptr<CNodeT> &node) {
auto input_node_indexes = GetInputNodeIdx(*graph, *node);
pre_type_ = schema::PrimitiveType_NONE;
size_t has_trans_count = 0;
......@@ -95,7 +95,7 @@ bool EltwiseFormatTransPass::CanFusion(schema::MetaGraphT *graph, const std::uni
return can_fusion;
}
STATUS EltwiseFormatTransPass::FindOutTransType() {
STATUS TransOpInsertPass::FindOutTransType() {
pre_insert_trans_type_ = kNHWC2NCHW;
post_insert_trans_type_ = kNHWC2NCHW;
if (pre_type_ == PrimitiveType_NONE && post_type_ != PrimitiveType_NONE) {
......@@ -117,12 +117,12 @@ STATUS EltwiseFormatTransPass::FindOutTransType() {
return RET_OK;
}
STATUS EltwiseFormatTransPass::Run(schema::MetaGraphT *graph) {
STATUS TransOpInsertPass::Run(schema::MetaGraphT *graph) {
MS_ASSERT(graph != nullptr);
for (auto iter = graph->nodes.begin(); iter != graph->nodes.end(); iter++) {
auto &node = *iter;
auto type = node->primitive->value.type;
if (type != PrimitiveType_Eltwise && type != PrimitiveType_Activation) {
if (!IsContain(GetInsertOpList(), type)) {
continue;
}
auto node_name = node->name;
......@@ -134,7 +134,14 @@ STATUS EltwiseFormatTransPass::Run(schema::MetaGraphT *graph) {
MS_LOG(ERROR) << "FindOutTransType error";
return ret;
}
// a 4-D input shape means InferShape succeeded, so the Concat axis can be converted
if (type == PrimitiveType_Concat) {
if (graph->allTensors.at(node->inputIndex[0])->dims.size() == 4) {
node->primitive->value.AsConcat()->axis = -1;
} else {
continue;
}
}
STATUS status = RET_OK;
auto input_tensor_size = (*iter)->inputIndex.size();
for (size_t i = 0; i < input_tensor_size; i++) {
......
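A hedged restatement of the Concat gate above: only a Concat whose first input already carries a fully inferred 4-D shape is rewritten; everything else is deferred to runtime. The helper name is illustrative, not from this commit:
bool ConcatShapeInferred(const schema::MetaGraphT &graph, const schema::CNodeT &node) {
  // true only when InferShapePass produced a concrete 4-D shape for input 0
  return graph.allTensors.at(node.inputIndex[0])->dims.size() == 4;
}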
......@@ -24,11 +24,11 @@
namespace mindspore {
namespace lite {
class EltwiseFormatTransPass : public FormatTransPass {
class TransOpInsertPass : public FormatTransPass {
public:
EltwiseFormatTransPass() : FormatTransPass() {}
TransOpInsertPass() : FormatTransPass() {}
~EltwiseFormatTransPass() override = default;
~TransOpInsertPass() override = default;
STATUS Run(schema::MetaGraphT *graph) override;
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tools/converter/legacy_optimizer/graph/trans_format_remove_pass.h"
#include <vector>
#include "utils/log_adapter.h"
#include "include/errorcode.h"
#include "tools/common/graph_util.h"
#include "src/ir/tensor.h"
#include "src/ops/primitive_c.h"
using mindspore::lite::tensor::Tensor;
using mindspore::lite::PrimitiveC;
namespace mindspore {
namespace lite {
STATUS TransOpRemovePass::Run(MetaGraphT *graph) {
MS_ASSERT(graph != nullptr);
for (auto iter = graph->nodes.begin(); iter != graph->nodes.end(); iter++) {
auto &node = *iter;
auto type = node->primitive->value.type;
if (type == schema::PrimitiveType_Nchw2Nhwc || type == schema::PrimitiveType_Nhwc2Nchw) {
auto &input_tensor = graph->allTensors.at(node->inputIndex.at(0));
// a trans op on a tensor with fewer than 4 known dims is an identity and can be removed
if (!input_tensor->dims.empty() && input_tensor->dims.size() < 4) {
auto status = IsolateOneWayNode(graph, node.get(), true);
if (status != RET_OK) {
MS_LOG(ERROR) << "IsolateOneWayNode failed, node: " << node->name.c_str() << ", error: " << status;
return status;
}
}
}
}
return RET_OK;
}
} // namespace lite
} // namespace mindspore
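The removal condition above, isolated as a hedged predicate (illustrative name): a Nchw2Nhwc/Nhwc2Nchw node only permutes 4-D layouts, so on a tensor with fewer than four known dims it acts as an identity and can be bypassed via IsolateOneWayNode.
bool TransOpIsIdentity(const std::unique_ptr<schema::TensorT> &input_tensor) {
  // empty dims means the shape is still unknown, so the trans op must be kept
  return !input_tensor->dims.empty() && input_tensor->dims.size() < 4;
}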
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_PREDICT_TRANS_FORMAT_REMOVE_PASS_H
#define MINDSPORE_PREDICT_TRANS_FORMAT_REMOVE_PASS_H
#include <unordered_map>
#include <memory>
#include <string>
#include <utility>
#include "tools/common/graph_util.h"
#include "tools/converter/optimizer.h"
using mindspore::schema::TensorT;
namespace mindspore {
namespace lite {
class TransOpRemovePass : public GraphPass {
public:
TransOpRemovePass() = default;
~TransOpRemovePass() = default;
STATUS Run(MetaGraphT *graph) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_PREDICT_TRANS_FORMAT_REMOVE_PASS_H
......@@ -52,7 +52,7 @@ STATUS Optimizer::Run(schema::MetaGraphT *graphDefT) {
for (auto &opDef : graphDefT->nodes) {
for (auto pass : this->nodePasses) {
status = pass->Run(new GraphNode(graphDefT, opDef.get()));
if (status != RET_OK && status != RET_NO_CHANGE) {
if (status != RET_OK && status != RET_NO_CHANGE && status != RET_INFER_ERR) {
MS_LOG(ERROR) << "Run NodePass failed";
return status;
} else {
......@@ -65,7 +65,7 @@ STATUS Optimizer::Run(schema::MetaGraphT *graphDefT) {
for (auto pass : this->graphPasses) {
status = pass->Run(graphDefT);
if (status != RET_OK && status != RET_NO_CHANGE) {
if (status != RET_OK && status != RET_NO_CHANGE && status != RET_INFER_ERR) {
MS_LOG(ERROR) << "Run GraphPass failed";
return status;
} else {
......
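The two widened conditions in Optimizer::Run (and the one in GraphDefTransform::Transform above) share one rule: RET_INFER_ERR is treated as non-fatal so a shape-inference failure on runtime-only shapes does not abort the remaining passes. A hedged refactoring sketch; the helper name is not from this commit:
bool IsFatalPassStatus(STATUS status) {
  return status != RET_OK && status != RET_NO_CHANGE && status != RET_INFER_ERR;
}
// usage: if (IsFatalPassStatus(status)) { MS_LOG(ERROR) << "Run GraphPass failed"; return status; }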