Commit 0ba85128 authored by zhengjun10

add conv tuple activation fusion
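
The new ConvTupleActivationFusion pass folds an Activation node that consumes a
convolution result through a TupleGetItem into the Conv2D or DepthwiseConv2D
itself, by setting the convolution's activation_type attribute (only when the
conv has no activation yet and no other consumers). Schematically, in
illustrative pseudocode rather than actual converter API:

    before: out = Activation(TupleGetItem(Conv2D(x, w), 0))   // RELU or RELU6
    after:  out = Conv2D(x, w)                                // activation_type = RELU/RELU6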

Parent def1ff43
......
@@ -246,6 +246,7 @@ if(BUILD_CONVERTER)
        ${LITE_DIR}/tools/optimizer/common/gllo_utils.cc
        ${LITE_DIR}/tools/optimizer/fusion/conv_biasadd_fusion.cc
        ${LITE_DIR}/tools/optimizer/fusion/conv_activation_fusion.cc
        ${LITE_DIR}/tools/optimizer/fusion/conv_tuple_activation_fusion.cc
        ${LITE_DIR}/tools/optimizer/fusion/conv_transform_fusion.cc
        ${LITE_DIR}/tools/optimizer/fusion/conv_scale_fusion.cc
        ${LITE_DIR}/tools/optimizer/fusion/conv_bn_fusion.cc
......
......
@@ -102,6 +102,7 @@ file(GLOB_RECURSE CONVERTER_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
        ../optimizer/common/gllo_utils.cc
        ../optimizer/fusion/conv_biasadd_fusion.cc
        ../optimizer/fusion/conv_activation_fusion.cc
        ../optimizer/fusion/conv_tuple_activation_fusion.cc
        ../optimizer/fusion/conv_transform_fusion.cc
        ../optimizer/fusion/conv_scale_fusion.cc
        ../optimizer/fusion/conv_bn_fusion.cc
......
......
@@ -20,6 +20,7 @@
#include "utils/log_adapter.h"
#include "tools/optimizer/fusion/conv_biasadd_fusion.h"
#include "tools/optimizer/fusion/conv_activation_fusion.h"
#include "tools/optimizer/fusion/conv_tuple_activation_fusion.h"
#include "tools/optimizer/fusion/conv_scale_fusion.h"
#include "tools/optimizer/fusion/conv_bn_fusion.h"
#include "tools/optimizer/fusion/constant_folding_fusion.h"
......
@@ -44,6 +45,12 @@ FuncGraphPtr AnfTransform::Transform(const FuncGraphPtr &old_graph) {
                                                          schema::ActivationType_RELU));
  pm->AddPass(std::make_shared<opt::ConvActivationFusion>(true, "conv_relu6", schema::PrimitiveType_Activation,
                                                          schema::ActivationType_RELU6));
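  // Also fuse activations that consume the conv output through a TupleGetItem node.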
  pm->AddPass(std::make_shared<opt::ConvTupleActivationFusion>(true, "conv_tuple_relu",
                                                               schema::PrimitiveType_Activation,
                                                               schema::ActivationType_RELU));
  pm->AddPass(std::make_shared<opt::ConvTupleActivationFusion>(true, "conv_tuple_relu6",
                                                               schema::PrimitiveType_Activation,
                                                               schema::ActivationType_RELU6));
  pm->AddPass(std::make_shared<opt::ConstFoldPass>());
  optimizer->AddPassManager(pm);
  FuncGraphPtr new_graph = optimizer->Optimize(old_graph);
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tools/optimizer/fusion/conv_tuple_activation_fusion.h"
#include <memory>
#include "src/ops/primitive_c.h"
#include "src/ops/conv2d.h"
#include "src/ops/depthwise_conv2d.h"
#include "src/ops/activation.h"
#include "schema/inner/model_generated.h"
#include "tools/optimizer/common/gllo_utils.h"
namespace mindspore::opt {
namespace {
constexpr size_t kActivationInputsLength = 2;
}  // namespace
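// Match the subgraph Activation(TupleGetItem(conv, index)): the conv node is constrained
// by the IsConvNode predicate, while the tuple index is left as a free variable.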
const BaseRef ConvTupleActivationFusion::DefinePattern() const {
  auto conv_var = std::make_shared<CondVar>(IsConvNode);
  auto tuple_index = std::make_shared<Var>();
  auto tuple_prim = new schema::PrimitiveT();
  tuple_prim->value.type = schema::PrimitiveType_TupleGetItem;
  auto tuple_value = std::make_shared<lite::PrimitiveC>(tuple_prim);
  VectorRef tuple_get_item = VectorRef({tuple_value, conv_var, tuple_index});
  auto act_prim = new schema::PrimitiveT();
  act_prim->value.type = primitive_type;
  auto act_value = std::make_shared<lite::PrimitiveC>(act_prim);
  return VectorRef({act_value, tuple_get_item});
}
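// On a match, fold the activation into the convolution's activation_type attribute and
// return the conv node, which takes the place of the matched Activation in the graph.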
const AnfNodePtr ConvTupleActivationFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                    const EquivPtr &) const {
  MS_LOG(DEBUG) << "conv tuple activation pass process:" << schema::EnumNamesPrimitiveType()[primitive_type];
  CheckIfFuncGraphIsNull(func_graph);
  CheckIfAnfNodeIsNull(node);
  auto act_node = node->cast<CNodePtr>();
  CheckIfCNodeIsNull(act_node);
  CheckInputSize(act_node, kActivationInputsLength);
  auto primitivec = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(act_node->input(0));
  MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::Activation>>(primitivec));
  auto act_primitivec = utils::cast<std::shared_ptr<mindspore::lite::Activation>>(primitivec);
  MS_ASSERT(act_primitivec != nullptr);
  if (act_primitivec->GetType() != activation_type) {
    return nullptr;
  }
  AnfNodePtr tuple_node = act_node->input(1);
  MS_ASSERT(tuple_node != nullptr);
  auto tuple_cnode = tuple_node->cast<CNodePtr>();
  CheckIfCNodeIsNull(tuple_cnode);
  auto conv_node = tuple_cnode->input(1);
  CheckIfAnfNodeIsNull(conv_node);
  if (conv_node != nullptr && conv_node->isa<CNode>()) {
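    // Do not fuse if the conv output has other consumers; fusing would change what they read.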
    if (IsMultiOutputTensors(func_graph, conv_node)) {
      return nullptr;
    }
    auto conv_cnode = conv_node->cast<CNodePtr>();
    auto node_type = GetCNodeType(conv_cnode);
    auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(conv_cnode->input(0));
    MS_ASSERT(primitive_c);
    if (node_type == schema::PrimitiveType_Conv2D) {
      MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::Conv2D>>(primitive_c));
      auto primc = utils::cast<std::shared_ptr<mindspore::lite::Conv2D>>(primitive_c);
      MS_ASSERT(primc != nullptr);
      if (primc->GetActivationType() == schema::ActivationType_NO_ACTIVATION) {
        primc->SetActivationType(activation_type);
        conv_node->set_abstract(act_node->abstract());
        return conv_node;
      }
    } else if (node_type == schema::PrimitiveType_DepthwiseConv2D) {
      MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive_c));
      auto primc = utils::cast<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive_c);
      MS_ASSERT(primc != nullptr);
      if (primc->GetActivationType() == schema::ActivationType_NO_ACTIVATION) {
        primc->SetActivationType(activation_type);
        conv_node->set_abstract(act_node->abstract());
        return conv_node;
      }
    } else {
      MS_LOG(ERROR) << "conv tuple activation pass only matches conv2d or depthwise_conv2d";
    }
  }
  return nullptr;
}
} // namespace mindspore::opt
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TUPLE_ACTIVATION_FUSION_H_
#define MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TUPLE_ACTIVATION_FUSION_H_
#include <string>
#include "backend/optimizer/common/optimizer.h"
#include "schema/inner/model_generated.h"
namespace mindspore {
namespace opt {
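// Fuses Conv2D/DepthwiseConv2D -> TupleGetItem -> Activation into a single convolution that
// carries the activation in its activation_type attribute; the activation to match is given
// by the constructor parameters.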
class ConvTupleActivationFusion : public PatternProcessPass {
 public:
  explicit ConvTupleActivationFusion(bool multigraph = true, const std::string &name = "conv_tuple_activation_fusion",
                                     schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
                                     schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
      : PatternProcessPass(name, multigraph), primitive_type(primitive), activation_type(activation) {}
  ~ConvTupleActivationFusion() override = default;
  const BaseRef DefinePattern() const override;
  const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
  schema::PrimitiveType primitive_type;
  schema::ActivationType activation_type;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TUPLE_ACTIVATION_FUSION_H_