diff --git a/mindspore/lite/test/CMakeLists.txt b/mindspore/lite/test/CMakeLists.txt
index c078f9c7b00e89fcbaa2188313e8260f3df5c2c9..8e34bfe1df7edade85b26eab28705b0011edcb20 100644
--- a/mindspore/lite/test/CMakeLists.txt
+++ b/mindspore/lite/test/CMakeLists.txt
@@ -246,6 +246,7 @@ if(BUILD_CONVERTER)
         ${LITE_DIR}/tools/optimizer/common/gllo_utils.cc
         ${LITE_DIR}/tools/optimizer/fusion/conv_biasadd_fusion.cc
         ${LITE_DIR}/tools/optimizer/fusion/conv_activation_fusion.cc
+        ${LITE_DIR}/tools/optimizer/fusion/conv_tuple_activation_fusion.cc
         ${LITE_DIR}/tools/optimizer/fusion/conv_transform_fusion.cc
         ${LITE_DIR}/tools/optimizer/fusion/conv_scale_fusion.cc
         ${LITE_DIR}/tools/optimizer/fusion/conv_bn_fusion.cc
diff --git a/mindspore/lite/tools/converter/CMakeLists.txt b/mindspore/lite/tools/converter/CMakeLists.txt
index da853e87813ed4f3897dc5adc39f344d99b5addc..1c74d87300cfc3832327ed4f6bfd13a20cfdb1f8 100644
--- a/mindspore/lite/tools/converter/CMakeLists.txt
+++ b/mindspore/lite/tools/converter/CMakeLists.txt
@@ -102,6 +102,7 @@ file(GLOB_RECURSE CONVERTER_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
         ../optimizer/common/gllo_utils.cc
         ../optimizer/fusion/conv_biasadd_fusion.cc
         ../optimizer/fusion/conv_activation_fusion.cc
+        ../optimizer/fusion/conv_tuple_activation_fusion.cc
         ../optimizer/fusion/conv_transform_fusion.cc
         ../optimizer/fusion/conv_scale_fusion.cc
         ../optimizer/fusion/conv_bn_fusion.cc
diff --git a/mindspore/lite/tools/converter/anf_transform.cc b/mindspore/lite/tools/converter/anf_transform.cc
index 2501121890c94d417346a55c6a017721d48cef4e..8542084d0a8e572946f7be35ee1c3a238303fcc7 100644
--- a/mindspore/lite/tools/converter/anf_transform.cc
+++ b/mindspore/lite/tools/converter/anf_transform.cc
@@ -20,6 +20,7 @@
 #include "utils/log_adapter.h"
 #include "tools/optimizer/fusion/conv_biasadd_fusion.h"
 #include "tools/optimizer/fusion/conv_activation_fusion.h"
+#include "tools/optimizer/fusion/conv_tuple_activation_fusion.h"
 #include "tools/optimizer/fusion/conv_scale_fusion.h"
 #include "tools/optimizer/fusion/conv_bn_fusion.h"
 #include "tools/optimizer/fusion/constant_folding_fusion.h"
@@ -44,6 +45,12 @@ FuncGraphPtr AnfTransform::Transform(const FuncGraphPtr &old_graph) {
                                                           schema::ActivationType_RELU));
   pm->AddPass(std::make_shared<opt::ConvActivationFusion>(true, "conv_relu6", schema::PrimitiveType_Activation,
                                                           schema::ActivationType_RELU6));
+  pm->AddPass(std::make_shared<opt::ConvTupleActivationFusion>(true, "conv_tuple_relu",
+                                                               schema::PrimitiveType_Activation,
+                                                               schema::ActivationType_RELU));
+  pm->AddPass(std::make_shared<opt::ConvTupleActivationFusion>(true, "conv_tuple_relu6",
+                                                               schema::PrimitiveType_Activation,
+                                                               schema::ActivationType_RELU6));
   pm->AddPass(std::make_shared<opt::ConstFoldPass>());
   optimizer->AddPassManager(pm);
   FuncGraphPtr new_graph = optimizer->Optimize(old_graph);
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc
new file mode 100644
index 0000000000000000000000000000000000000000..3964193a9731e79ef1714c0edca65932e27e09b2
--- /dev/null
+++ b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc
@@ -0,0 +1,99 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tools/optimizer/fusion/conv_tuple_activation_fusion.h"
+#include <memory>
+#include "src/ops/primitive_c.h"
+#include "src/ops/conv2d.h"
+#include "src/ops/depthwise_conv2d.h"
+#include "src/ops/activation.h"
+#include "schema/inner/model_generated.h"
+#include "tools/optimizer/common/gllo_utils.h"
+
+namespace mindspore::opt {
+namespace {
+constexpr size_t kActivationInputsLength = 2;
+}
+const BaseRef ConvTupleActivationFusion::DefinePattern() const {
+  auto conv_var = std::make_shared<CondVar>(IsConvNode);
+  auto tuple_index = std::make_shared<Var>();
+  auto tuple_prim = new schema::PrimitiveT();
+  tuple_prim->value.type = schema::PrimitiveType_TupleGetItem;
+  auto tuple_value = std::make_shared<lite::PrimitiveC>(tuple_prim);
+  VectorRef tuple_get_item = VectorRef({tuple_value, conv_var, tuple_index});
+
+  auto act_prim = new schema::PrimitiveT();
+  act_prim->value.type = primitive_type;
+  auto act_value = std::make_shared<lite::PrimitiveC>(act_prim);
+
+  return VectorRef({act_value, tuple_get_item});
+}
+
+const AnfNodePtr ConvTupleActivationFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
+                                                    const EquivPtr &) const {
+  MS_LOG(DEBUG) << "conv tuple activation pass process:" << schema::EnumNamesPrimitiveType()[primitive_type];
+  CheckIfFuncGraphIsNull(func_graph);
+
+  CheckIfAnfNodeIsNull(node);
+  auto act_node = node->cast<CNodePtr>();
+  CheckIfCNodeIsNull(act_node);
+  CheckInputSize(act_node, kActivationInputsLength);
+
+  auto primitivec = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(act_node->input(0));
+  MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::Activation>>(primitivec));
+  auto act_primitivec = utils::cast<std::shared_ptr<mindspore::lite::Activation>>(primitivec);
+  MS_ASSERT(act_primitivec != nullptr);
+  if (act_primitivec->GetType() != activation_type) {
+    return nullptr;
+  }
+  AnfNodePtr tuple_node = act_node->input(1);
+  MS_ASSERT(tuple_node != nullptr);
+  auto tuple_cnode = tuple_node->cast<CNodePtr>();
+  auto conv_node = tuple_cnode->input(1);
+  CheckIfAnfNodeIsNull(conv_node);
+  if (conv_node != nullptr && conv_node->isa<CNode>()) {
+    if (IsMultiOutputTensors(func_graph, conv_node)) {
+      return nullptr;
+    }
+    auto conv_cnode = conv_node->cast<CNodePtr>();
+    auto node_type = GetCNodeType(conv_cnode);
+    auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(conv_cnode->input(0));
+    MS_ASSERT(primitive_c);
+    if (node_type == schema::PrimitiveType_Conv2D) {
+      MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::Conv2D>>(primitive_c));
+      auto primc = utils::cast<std::shared_ptr<mindspore::lite::Conv2D>>(primitive_c);
+      MS_ASSERT(primc != nullptr);
+      if (primc->GetActivationType() == schema::ActivationType_NO_ACTIVATION) {
+        primc->SetActivationType(activation_type);
+        conv_node->set_abstract(act_node->abstract());
+        return conv_node;
+      }
+    } else if (node_type == schema::PrimitiveType_DepthwiseConv2D) {
+      MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive_c));
+      auto primc = utils::cast<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive_c);
+      MS_ASSERT(primc != nullptr);
+      if (primc->GetActivationType() == schema::ActivationType_NO_ACTIVATION) {
+        primc->SetActivationType(activation_type);
+        conv_node->set_abstract(act_node->abstract());
+        return conv_node;
+      }
+    } else {
+      MS_LOG(ERROR) << "conv tuple activation pass matches only conv2d or depthwise_conv2d";
+    }
+  }
+  return nullptr;
+}
+}  // namespace mindspore::opt
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc9344615ab225c64f16c6ee6e34240deae12f25
--- /dev/null
+++ b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TUPLE_ACTIVATION_FUSION_H_
+#define MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TUPLE_ACTIVATION_FUSION_H_
+
+#include <string>
+#include "backend/optimizer/common/optimizer.h"
+#include "schema/inner/model_generated.h"
+
+namespace mindspore {
+namespace opt {
+class ConvTupleActivationFusion : public PatternProcessPass {
+ public:
+  explicit ConvTupleActivationFusion(bool multigraph = true, const std::string &name = "conv_tuple_activation_fusion",
+                                     schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
+                                     schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
+      : PatternProcessPass(name, multigraph), primitive_type(primitive), activation_type(activation) {}
+  ~ConvTupleActivationFusion() override = default;
+  const BaseRef DefinePattern() const override;
+  const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
+  schema::PrimitiveType primitive_type;
+  schema::ActivationType activation_type;
+};
+}  // namespace opt
+}  // namespace mindspore
+#endif  // MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TUPLE_ACTIVATION_FUSION_H_
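
Reviewer note, not part of the patch: the new pass extends ConvActivationFusion to the case where a
TupleGetItem node sits between the convolution and the activation, rewriting
conv2d/depthwise_conv2d -> TupleGetItem -> Activation(RELU or RELU6) into a single conv node whose
activation_type attribute is set, but only when the conv has a single consumer and no activation is
already fused into it. The standalone C++ sketch below models that fuse/no-fuse decision; ConvNode
and TryFuse are hypothetical names for illustration only, not MindSpore APIs.

    #include <iostream>

    // Toy stand-ins for the graph node kinds the pass inspects (illustrative only).
    enum class NodeType { Conv2D, DepthwiseConv2D, Other };
    enum class ActType { None, Relu, Relu6 };

    struct ConvNode {
      NodeType type = NodeType::Conv2D;
      ActType activation = ActType::None;   // activation already fused into the conv, if any
      bool has_multiple_consumers = false;  // analogue of IsMultiOutputTensors()
    };

    // Mirrors the checks in ConvTupleActivationFusion::Process: refuse to fuse when the conv
    // output has other consumers, when the node kind is unsupported, or when an activation
    // is already fused; otherwise fold the activation into the conv.
    bool TryFuse(ConvNode *conv, ActType act) {
      if (conv->has_multiple_consumers) return false;
      if (conv->type != NodeType::Conv2D && conv->type != NodeType::DepthwiseConv2D) return false;
      if (conv->activation != ActType::None) return false;
      conv->activation = act;  // the real pass also moves the activation node's abstract onto conv
      return true;
    }

    int main() {
      ConvNode conv;
      bool first = TryFuse(&conv, ActType::Relu);    // true: RELU folded into the conv
      bool second = TryFuse(&conv, ActType::Relu6);  // false: conv already carries an activation
      std::cout << std::boolalpha << first << ' ' << second << '\n';
      return 0;
    }

Running the sketch prints "true false": the first call fuses, the second is rejected for the same
reason the real pass bails out when GetActivationType() != ActivationType_NO_ACTIVATION.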