diff --git a/mindspore/lite/tools/converter/anf_transform.cc b/mindspore/lite/tools/converter/anf_transform.cc
index 87f9b3b541359071b14189126e6b5e30401aabda..60e7f16c90974e7ab363301f9a63de2d67f11a9b 100644
--- a/mindspore/lite/tools/converter/anf_transform.cc
+++ b/mindspore/lite/tools/converter/anf_transform.cc
@@ -36,7 +36,7 @@ void AnfTransform::SetGraphDef(schema::MetaGraphT *_dstDef) { graphDefT = _dstDe
 FuncGraphPtr AnfTransform::Transform(const FuncGraphPtr &old_graph) {
   // return old_graph;
   auto optimizer = std::make_shared<opt::GraphOptimizer>();
-  auto pm = std::make_shared<opt::PassManager>();
+  auto pm = std::make_shared<opt::PassManager>("anf fusion pass manager", false);
   pm->AddPass(std::make_shared<opt::ConvBiasaddFusion>());
   pm->AddPass(std::make_shared<opt::ConvBatchNormFusion>());
   pm->AddPass(std::make_shared<opt::ConvScaleFusion>());
diff --git a/mindspore/lite/tools/optimizer/common/gllo_utils.cc b/mindspore/lite/tools/optimizer/common/gllo_utils.cc
index f665f63ee059aebf8074bae3bd4f365de6d636d7..6f554652be3d38c22b70f0a536032d648e545fed 100644
--- a/mindspore/lite/tools/optimizer/common/gllo_utils.cc
+++ b/mindspore/lite/tools/optimizer/common/gllo_utils.cc
@@ -327,7 +327,15 @@ schema::PrimitiveType GetCNodeType(const BaseRef &n) {
 }
 
 bool IsParamNode(const BaseRef &n) {
-  return utils::isa<ParameterPtr>(n);
+  if (!utils::isa<ParameterPtr>(n)) {
+    return false;
+  }
+  auto param = utils::cast<ParameterPtr>(n)->default_param();
+  auto tensor = std::dynamic_pointer_cast<ParamValueLite>(param);
+  if (tensor == nullptr) {
+    return false;
+  }
+  return tensor->tensor_addr() != nullptr;
 }
 
 bool IsConvNode(const BaseRef &n) {
diff --git a/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc b/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc
index 5907700b6d9b919c6dc58be7cb613e7925605031..c4376190424616b66fc391a5e85e52a1cf083c56 100644
--- a/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc
+++ b/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc
@@ -28,6 +28,8 @@
 namespace mindspore {
 namespace opt {
+static size_t count = 0;
+constexpr size_t kMaxRepassTimes = 9;
 const std::vector<PassPtr> &PassManager::Passes() const { return passes_; }
 
 void PassManager::AddPass(const PassPtr &pass) {
@@ -79,9 +81,11 @@ bool PassManager::Run(const FuncGraphPtr &func_graph) const {
   while (change) {
     change = Run(func_graph, passes_);
     changed = change || changed;
-    if (run_only_once_) {
+    if (run_only_once_ || count > kMaxRepassTimes) {
       break;
     }
+    count++;
+    MS_LOG(INFO) << "Run pass counts:" << count;
   }
   return changed;
 }
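Reviewer note: the hunk above caps the re-pass loop in PassManager::Run, but `count` is file-static, so it accumulates across every PassManager instance and every graph converted in the same process; once it exceeds kMaxRepassTimes, later Run calls degrade to a single sweep. A minimal standalone sketch of the intended capped fixed-point iteration with a local counter instead (names here are illustrative, not part of the patch):

    #include <cstddef>
    #include <functional>

    constexpr size_t kMaxRepassTimes = 9;

    // Re-run the pass list until nothing changes or the cap is hit.
    bool RunToFixedPoint(bool run_only_once, const std::function<bool()> &run_all_passes) {
      bool changed = false;
      bool change = true;
      size_t count = 0;  // local: resets per graph, unlike the file-static counter in the patch
      while (change) {
        change = run_all_passes();  // one sweep over all registered passes
        changed = change || changed;
        if (run_only_once || count > kMaxRepassTimes) {
          break;  // the cap keeps two passes that undo each other from looping forever
        }
        count++;
      }
      return changed;
    }
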
diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
index b1d2e03b89de08ea33229cab5b83f0471a75f6a2..51f4de1cd2c8cde2bf6d54cffefa10c17ecdcc16 100644
--- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
@@ -45,12 +45,21 @@ const std::vector<Tensor *> GetCNodeInputTensors(const CNodePtr &CNode) {
     auto tensor_shape = tensorT->dims;
     auto lite_tensor =
       new(std::nothrow)Tensor(TypeId(tensorT->dataType), tensor_shape, tensorT->format, tensorT->nodeType);
+    if (lite_tensor == nullptr) {
+      MS_LOG(ERROR) << "lite tensor is nullptr";
+      return input_tensors;
+    }
     auto lite_tensor_size = tensorT->data.size() * sizeof(uint8_t);
     // when tensorT as graph input
     if (lite_tensor_size == 0) {
       return input_tensors;
     }
     auto tensor_data = new(std::nothrow)char[lite_tensor_size / sizeof(char)];
+    if (tensor_data == nullptr) {
+      MS_LOG(ERROR) << "tensor_data is nullptr";
+      delete lite_tensor;
+      return input_tensors;
+    }
     auto ret = memcpy_s(tensor_data, lite_tensor_size, tensorT->data.data(), lite_tensor_size);
     if (ret != EOK) {
       MS_LOG(EXCEPTION) << "memcpy error: " << ret;
@@ -97,6 +106,10 @@ const ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *ten
   if (tensor->Data() != nullptr) {
     auto size = tensor->ElementsNum();
     auto tensor_data = new (std::nothrow) float[size];
+    if (tensor_data == nullptr) {
+      MS_LOG(ERROR) << "tensor_data is nullptr";
+      return nullptr;
+    }
     auto ret = memcpy_s(tensor_data, size * sizeof(float), tensor->Data(), size * sizeof(float));
     if (ret != EOK) {
       MS_LOG(EXCEPTION) << "memcpy error: " << ret;
@@ -150,11 +163,15 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
   std::vector<Tensor *> output_tensors{output_nums, new Tensor()};
   auto scheam_primitive = PackPrimitiveT(input_cnode);
   auto lite_primitive = lite::Primitive::CreatePrimitive(scheam_primitive);
+  if (lite_primitive == nullptr) {
+    MS_LOG(DEBUG) << "constant_folding schedule node lite primitive nullptr";
+    return nullptr;
+  }
   lite_primitive->InferShape(input_tensors, output_tensors);
   auto lite_kernel = GetLiteKernel(input_tensors, output_tensors, lite_primitive);
   if (lite_kernel == nullptr) {
-    MS_LOG(ERROR) << "constant_folding schedule node lite kernel nullptr";
-    return any_node;
+    MS_LOG(DEBUG) << "constant_folding schedule node lite kernel nullptr";
+    return nullptr;
   }
   auto ret = lite_kernel->Run();
   if (0 != ret) {
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc
index e3e8bc272893333cb0bc5528434a2dce6b008982..be9d03d250a563ad80a438125a72e1c16cb2d39c 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc
@@ -83,7 +83,11 @@ void GenConvNewBias(const FuncGraphPtr &func_graph, const CNodePtr &conv_node, c
   if (kernel_nums <= 0) {
     MS_LOG(EXCEPTION) << "kernel num less than 0";
   }
-  auto add_bias_data = new (std::nothrow) float[kernel_nums];
+  auto add_bias_data = new(std::nothrow) float[kernel_nums];
+  if (add_bias_data == nullptr) {
+    MS_LOG(ERROR) << "add_bias_data is nullptr";
+    return;
+  }
   auto bias_add_weight = bias_node->input(kAddWEIGHTINDEX);
   CheckIfNodeIsParam(bias_add_weight);
   auto add_weight_param = bias_add_weight->cast<ParameterPtr>()->default_param();
@@ -140,7 +144,7 @@ const AnfNodePtr ConvBiasaddFusion::Process(const FuncGraphPtr &func_graph, cons
   AnfNodePtr conv_node_anf = add_node->input(1);
   CheckIfAnfNodeIsNull(conv_node_anf);
   if (IsMultiOutputTensors(func_graph, conv_node_anf)) {
-    return add_node;
+    return nullptr;
   }
   auto conv_node = conv_node_anf->cast<CNodePtr>();
   CheckIfCNodeIsNull(conv_node);
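Reviewer note: the switch from `return any_node` / `return add_node` to `return nullptr` appears to be the key fix here: in this pattern-pass framework a non-null return is treated as a substitution, so returning the matched node on a skipped case kept signaling "graph changed" and re-triggered the repass loop. Separately, these hunks repeat the new(std::nothrow) + null-check + manual delete[] pattern; a sketch of the same guard using std::unique_ptr<float[]>, which keeps the check but frees the buffer on every early return (FillBias is a hypothetical stand-in for GenConvNewBias-style logic, not code from the patch):

    #include <memory>
    #include <new>

    // Hypothetical consumer of the bias buffer.
    bool FillBias(float *bias, int kernel_nums) {
      for (int i = 0; i < kernel_nums; i++) bias[i] = 0.1f * i;  // stand-in payload
      return true;
    }

    bool GenBiasGuarded(int kernel_nums) {
      // trailing () value-initializes the array to zero, like the memset_s in the patch
      std::unique_ptr<float[]> bias(new (std::nothrow) float[kernel_nums]());
      if (bias == nullptr) {
        return false;  // allocation failed; no manual delete[] needed on any path
      }
      return FillBias(bias.get(), kernel_nums);
    }
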
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
index 5f9f51201058b1efdbbd736f378a04f1882102e6..0ac962b3a561ad6fcfb0fa768eb1179368777a4a 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
@@ -67,7 +67,7 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
   auto pre_node = transform_node->input(1);
   auto conv_node = pre_node->cast<CNodePtr>();
   if (IsMultiOutputTensors(func_graph, conv_node)) {
-    return transform_node;
+    return nullptr;
   }
 
   auto abstr = transform_node->abstract();
@@ -76,8 +76,17 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
     MS_LOG(ERROR) << "Unsupported conv node, " << conv_node->DebugString();
     return node;
   }
-  auto trans_scale = new (std::nothrow) float[kernel_nums];
-  auto trans_bias = new (std::nothrow) float[kernel_nums];
+  auto trans_scale = new(std::nothrow) float[kernel_nums];
+  if (trans_scale == nullptr) {
+    MS_LOG(ERROR) << "trans_scale is nullptr";
+    return nullptr;
+  }
+  auto trans_bias = new(std::nothrow) float[kernel_nums];
+  if (trans_bias == nullptr) {
+    MS_LOG(ERROR) << "trans_bias is nullptr";
+    delete[] trans_scale;
+    return nullptr;
+  }
   GenTransParam(transform_node, kernel_nums, trans_scale, trans_bias);
   GenNewConvTensor(func_graph, conv_node, kernel_nums, trans_scale, trans_bias);
   delete[] trans_bias;
@@ -155,7 +164,11 @@ const void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph,
     bias_data = reinterpret_cast<float *>(bias_tensor->tensor_addr());
     bias_flag = true;
   } else {
-    bias_data = new (std::nothrow) float[kernel_num];
+    bias_data = new(std::nothrow) float[kernel_num];
+    if (bias_data == nullptr) {
+      MS_LOG(ERROR) << "bias_data is nullptr";
+      return;
+    }
   }
   CalNewBiasTensor(bias_data, kernel_num, bias_flag, trans_scale, trans_bias);
   if (!bias_flag) {
@@ -193,7 +206,11 @@ const void ConvTransformFusion::CalNewBiasTensor(float *bias_data, int kernel_nu
                                                  const float *trans_scale, const float *trans_bias) const {
   MS_ASSERT(bias_data != nullptr);
   if (bias_flag) {
-    auto tmp_bias_data = new (std::nothrow) float[kernel_num];
+    auto tmp_bias_data = new(std::nothrow) float[kernel_num];
+    if (tmp_bias_data == nullptr) {
+      MS_LOG(ERROR) << "tmp_bias_data is nullptr";
+      return;
+    }
     if (EOK != memset_s(tmp_bias_data, kernel_num * sizeof(float), 0, kernel_num * sizeof(float))) {
       MS_LOG(EXCEPTION) << "memset bias data failed";
     }
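Reviewer note: for context on what the guarded buffers hold, ConvTransformFusion folds a following scale/BN-style transform into the convolution, with trans_scale and trans_bias as per-output-channel factors. A sketch of the bias update that CalNewBiasTensor is assumed to perform; the semantics (new_bias[i] = old_bias[i] * trans_scale[i] + trans_bias[i]) are inferred from the surrounding diff, not quoted from the source file:

    #include <cstring>

    void CalNewBias(float *bias_data, int kernel_num, bool bias_flag,
                    const float *trans_scale, const float *trans_bias) {
      if (bias_flag) {
        // conv already had a bias: scale it per channel, then add the transform's bias
        for (int i = 0; i < kernel_num; i++) {
          bias_data[i] = bias_data[i] * trans_scale[i] + trans_bias[i];
        }
      } else {
        // conv had no bias: the new bias is just the transform's bias
        memcpy(bias_data, trans_bias, kernel_num * sizeof(float));
      }
    }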