diff --git a/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.cc b/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.cc
index 9cf794e4a40b1383c753b28e2baa2199d076a6e3..0515bb52f37006dcc7e4dded28b10eac0ae3bd6d 100644
--- a/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.cc
+++ b/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.cc
@@ -18,6 +18,14 @@
 #include <string>
 namespace mindspore {
 namespace lite {
+AnfNodePopulaterRegistry::~AnfNodePopulaterRegistry() {
+  for (auto &ite : populaters) {
+    if (ite.second != nullptr) {
+      delete ite.second;
+      ite.second = nullptr;
+    }
+  }
+}
 AnfNodePopulaterRegistry *AnfNodePopulaterRegistry::GetInstance() {
   static AnfNodePopulaterRegistry instance;
   return &instance;
diff --git a/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.h b/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.h
index 2d1ebb74bf6b646e7a059a444a0bce966d21c2e3..2f7b984fe298148afc64ba9263b7f8ab6cd7dc41 100644
--- a/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.h
+++ b/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.h
@@ -23,7 +23,7 @@ namespace mindspore::lite {
 class AnfNodePopulaterRegistry {
  public:
   AnfNodePopulaterRegistry() = default;
-  virtual ~AnfNodePopulaterRegistry() = default;
+  virtual ~AnfNodePopulaterRegistry();
   static AnfNodePopulaterRegistry *GetInstance();
   AnfNodePopulater *GetNodePopulater(const std::string &name);
   void SetNodePopulater(const std::string &name, AnfNodePopulater *populater);
diff --git a/mindspore/lite/tools/anf_importer/import_from_protobuf.cc b/mindspore/lite/tools/anf_importer/import_from_protobuf.cc
index 6447100f6de7feae3679c53892c5ce16ce5dd50e..0c1e046bebfad65184faccec381051331c66d44d 100644
--- a/mindspore/lite/tools/anf_importer/import_from_protobuf.cc
+++ b/mindspore/lite/tools/anf_importer/import_from_protobuf.cc
@@ -140,6 +140,8 @@ bool AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &nod
   auto ret = memcpy_s(tensor_data_buf, tensor_info->Size(), initial_data.data(), initial_data.size());
   if (EOK != ret) {
     MS_LOG(ERROR) << "memcpy_s error";
+    delete[] tensor_data_buf;
+    delete tensor_info;
     return false;
   }
 
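Note on the pattern above: the two added deletes plug a leak on the `memcpy_s` failure path, which previously returned without releasing either allocation. A minimal sketch of the same guarantee expressed with RAII (hypothetical names, not the tree's API):

```cpp
#include <cstring>
#include <memory>
#include <vector>

// Minimal sketch, not the patch itself: copying an initializer blob into a
// freshly allocated buffer. With unique_ptr, the early return on a failed
// copy cannot leak, which the manual deletes above have to guarantee by hand.
bool CopyInitialData(const std::vector<char> &initial_data, size_t tensor_size) {
  auto tensor_data_buf = std::make_unique<char[]>(tensor_size);
  if (initial_data.size() > tensor_size) {  // stands in for the memcpy_s bounds check
    return false;                           // buffer freed automatically here
  }
  std::memcpy(tensor_data_buf.get(), initial_data.data(), initial_data.size());
  // ... hand tensor_data_buf.release() to whatever takes ownership ...
  return true;
}
```

With `unique_ptr`, error paths added later cannot reintroduce the leak.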
diff --git a/mindspore/lite/tools/converter/converter.cc b/mindspore/lite/tools/converter/converter.cc
index f55146dfaa2269dea32fa2d5f5bbfbe5b384c0e0..3f2788acee8a813097b4fb2bf2552f62da789886 100644
--- a/mindspore/lite/tools/converter/converter.cc
+++ b/mindspore/lite/tools/converter/converter.cc
@@ -43,18 +43,10 @@ Converter::Converter() {
 }
 
 Converter::~Converter() {
-  if (nullptr != modelParser) {
-    delete modelParser;
-  }
-  if (nullptr != modelImporter) {
-    delete modelImporter;
-  }
-  if (nullptr != transform) {
-    delete transform;
-  }
-  if (nullptr != anfTransform) {
-    delete anfTransform;
-  }
+  delete modelParser;
+  delete modelImporter;
+  delete transform;
+  delete anfTransform;
 }
 
 class MindsporeImporter : public Converter {
@@ -154,7 +146,11 @@ void Converter::CreateQuantizer(FuncGraphPtr funcGraph, const converter::Flags *
   }
 }
 int RunConverter(int argc, const char **argv) {
-  auto flags = new converter::Flags;
+  std::unique_ptr<converter::Flags> flags(new (std::nothrow) converter::Flags);
+  if (flags == nullptr) {
+    MS_LOG(ERROR) << "new flags error";
+    return RET_ERROR;
+  }
   auto status = flags->Init(argc, argv);
   if (status == RET_SUCCESS_EXIT) {
     return 0;
@@ -173,20 +169,20 @@
       auto graph = std::make_shared<FuncGraph>();
       auto onnx_graph = AnfImporterFromProtobuf::ReadOnnxFromBinary(flags->modelFile);
       MindsporeImporter mindsporeImporter(onnx_graph, graph);
-      fb_graph = mindsporeImporter.Convert(flags);
+      fb_graph = mindsporeImporter.Convert(flags.get());
       break;
     }
     case FmkType::FmkType_CAFFE: {
       CaffeConverter caffeConverter;
-      fb_graph = caffeConverter.Convert(flags);
+      fb_graph = caffeConverter.Convert(flags.get());
     } break;
     case FmkType::FmkType_TFLITE: {
       TfliteConverter tfLiteConverter;
-      fb_graph = tfLiteConverter.Convert(flags);
+      fb_graph = tfLiteConverter.Convert(flags.get());
     } break;
     case FmkType::FmkType_ONNX: {
       OnnxConverter onnxConverter;
-      fb_graph = onnxConverter.Convert(flags);
+      fb_graph = onnxConverter.Convert(flags.get());
     } break;
     default: {
       MS_LOG(ERROR) << "Unsupported fmkType: " << flags->fmk;
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc
index 7b2d0eda29190a88ca6b31b0750d0a84af5a238c..6fad87a412b956d26cd0d283ac56f6123d4c921a 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc
@@ -26,7 +26,7 @@ void CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op, schema::C
   }
   std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam = std::make_unique<schema::DepthwiseConv2DT>();
   if (depthwiseConv2DParam == nullptr) {
-    // MS_LOGW("new DepthwiseConv2DT failed");
+    MS_LOG(ERROR) << "new DepthwiseConv2DT failed";
     return;
   }
   depthwiseConv2DParam->format = attr->format;
@@ -53,8 +53,11 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
                                      schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
   op->name = proto.name();
-  schema::Conv2DT *attr = new schema::Conv2DT();
-
+  std::unique_ptr<schema::Conv2DT> attr(new (std::nothrow) schema::Conv2DT());
+  if (attr == nullptr) {
+    MS_LOG(ERROR) << "new Conv2DT failed";
+    return RET_ERROR;
+  }
   attr->format = schema::Format_NCHW;
 
   const caffe::ConvolutionParameter convParam = proto.convolution_param();
@@ -118,9 +121,9 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c
   attr->padMode = schema::PadMode_CAFFE;
   op->primitive = std::make_unique<schema::PrimitiveT>();
   op->primitive->value.type = schema::PrimitiveType_Conv2D;
-  op->primitive->value.value = attr;
+  op->primitive->value.value = attr.get();
 
-  ParseGroupConvolution(op, attr);
+  ParseGroupConvolution(op, attr.release());
   status = convParser.ParseWeight(weight, weightVec);
   if (status != RET_OK) {
     MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed";
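The `Conv2DT` change above follows a build-under-`unique_ptr`, hand-off-with-`release()` pattern: the flatbuffer union behind `op->primitive->value` owns a raw pointer, so the smart pointer must relinquish ownership exactly once. A minimal sketch with stand-in types (hypothetical, not the schema's real classes):

```cpp
#include <memory>
#include <new>

struct Attr { int format = 0; };
struct PrimitiveValue {
  Attr *value = nullptr;          // owns its raw pointer, as the flatbuffer union does
  ~PrimitiveValue() { delete value; }
};

// Minimal sketch: construct under unique_ptr so early returns cannot leak,
// then release() exactly once when a raw-pointer-owning structure takes over.
bool BuildAttr(PrimitiveValue *prim) {
  std::unique_ptr<Attr> attr(new (std::nothrow) Attr());
  if (attr == nullptr) {
    return false;                 // nothing to clean up on the failure path
  }
  attr->format = 1;               // ... populate fields ...
  prim->value = attr.release();   // ownership handed off exactly once
  return true;
}
```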
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc
index eceadc8b94f63fa9837b50f7a1effec56572b960..6fccbc9a9527e8e9f0f912cfc8a3806449f44242 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc
@@ -159,7 +159,7 @@ STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr<tflite::TensorT> &tflite_tensor,
   auto isConst = (!tensor_buffer->data.empty());
   if (isConst) {
     CopyConstTensorData(tflite_model_buffer, tflite_tensor.get(), tensor.get());
-  } else if (tensor->dataType ==TypeId::kNumberTypeUInt8) {
+  } else if (tensor->dataType == TypeId::kNumberTypeUInt8) {
     // set in/out tensor to int8 to fit ms-lite op
     tensor->dataType = TypeId::kNumberTypeInt8;
   }
diff --git a/mindspore/lite/tools/converter/quantizer/aware_quantizer.cc b/mindspore/lite/tools/converter/quantizer/aware_quantizer.cc
index 93fb2ad1804b0906c75f10d6520881e3643e073b..9e0d50b7c99fcf9b45f6da8efda7ec0613d4bbb9 100644
--- a/mindspore/lite/tools/converter/quantizer/aware_quantizer.cc
+++ b/mindspore/lite/tools/converter/quantizer/aware_quantizer.cc
@@ -103,11 +103,13 @@ AwareQuantizer::AwareQuantizer(schema::MetaGraphT *graph,
   const float stdValue = std::stof(stdValues, &sz);
   sz = 0;
   const float mean = std::stof(meanValues, &sz);
+  std::unique_ptr<InputArray> inArr = nullptr;
   if (inputInferType == "FLOAT") {
-    mInputArray = new InputArray(mean, stdValue);
+    inArr.reset(new (std::nothrow) InputArray(mean, stdValue));
   } else {
-    mInputArray = new InputArray(mean, stdValue, TypeId::kNumberTypeUInt8);
+    inArr.reset(new (std::nothrow) InputArray(mean, stdValue, TypeId::kNumberTypeUInt8));
   }
+  mInputArray = inArr.release();
   mInputArray->InitQuantParam();
 }
 
@@ -527,9 +529,9 @@ STATUS AwareQuantizer::QuantConvBias(const mindspore::schema::MetaGraphT *graph,
 
   // quant bias data
   auto bShapeSize = GetShapeSize(*(biasTensor.get()));
-  auto *qDatas = new (std::nothrow) int32_t[bShapeSize];
+  std::unique_ptr<int32_t[]> qDatas(new (std::nothrow) int32_t[bShapeSize]);
   if (qDatas == nullptr) {
-    // MS_LOGE("new qDatas failed");
+    MS_LOG(ERROR) << "new qDatas failed";
     return RET_ERROR;
   }
   void *biasData = biasTensor->data.data();
@@ -541,13 +543,11 @@ STATUS AwareQuantizer::QuantConvBias(const mindspore::schema::MetaGraphT *graph,
   biasTensor->data.clear();
   biasTensor->data.resize(bShapeSize * sizeof(int32_t));
   auto ret = memcpy_s(biasTensor->data.data(), bShapeSize * sizeof(int32_t),
-                      qDatas, bShapeSize * sizeof(int32_t));
+                      qDatas.get(), bShapeSize * sizeof(int32_t));
   if (ret != EOK) {
-    // MS_LOGE("memcpy_s failed: %d", ret);
-    delete[] qDatas;
+    MS_LOG(ERROR) << "memcpy_s failed: " << ret;
     return RET_ERROR;
   }
-  delete[] qDatas;
   return RET_OK;
 }
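For `qDatas`, the array form `std::unique_ptr<int32_t[]>` pairs the `new[]` with a `delete[]` automatically, which is what lets both explicit `delete[]` calls disappear. A minimal sketch of the shape of that function (hypothetical signature; the real quantization math is omitted):

```cpp
#include <cstdint>
#include <cstring>
#include <memory>
#include <new>

// Minimal sketch of the qDatas change: the scratch buffer is released on the
// allocation-failure return, the copy-failure return, and the success return.
bool QuantizeBias(const float *bias, size_t count, int32_t *out, size_t out_count) {
  std::unique_ptr<int32_t[]> q_data(new (std::nothrow) int32_t[count]);
  if (q_data == nullptr) {
    return false;  // allocation failed; nothing to clean up
  }
  for (size_t i = 0; i < count; i++) {
    q_data[i] = static_cast<int32_t>(bias[i]);  // stand-in for the real scaling
  }
  if (out_count < count) {
    return false;  // mirrors the memcpy_s failure branch; q_data still freed
  }
  std::memcpy(out, q_data.get(), count * sizeof(int32_t));
  return true;
}
```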
diff --git a/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc b/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc
index 5d129b0b76abbd39fdeccba356e2204ba86e0c80..0ccd0c703633ccb406bb866d1a9c484258c65351 100644
--- a/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc
+++ b/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc
@@ -441,50 +441,70 @@ class CalcActivation : public QuantParamCalcer {
     }
   }
 };
-
+QuantParamCalcRegister::~QuantParamCalcRegister() {
+  for (auto &ite : _registerMap) {
+    if (ite.second == nullptr) {
+      continue;
+    }
+    auto *calcer = ite.second;
+    // some calcers are registered under several op types, so null out every
+    // entry sharing this pointer before deleting it exactly once
+    for (auto &other : _registerMap) {
+      if (other.second == calcer) {
+        other.second = nullptr;
+      }
+    }
+    delete calcer;
+  }
+}
 QuantParamCalcRegister::QuantParamCalcRegister() {
   bool hasError = false;
-  auto baseCalcer = new (std::nothrow) QuantParamCalcer();
+  std::unique_ptr<QuantParamCalcer> baseCalcer(new (std::nothrow) QuantParamCalcer());
   if (baseCalcer == nullptr) {
-    // MS_LOGW("new QuantParamCalcer failed");
+    MS_LOG(ERROR) << "new QuantParamCalcer failed";
    hasError = true;
   }
-  auto commonCalcer = new (std::nothrow) CommonCalcer();
+  std::unique_ptr<CommonCalcer> commonCalcer(new (std::nothrow) CommonCalcer());
   if (commonCalcer == nullptr) {
-    // MS_LOGW("new commonCalcer failed");
+    MS_LOG(ERROR) << "new commonCalcer failed";
     hasError = true;
   }
-  auto linearCalcer = new (std::nothrow) LinearCalcer();
+
+  std::unique_ptr<LinearCalcer> linearCalcer(new (std::nothrow) LinearCalcer());
   if (linearCalcer == nullptr) {
-    // MS_LOGW("new linearCalcer failed");
+    MS_LOG(ERROR) << "new linearCalcer failed";
     hasError = true;
   }
   if (!hasError) {
     _registerMap[schema::PrimitiveType_Concat] = new CalcConcat();
     _registerMap[schema::PrimitiveType_Activation] = new CalcActivation();
     _registerMap[schema::PrimitiveType_Add] = new CalcAdd();
-    _registerMap[schema::PrimitiveType_Mul] = commonCalcer;
-    _registerMap[schema::PrimitiveType_Conv2D] = commonCalcer;
-    _registerMap[schema::PrimitiveType_DepthwiseConv2D] = commonCalcer;
-    _registerMap[schema::PrimitiveType_Pooling] = linearCalcer;
-    _registerMap[schema::PrimitiveType_Resize] = linearCalcer;
-    _registerMap[schema::PrimitiveType_Reshape] = linearCalcer;
-    _registerMap[schema::PrimitiveType_Shape] = linearCalcer;
+    _registerMap[schema::PrimitiveType_Mul] = commonCalcer.get();
+    _registerMap[schema::PrimitiveType_Conv2D] = commonCalcer.get();
+    _registerMap[schema::PrimitiveType_DepthwiseConv2D] = commonCalcer.get();
+    _registerMap[schema::PrimitiveType_Pooling] = linearCalcer.get();
+    _registerMap[schema::PrimitiveType_Resize] = linearCalcer.get();
+    _registerMap[schema::PrimitiveType_Reshape] = linearCalcer.get();
+    _registerMap[schema::PrimitiveType_Shape] = linearCalcer.get();
     _registerMap[schema::PrimitiveType_SoftMax] = new CalcToSet(0, 1);
-    _registerMap[schema::PrimitiveType_Squeeze] = linearCalcer;
+    _registerMap[schema::PrimitiveType_Squeeze] = linearCalcer.get();
     _registerMap[schema::PrimitiveType_RealDiv] = new CalcRealDiv();
-    _registerMap[schema::PrimitiveType_Reduce] = commonCalcer;
-    _registerMap[schema::PrimitiveType_BiasAdd] = commonCalcer;
-    _registerMap[schema::PrimitiveType_Mean] = linearCalcer;
-    _registerMap[schema::PrimitiveType_Transpose] = linearCalcer;
-    _registerMap[schema::PrimitiveType_MatMul] = commonCalcer;
-    _registerMap[schema::PrimitiveType_FullConnection] = commonCalcer;
-    _registerMap[schema::PrimitiveType_Nchw2Nhwc] = linearCalcer;
-    _registerMap[schema::PrimitiveType_Nhwc2Nchw] = linearCalcer;
+    _registerMap[schema::PrimitiveType_Reduce] = commonCalcer.get();
+    _registerMap[schema::PrimitiveType_BiasAdd] = commonCalcer.get();
+    _registerMap[schema::PrimitiveType_Mean] = linearCalcer.get();
+    _registerMap[schema::PrimitiveType_Transpose] = linearCalcer.get();
+    _registerMap[schema::PrimitiveType_MatMul] = commonCalcer.get();
+    _registerMap[schema::PrimitiveType_FullConnection] = commonCalcer.get();
+    _registerMap[schema::PrimitiveType_Nchw2Nhwc] = linearCalcer.get();
+    _registerMap[schema::PrimitiveType_Nhwc2Nchw] = linearCalcer.get();
+    // todo
     // detection_postprocess op's quant param will not infer only fetch from preNode or postNode
     // because we will not insert quantTransNode after this node in tflite_graph_8bit model if input data is float.
     // if quantTransNode is inserted after detection_postprocess node, there will be some errors
-    _registerMap[schema::PrimitiveType_DetectionPostProcess] = baseCalcer;
+    _registerMap[schema::PrimitiveType_DetectionPostProcess] = baseCalcer.get();
+    baseCalcer.release();
+    linearCalcer.release();
+    commonCalcer.release();
   }
 }
diff --git a/mindspore/lite/tools/converter/quantizer/calc_quant_param.h b/mindspore/lite/tools/converter/quantizer/calc_quant_param.h
index 31455f30d239d9ee85de4eff8fdf29a5887858d5..4eae30e4d75c683b09a1a249876715625494a547 100644
--- a/mindspore/lite/tools/converter/quantizer/calc_quant_param.h
+++ b/mindspore/lite/tools/converter/quantizer/calc_quant_param.h
@@ -55,7 +55,7 @@ class LinearCalcer : public QuantParamCalcer {
 
 class QuantParamCalcRegister {
  public:
-  virtual ~QuantParamCalcRegister() = default;
+  virtual ~QuantParamCalcRegister();
   QuantParamCalcer *GetQuantParamCalcer(schema::PrimitiveType opType);
   static QuantParamCalcRegister *GetInstance();
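One hazard in this registry: `commonCalcer` and `linearCalcer` are registered under several primitive types, so a destructor that blindly deletes every map entry frees the same pointer more than once; the destructor above therefore clears all aliases of a pointer before deleting it. A minimal sketch of the dedupe-then-delete idea using `std::set` (hypothetical names, not the tree's classes):

```cpp
#include <map>
#include <set>

struct Calcer {
  virtual ~Calcer() = default;
};

// Minimal sketch of the registry ownership above: one calcer instance may sit
// behind several keys, so the destructor must delete each distinct pointer
// exactly once, not once per map entry.
class Registry {
 public:
  void Set(int key, Calcer *calcer) { map_[key] = calcer; }
  ~Registry() {
    std::set<Calcer *> freed;  // dedupe shared entries before deleting
    for (auto &entry : map_) {
      if (entry.second != nullptr && freed.insert(entry.second).second) {
        delete entry.second;
      }
      entry.second = nullptr;
    }
  }

 private:
  std::map<int, Calcer *> map_;
};
```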
diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
index 595ba6d6a00c21af45edea1f4aa0bb24e72e5f02..b2f788513af414eec56bf4ec41d32fe342a1cc12 100644
--- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
@@ -55,14 +55,18 @@ const std::vector<Tensor *> GetCNodeInputTensors(const CNodePtr &CNode) {
   if (lite_tensor_size == 0) {
     return input_tensors;
   }
-  auto tensor_data = new(std::nothrow)char[lite_tensor_size / sizeof(char)];
+  auto tensor_data = new (std::nothrow) char[lite_tensor_size / sizeof(char)];
   if (tensor_data == nullptr) {
     MS_LOG(ERROR) << "tensor_data is nullptr";
+    delete lite_tensor;
     return input_tensors;
   }
   auto ret = memcpy_s(tensor_data, lite_tensor_size, tensorT->data.data(), lite_tensor_size);
   if (ret != EOK) {
+    delete lite_tensor;
+    delete[] tensor_data;
     MS_LOG(EXCEPTION) << "memcpy error: " << ret;
+    return input_tensors;
   }
   lite_tensor->SetData(tensor_data);
   input_tensors.emplace_back(lite_tensor);
@@ -111,7 +115,9 @@ const ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *ten
   }
   auto ret = memcpy_s(tensor_data, size * sizeof(float), tensor->Data(), size * sizeof(float));
   if (ret != EOK) {
+    delete[] tensor_data;
     MS_LOG(EXCEPTION) << "memcpy error: " << ret;
+    return parameter;
   }
   param_value->set_tensor_addr(tensor_data);
   param_value->set_tensor_size(size * sizeof(float) / sizeof(uint8_t));
@@ -138,7 +144,17 @@ kernel::LiteKernel *GetLiteKernel(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs,
-
+void FreeInputTensor(std::vector<Tensor *> *input_tensor) {
+  MS_ASSERT(input_tensor != nullptr);
+  for (size_t i = 0; i < input_tensor->size(); i++) {
+    if ((*input_tensor)[i] == nullptr) {
+      continue;
+    }
+    delete (*input_tensor)[i];
+    (*input_tensor)[i] = nullptr;
+  }
+  return;
+}
 const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                         const EquivPtr &) const {
   CheckIfFuncGraphIsNull(func_graph);
@@ -154,6 +170,7 @@
       auto input_cnode = input_node->cast<CNodePtr>();
       auto input_tensors = GetCNodeInputTensors(input_cnode);
       if (input_tensors.empty() || input_tensors.size() != input_cnode->inputs().size() - 1) {
+        FreeInputTensor(&input_tensors);
         return any_node;
       }
       MS_LOG(INFO) << "Begin fold node:" << input_node->fullname_with_scope();
@@ -163,21 +180,25 @@
       auto lite_primitive = mindspore::lite::PrimitiveC::CreatePrimitive(scheam_primitive);
       if (lite_primitive == nullptr) {
         MS_LOG(DEBUG) << "constant_folding schedule node lite primitive nullptr";
+        FreeInputTensor(&input_tensors);
         return nullptr;
       }
       lite_primitive->InferShape(input_tensors, output_tensors);
       auto lite_kernel = GetLiteKernel(input_tensors, output_tensors, lite_primitive);
       if (lite_kernel == nullptr) {
         MS_LOG(DEBUG) << "constant_folding schedule node lite kernel nullptr";
+        FreeInputTensor(&input_tensors);
         return nullptr;
       }
       auto ret = lite_kernel->Run();
       if (0 != ret) {
+        FreeInputTensor(&input_tensors);
         MS_LOG(EXCEPTION) << "run kernel failed, name: " << lite_kernel->name();
       }
       auto new_parameter = CreateNewParamter(func_graph, output_tensors.front());
       new_parameter->set_name(input_node->fullname_with_scope());
       any_node->set_input(i, new_parameter);
+      FreeInputTensor(&input_tensors);
     }
   }
   return any_node;
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
index 0ac962b3a561ad6fcfb0fa768eb1179368777a4a..e6198da62282501fab3b9f5625f87a9d017b44d2 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
@@ -73,7 +73,7 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
   auto abstr = transform_node->abstract();
   int kernel_nums = Get_Kenrnel_nums(conv_node);
   if (kernel_nums <= 0) {
-    MS_LOG(ERROR) << "Unsupported conv node, " << conv_node->DebugString();
+    MS_LOG(INFO) << "Unsupported conv node, " << conv_node->DebugString();
     return node;
   }
   auto trans_scale = new(std::nothrow) float[kernel_nums];
@@ -84,6 +84,7 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
   auto trans_bias = new(std::nothrow) float[kernel_nums];
   if (trans_bias == nullptr) {
     MS_LOG(ERROR) << "tensor_data is nullptr";
+    delete[] trans_scale;
     return nullptr;
   }
   GenTransParam(transform_node, kernel_nums, trans_scale, trans_bias);
@@ -164,7 +165,7 @@ const void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph,
     bias_flag = true;
   } else {
     bias_data = new(std::nothrow) float[kernel_num];
-    if (trans_scale == nullptr) {
+    if (bias_data == nullptr) {
      MS_LOG(ERROR) << "tensor_data is nullptr";
       return;
     }
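`FreeInputTensor` has to be called on every exit path of `ConstFoldPass::Process`, which is easy to miss when new early returns are added. A scope guard is a common alternative; a minimal sketch (hypothetical helper, not part of this patch):

```cpp
#include <vector>

struct Tensor { /* stand-in for the lite tensor type */ };

// Minimal sketch: a guard that frees a vector of temporary tensors when it
// goes out of scope, covering every early return that the explicit
// FreeInputTensor calls above have to enumerate one by one.
class TensorVectorGuard {
 public:
  explicit TensorVectorGuard(std::vector<Tensor *> *tensors) : tensors_(tensors) {}
  ~TensorVectorGuard() {
    for (auto &t : *tensors_) {
      delete t;
      t = nullptr;
    }
  }
  TensorVectorGuard(const TensorVectorGuard &) = delete;
  TensorVectorGuard &operator=(const TensorVectorGuard &) = delete;

 private:
  std::vector<Tensor *> *tensors_;
};
```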