Commit 13f52ef1 authored by hangq

fix bugs in lite_session and tensor when destructing the session

Parent 8a0b3e23
@@ -56,10 +56,10 @@ class MS_API Context {
/// \brief Constructor of MindSpore Lite Context using input value for parameters.
///
- /// \param[in] threadNum Define the threadNum during the runtime.
+ /// \param[in] thread_num Define the threadNum during the runtime.
/// \param[in] allocator Define the allocator for malloc.
- /// \param[in] deviceCtx Define device information during the runtime.
- Context(int threadNum, std::shared_ptr<Allocator> allocator, DeviceContext deviceCtx);
+ /// \param[in] device_ctx Define device information during the runtime.
+ Context(int thread_num, std::shared_ptr<Allocator> allocator, DeviceContext device_ctx);
/// \brief Destructor of MindSpore Lite Context.
virtual ~Context();
......
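A minimal usage sketch of the renamed constructor; the header path is assumed, while `Allocator::Create()` and `DT_CPU` are taken from elsewhere in this commit:

    #include <memory>
    #include "include/context.h"  // assumed header location for Context

    void BuildContext() {
      mindspore::lite::DeviceContext device_ctx;
      device_ctx.type = mindspore::lite::DT_CPU;  // set the same way later in this commit
      auto allocator = mindspore::lite::Allocator::Create();  // as in Context's default ctor
      mindspore::lite::Context ctx(/*thread_num=*/2, allocator, device_ctx);
    }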
@@ -44,7 +44,7 @@ char *ReadFile(const char *file, size_t *size) {
ifs.seekg(0, std::ios::end);
*size = ifs.tellg();
- std::unique_ptr<char> buf(new (std::nothrow) char[*size]);
+ std::unique_ptr<char[]> buf(new (std::nothrow) char[*size]);
if (buf == nullptr) {
MS_LOG(ERROR) << "malloc buf failed, file: " << realPath;
ifs.close();
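The fix above matters because `std::unique_ptr<char>` releases its pointer with `delete`, while memory from `new char[n]` must be released with `delete[]`; only the array specialization `std::unique_ptr<char[]>` does that. A self-contained illustration:

    #include <cstddef>
    #include <memory>
    #include <new>

    void ReadIntoBuffer(std::size_t size) {
      // The array specialization releases with delete[], matching new char[size];
      // plain unique_ptr<char> would call delete, which is undefined behavior here.
      std::unique_ptr<char[]> buf(new (std::nothrow) char[size]);
      if (buf == nullptr) {
        return;  // nothrow new returns nullptr on failure instead of throwing
      }
      buf[0] = '\0';  // operator[] exists only on the array specialization
    }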
@@ -165,4 +165,3 @@ void CompareOutput(float *output_data, std::string file_path) {
} // namespace lite
} // namespace mindspore
@@ -33,7 +33,7 @@ std::vector<MSTensor *> PackToMSTensors(const std::vector<Tensor *> &in_tensors)
MS_LOG(ERROR) << "new LiteTensor failed";
return ret;
}
- ret.emplace_back();
+ ret.emplace_back(ms_tensor);
}
return ret;
}
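The bug fixed here is that `emplace_back()` with no arguments value-initializes the element, so the vector received a null `MSTensor *` and the freshly allocated `ms_tensor` leaked. A reduced reproduction with a stand-in type:

    #include <vector>

    struct MSTensor {};  // stand-in for mindspore::tensor::MSTensor

    std::vector<MSTensor *> Pack(MSTensor *ms_tensor) {
      std::vector<MSTensor *> ret;
      // ret.emplace_back();        // bug: appends nullptr, ms_tensor is lost
      ret.emplace_back(ms_tensor);  // fix: appends the actual pointer
      return ret;
    }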
......
@@ -22,10 +22,10 @@ Context::Context() { allocator = Allocator::Create(); }
Context::~Context() = default;
- Context::Context(int threadNum, std::shared_ptr<Allocator> allocator, DeviceContext deviceCtx) {
+ Context::Context(int thread_num, std::shared_ptr<Allocator> allocator, DeviceContext device_ctx) {
this->allocator = std::move(allocator);
-   this->thread_num_ = threadNum;
-   this->device_ctx_ = std::move(deviceCtx);
+   this->thread_num_ = thread_num;
+   this->device_ctx_ = device_ctx;
}
} // namespace mindspore::lite
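Note the asymmetry the new body keeps: the `shared_ptr` allocator is still moved, which skips an atomic reference-count increment, while `device_ctx` is now copied, since moving a small trivially copyable struct gains nothing. A sketch with simplified stand-in types:

    #include <memory>
    #include <utility>

    struct DeviceContext { int type = 0; };  // stand-in for the real struct

    struct Ctx {
      std::shared_ptr<int> allocator;  // stand-in allocator handle
      DeviceContext device_ctx_;
      Ctx(std::shared_ptr<int> alloc, DeviceContext device_ctx)
          : allocator(std::move(alloc)),  // move: avoids a refcount bump
            device_ctx_(device_ctx) {}    // copy: fine for a small POD
    };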
@@ -74,7 +74,11 @@ int Tensor::CopyTensor(const Tensor &srcTensor, bool copyData) {
Tensor::~Tensor() {
if (nullptr != this->data_) {
-   free(this->data_);
+   if (this->allocator_ != nullptr) {
+     this->allocator_->Free(this->data_);
+   } else {
+     free(this->data_);
+   }
}
}
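This destructor fix is one half of the session-destruction bug named in the commit message: a tensor whose buffer came from the session allocator must return it through that allocator, since calling `free()` on pool-owned memory corrupts the pool. A minimal sketch of the ownership split, with a hypothetical allocator interface mirroring the calls in the diff:

    #include <cstddef>
    #include <cstdlib>

    struct Allocator {  // hypothetical minimal interface
      virtual void *Malloc(std::size_t size) = 0;
      virtual void Free(void *ptr) = 0;
      virtual ~Allocator() = default;
    };

    struct Tensor {
      void *data_ = nullptr;
      Allocator *allocator_ = nullptr;
      ~Tensor() {
        if (data_ != nullptr) {
          if (allocator_ != nullptr) {
            allocator_->Free(data_);  // buffer belongs to the allocator's pool
          } else {
            free(data_);              // buffer was plain malloc'ed
          }
        }
      }
    };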
@@ -320,4 +324,3 @@ MSTensor *MSTensor::CreateTensor(TypeId data_type, const std::vector<int> &shape
}
} // namespace tensor
} // namespace mindspore
@@ -220,8 +220,13 @@ void LiteSession::BindThread(bool ifBind) {
LiteSession::~LiteSession() {
for (auto *tensor : tensors) {
+   // weight data must not be freed here; it is freed together with the meta_graph
+   if (tensor->TensorType() == schema::NodeType_ValueNode && !IsContain(this->inputs, tensor)) {
+     tensor->SetData(nullptr);
+   }
delete tensor;
}
+ // the tensors wrapped by inputs/outputs/input_map/output_map were freed above via `tensors`
for (auto *input : inputs) {
((tensor::LiteTensor *)input)->SetTensorImpl(nullptr);
delete input;
@@ -230,9 +235,26 @@ LiteSession::~LiteSession() {
((tensor::LiteTensor *)output)->SetTensorImpl(nullptr);
delete output;
}
+ for (auto iter : this->input_map) {
+   for (auto *ms_tensor : iter.second) {
+     ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr);
+     delete ms_tensor;
+   }
+   iter.second.clear();
+ }
+ input_map.clear();
+ for (auto iter : this->output_map) {
+   for (auto *ms_tensor : iter.second) {
+     ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr);
+     delete ms_tensor;
+   }
+   iter.second.clear();
+ }
+ output_map.clear();
for (auto *kernel : kernels) {
delete kernel;
}
+ delete this->context_;
}
std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputsByName(const std::string &name) const {
......
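The destructor additions follow one pattern: detach shared state before deleting a wrapper. Weight tensors have their data pointer nulled because the bytes live in the meta-graph buffer, and the `MSTensor` wrappers in `input_map`/`output_map` have their impl detached so the underlying tensor, already deleted via `tensors`, is not freed twice. A reduced sketch of detach-before-delete; the wrapper's deleting destructor is assumed for illustration:

    #include <vector>

    struct Tensor { /* owned by the session's tensor list */ };

    struct LiteTensor {               // stand-in for tensor::LiteTensor
      Tensor *impl = nullptr;
      void SetTensorImpl(Tensor *t) { impl = t; }
      ~LiteTensor() { delete impl; }  // assumed: deleting an attached impl would double-free
    };

    void DestroyWrappers(std::vector<LiteTensor *> *wrappers) {
      for (auto *w : *wrappers) {
        w->SetTensorImpl(nullptr);    // detach: the impl was freed elsewhere
        delete w;
      }
      wrappers->clear();
    }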
@@ -56,7 +56,7 @@ lite::Primitive *ModelImpl::GetOp(const std::string &name) const {
}
ModelImpl::~ModelImpl() {
- delete (this->model_buf_);
+ delete[](this->model_buf_);
for (auto iter : ops) {
delete (iter.second);
}
@@ -64,7 +64,7 @@ ModelImpl::~ModelImpl() {
}
void ModelImpl::FreeMetaGraph() {
- delete this->model_buf_;
+ delete[](this->model_buf_);
model_buf_ = nullptr;
}
@@ -200,4 +200,3 @@ int ModelImpl::BuildOps() {
return 0;
}
} // namespace mindspore::lite
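Both `model_buf_` fixes are the same class of bug as the `ReadFile` change above: the buffer comes from `new char[size]`, so plain `delete` (and, in the kernel destructor below, `free()`) mismatches the allocation. The pairing rule in brief:

    #include <cstdlib>

    void AllocationPairs() {
      char *a = new char[16];
      delete[] a;  // new[] pairs with delete[]

      char *b = static_cast<char *>(malloc(16));
      free(b);     // malloc pairs with free

      // Any mix (delete on new[], free on new[], delete[] on malloc)
      // is undefined behavior, even if it happens to run without crashing.
    }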
@@ -32,11 +32,11 @@ namespace mindspore::kernel {
ArithmeticCPUKernel::~ArithmeticCPUKernel() {
if (tile_data0_ != nullptr) {
-   free(tile_data0_);
+   delete[](tile_data0_);
tile_data0_ = nullptr;
}
if (tile_data1_ != nullptr) {
-   free(tile_data1_);
+   delete[](tile_data1_);
tile_data1_ = nullptr;
}
}
......
@@ -175,11 +175,11 @@ kernel::LiteKernel *Scheduler::ScheduleNode(const std::vector<tensor::Tensor *>
kernel::KernelKey key{desc.arch, kNumberTypeFloat16, desc.type};
kernel = KernelFactory::GetInstance()->GetKernel(inputs, outputs, primitive, context_, key);
if (kernel != nullptr) {
MS_LOG(INFO) << "Get fp16 op success.";
MS_LOG(DEBUG) << "Get fp16 op success.";
kernel->set_desc(desc);
return kernel;
}
MS_LOG(INFO) << "Get fp16 op failed, back to fp32 op.";
MS_LOG(DEBUG) << "Get fp16 op failed, back to fp32 op.";
kernel = KernelFactory::GetInstance()->GetKernel(inputs, outputs, primitive, context_, desc);
} else {
kernel = KernelFactory::GetInstance()->GetKernel(inputs, outputs, primitive, context_, desc);
......
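The scheduler tries an fp16 kernel under the same op type first and falls back to the fp32 registration when none exists; lowering these logs to DEBUG fits, since fallback is the expected path on hardware without fp16 kernels. A schematic of the lookup-then-fallback flow with a toy registry:

    #include <map>
    #include <tuple>

    using Key = std::tuple<int /*arch*/, int /*dtype*/, int /*op_type*/>;
    using Kernel = int;  // stand-in for kernel::LiteKernel

    Kernel *GetKernel(std::map<Key, Kernel> *registry, const Key &key) {
      auto it = registry->find(key);
      return it == registry->end() ? nullptr : &it->second;
    }

    Kernel *Schedule(std::map<Key, Kernel> *registry, int arch, int op_type) {
      const int kFp16 = 16, kFp32 = 32;  // toy dtype tags
      if (Kernel *k = GetKernel(registry, {arch, kFp16, op_type})) {
        return k;  // an fp16 kernel is registered for this op: prefer it
      }
      // Expected on most platforms: fall back to the fp32 kernel.
      return GetKernel(registry, {arch, kFp32, op_type});
    }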
@@ -34,7 +34,7 @@ int Benchmark::GenerateRandomData(size_t size, void *data) {
for (size_t i = 0; i < size; i++) {
castedData[i] = static_cast<char>(i);
}
- return 0;
+ return RET_OK;
}
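From here on the benchmark replaces bare `0`, `1`, and `-1` literals with the named status codes from lite's error-code header, where `RET_OK` is 0 and `RET_ERROR` is -1, so existing `!= 0` checks keep working while the intent becomes explicit. The convention in miniature:

    #include <cstddef>

    // Values mirror lite's errorcode.h: RET_OK = 0, RET_ERROR = -1.
    constexpr int RET_OK = 0;
    constexpr int RET_ERROR = -1;

    int FillBuffer(std::size_t size, void *data) {
      if (data == nullptr) {
        return RET_ERROR;  // named code instead of a bare 1 or -1
      }
      // ... fill `size` bytes ...
      return RET_OK;  // named code instead of a bare 0
    }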
int Benchmark::GenerateInputData() {
@@ -53,7 +53,7 @@ int Benchmark::GenerateInputData() {
return status;
}
}
- return 0;
+ return RET_OK;
}
int Benchmark::LoadInput() {
@@ -70,12 +70,12 @@ int Benchmark::LoadInput() {
return status;
}
}
- return 0;
+ return RET_OK;
}
int Benchmark::ReadInputFile() {
if (msInputs.empty()) {
-   return 0;
+   return RET_OK;
}
if (this->_flags->inDataType == kImage) {
@@ -104,7 +104,7 @@ int Benchmark::ReadInputFile() {
memcpy(inputData, binBuf, tensorDataSize);
}
}
- return 0;
+ return RET_OK;
}
// calibData is FP32
@@ -114,13 +114,13 @@ int Benchmark::ReadCalibData() {
std::ifstream inFile(calibDataPath);
if (!inFile.good()) {
MS_LOG(ERROR) << "file: " << calibDataPath << " is not exist";
-   return 1;
+   return RET_ERROR;
}
if (!inFile.is_open()) {
MS_LOG(ERROR) << "file: " << calibDataPath << " open failed";
inFile.close();
-   return 1;
+   return RET_ERROR;
}
std::string line;
@@ -155,7 +155,7 @@ int Benchmark::ReadCalibData() {
}
inFile.close();
MS_LOG(INFO) << "Finish reading calibData file";
- return 0;
+ return RET_OK;
}
// tensorData need to be converter first
@@ -182,7 +182,7 @@ float Benchmark::CompareData(const std::string &nodeName, std::vector<int> msShape,
}
oss << ") are different";
MS_LOG(ERROR) << "%s", oss.str().c_str();
-   return -1;
+   return RET_ERROR;
}
size_t errorCount = 0;
float meanError = 0;
@@ -218,7 +218,7 @@ float Benchmark::CompareData(const std::string &nodeName, std::vector<int> msShape,
return meanError;
} else {
MS_LOG(INFO) << "%s is not in Source Model output", nodeName.c_str();
-   return -1;
+   return RET_ERROR;
}
}
@@ -257,14 +257,14 @@ int Benchmark::CompareOutput() {
if (meanBias > this->_flags->accuracyThreshold) {
MS_LOG(ERROR) << "Mean bias of all nodes is too big: " << meanBias << "%%";
-     return 1;
+     return RET_ERROR;
} else {
-     return 0;
+     return RET_OK;
}
} else {
MS_LOG(ERROR) << "Error in CompareData";
std::cout << "=======================================================" << std::endl << std::endl;
-   return 1;
+   return RET_ERROR;
}
}
@@ -309,7 +309,7 @@ int Benchmark::MarkPerformance() {
_flags->modelPath.substr(_flags->modelPath.find_last_of(DELIM_SLASH) + 1).c_str(), _flags->numThreads,
timeMin / 1000.0f, timeMax / 1000.0f, timeAvg / 1000.0f);
}
- return 0;
+ return RET_OK;
}
int Benchmark::MarkAccuracy() {
@@ -341,7 +341,7 @@ int Benchmark::MarkAccuracy() {
MS_LOG(ERROR) << "Compare output error " << status;
return status;
}
- return 0;
+ return RET_OK;
}
int Benchmark::RunBenchmark(const std::string &deviceType) {
@@ -353,15 +353,25 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
size_t size = 0;
char *graphBuf = ReadFile(_flags->modelPath.c_str(), &size);
if (graphBuf == nullptr) {
MS_LOG(ERROR) << "Load graph failed while running %s", modelName.c_str();
return 1;
MS_LOG(ERROR) << "Read model file failed while running %s", modelName.c_str();
return RET_ERROR;
}
auto model = lite::Model::Import(graphBuf, size);
- auto context = new lite::Context;
+ if (model == nullptr) {
+   MS_LOG(ERROR) << "Import model file failed while running %s", modelName.c_str();
+   delete[](graphBuf);
+   return RET_ERROR;
+ }
+ delete[](graphBuf);
+ auto context = new (std::nothrow) lite::Context;
+ if (context == nullptr) {
+   MS_LOG(ERROR) << "New context failed while running %s", modelName.c_str();
+   return RET_ERROR;
+ }
if (_flags->device == "CPU") {
context->device_ctx_.type = lite::DT_CPU;
} else if (_flags->device == "GPU") {
-   context->device_ctx_.type = lite::DT_GPU;
+   context->device_ctx_.type = lite::DT_GPU;
} else {
context->device_ctx_.type = lite::DT_NPU;
}
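Two error paths gain handling here: a failed model import now frees `graphBuf` and returns, and the context allocation switches to `new (std::nothrow)`, which returns nullptr instead of throwing `std::bad_alloc` and therefore must be checked. The nothrow idiom in isolation:

    #include <new>

    struct Context { int thread_num_ = 1; };  // stand-in

    Context *MakeContext() {
      auto *ctx = new (std::nothrow) Context;  // nullptr on failure, no exception
      if (ctx == nullptr) {
        return nullptr;  // propagate failure instead of crashing later
      }
      return ctx;
    }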
@@ -375,8 +385,15 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
}
context->thread_num_ = _flags->numThreads;
session = session::LiteSession::CreateSession(context);
+ delete(context);
+ if (session == nullptr) {
+   MS_LOG(ERROR) << "CreateSession failed while running %s", modelName.c_str();
+   return RET_ERROR;
+ }
auto ret = session->CompileGraph(model.get());
if (ret != RET_OK) {
MS_LOG(ERROR) << "CompileGraph failed while running %s", modelName.c_str();
delete(session);
return ret;
}
msInputs = session->GetInputs();
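Deleting `context` right after `CreateSession` implies the session keeps its own copy of the configuration rather than the caller's pointer, which matches the `delete this->context_` added to the session destructor above. A sketch of that ownership handoff with stand-in types; the copy-on-create behavior is an assumption for illustration:

    #include <new>

    struct Context { int thread_num_ = 1; };  // stand-in

    struct Session {
      Context *context_ = nullptr;

      static Session *Create(const Context *context) {
        if (context == nullptr) return nullptr;
        auto *s = new (std::nothrow) Session;
        if (s == nullptr) return nullptr;
        s->context_ = new (std::nothrow) Context(*context);  // assumed: private copy
        if (s->context_ == nullptr) { delete s; return nullptr; }
        return s;  // the caller may now delete its own Context
      }

      ~Session() { delete context_; }  // mirrors `delete this->context_` above
    };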
@@ -394,21 +411,21 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
auto status = LoadInput();
if (status != 0) {
MS_LOG(ERROR) << "Generate input data error";
-   delete graphBuf;
+   delete(session);
return status;
}
if (!_flags->calibDataPath.empty()) {
status = MarkAccuracy();
if (status != 0) {
MS_LOG(ERROR) << "Run MarkAccuracy error: %d" << status;
-     delete graphBuf;
+     delete(session);
return status;
}
} else {
status = MarkPerformance();
if (status != 0) {
MS_LOG(ERROR) << "Run MarkPerformance error: %d" << status;
-     delete graphBuf;
+     delete(session);
return status;
}
}
@@ -422,8 +439,8 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
calibData.clear();
}
- delete graphBuf;
- return 0;
+ delete(session);
+ return RET_OK;
}
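After this change every early return releases the session, and `graphBuf` is freed once, right after `Import`, instead of at each exit. The code base manages this manually; where smart pointers are acceptable, a `std::unique_ptr` expresses the same guarantee without per-path cleanup. A hedged sketch of that alternative, with placeholder steps:

    #include <memory>
    #include <new>

    struct Session { /* ... */ };
    int Compile(Session *) { return 0; }  // placeholder step
    int Run(Session *) { return -1; }     // placeholder step

    int RunBenchmark() {
      std::unique_ptr<Session> session(new (std::nothrow) Session);
      if (session == nullptr) return -1;
      int ret = Compile(session.get());
      if (ret != 0) return ret;  // session freed automatically
      ret = Run(session.get());
      if (ret != 0) return ret;  // session freed automatically
      return 0;                  // and on success, too
    }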
void BenchmarkFlags::InitInputDataList() {
@@ -488,10 +505,10 @@ int Benchmark::Init() {
_flags->InitResizeDimsList();
if (!_flags->resizeDims.empty() && _flags->resizeDims.size() != _flags->input_data_list.size()) {
MS_LOG(ERROR) << "Size of input resizeDims should be equal to size of input inDataPath";
-   return 1;
+   return RET_ERROR;
}
- return 0;
+ return RET_OK;
}
int RunBenchmark(int argc, const char **argv) {
@@ -501,19 +518,19 @@ int RunBenchmark(int argc, const char **argv) {
if (err.IsSome()) {
std::cerr << err.Get() << std::endl;
std::cerr << flags.Usage() << std::endl;
-   return -1;
+   return RET_ERROR;
}
if (flags.help) {
std::cerr << flags.Usage() << std::endl;
-   return 0;
+   return RET_OK;
}
Benchmark mBenchmark(&flags);
auto status = mBenchmark.Init();
if (status != 0) {
MS_LOG(ERROR) << "Benchmark init Error : " << status;
-   return 1;
+   return RET_ERROR;
}
if (flags.device == "NPU") {
@@ -525,12 +542,12 @@ int RunBenchmark(int argc, const char **argv) {
if (status != 0) {
MS_LOG(ERROR) << "Run Benchmark " << flags.modelPath.substr(flags.modelPath.find_last_of(DELIM_SLASH) + 1).c_str()
<< " Failed : " << status;
-   return 1;
+   return RET_ERROR;
}
MS_LOG(INFO) << "Run Benchmark " << flags.modelPath.substr(flags.modelPath.find_last_of(DELIM_SLASH) + 1).c_str()
<< " Success.";
- return 0;
+ return RET_OK;
}
} // namespace lite
} // namespace mindspore