Commit 2cedb2ca authored by liuzhongkai

Fix memory leaks in the OpenCL kernel creators and tests: free kernels, parameters, tensors, and sub-graphs on every error path, and guard tensor/parameter allocations with new (std::nothrow) plus null checks.

Parent b4b76b61
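Every change below fixes the same shape of bug: an object allocated with new escapes through an early return, so the only pointer to it is lost. A minimal sketch of the pattern, with a hypothetical kernel class standing in for the real ones (illustrative, not code from this commit):

// Sketch: the leak shape this commit fixes. kernel::SomeOpenCLKernel is a
// hypothetical stand-in for the kernels touched below.
kernel::LiteKernel *CreateKernel(OpParameter *param,
                                 const std::vector<lite::tensor::Tensor *> &inputs,
                                 const std::vector<lite::tensor::Tensor *> &outputs) {
  auto *kernel = new (std::nothrow) kernel::SomeOpenCLKernel(param, inputs, outputs);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new kernel failed.";
    return nullptr;
  }
  if (kernel->Init() != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed.";
    delete kernel;  // without this line, the kernel leaks on the error path
    return nullptr;
  }
  return kernel;
}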
@@ -201,9 +201,9 @@ kernel::LiteKernel *OpenCLDepthwiseConv2dKernelCreator(const std::vector<lite::t
     return nullptr;
   }
   auto ret = kernel->Init();
-  if (0 != ret) {
-    MS_LOG(ERROR) << "Init DepthwiseConv2dOpenCLKernel failed!";
+  if (ret != RET_OK) {
     delete kernel;
+    MS_LOG(ERROR) << "Init DepthwiseConv2dOpenCLKernel failed!";
     return nullptr;
   }
   return kernel;
...
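The manual delete can also be made automatic. A sketch (not part of this commit) of the creator pattern above rewritten with std::unique_ptr, which frees the kernel on any early return and hands ownership to the caller only on success:

#include <memory>

// RAII variant of the creator pattern (sketch; kernel::SomeOpenCLKernel is
// again a hypothetical stand-in). Note: std::make_unique throws
// std::bad_alloc on failure instead of returning nullptr like new (std::nothrow).
kernel::LiteKernel *CreateKernelRAII(OpParameter *param,
                                     const std::vector<lite::tensor::Tensor *> &inputs,
                                     const std::vector<lite::tensor::Tensor *> &outputs) {
  auto kernel = std::make_unique<kernel::SomeOpenCLKernel>(param, inputs, outputs);
  if (kernel->Init() != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed.";
    return nullptr;  // unique_ptr deletes the kernel here automatically
  }
  return kernel.release();  // transfer ownership to the caller on success
}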
@@ -175,6 +175,8 @@ kernel::LiteKernel *OpenCLSoftMaxKernelCreator(const std::vector<lite::tensor::T
   }
   if (inputs[0]->shape()[0] > 1) {
     MS_LOG(ERROR) << "Init `Softmax` kernel failed: Unsupported multi-batch.";
+    delete kernel;
+    return nullptr;
   }
   auto ret = kernel->Init();
   if (0 != ret) {
...
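Note that the multi-batch guard in the Softmax creator previously logged the error and then fell through, so Init() still ran on an unsupported shape and the kernel was never freed. Reconstructed from the old side of the hunk:

// Pre-fix control flow in OpenCLSoftMaxKernelCreator:
if (inputs[0]->shape()[0] > 1) {
  MS_LOG(ERROR) << "Init `Softmax` kernel failed: Unsupported multi-batch.";
  // no delete, no return: execution fell through to kernel->Init()
}
auto ret = kernel->Init();

The added delete kernel / return nullptr closes both holes.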
@@ -88,11 +88,14 @@ kernel::ActivationOpenClKernel *create_kernel(lite::opencl::OpenCLAllocator *all
   auto *kernel =
     new (std::nothrow) kernel::ActivationOpenClKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs);
   if (kernel == nullptr) {
+    delete param;
     MS_LOG(ERROR) << "Kernel:" << test_name << " create fail.";
     return nullptr;
   }
   auto ret = kernel->Init();
   if (ret != RET_OK) {
+    delete param;
+    delete kernel;
     MS_LOG(ERROR) << "Init " << test_name << " fail.";
     return nullptr;
   }
@@ -110,18 +113,22 @@ int RunSubGraphOpenCLKernel(const std::vector<lite::tensor::Tensor *> &inputs,
   std::vector<kernel::LiteKernel *> kernels{kernel};
   auto *sub_graph = new (std::nothrow) kernel::SubGraphOpenCLKernel(inputs, outputs, kernels, kernels, kernels);
   if (sub_graph == nullptr) {
+    delete kernel;
     MS_LOG(ERROR) << "Kernel SubGraphOpenCLKernel create fail.";
     return RET_ERROR;
   }
   MS_LOG(INFO) << "Initialize sub_graph.";
   auto ret = sub_graph->Init();
   if (ret != RET_OK) {
+    delete kernel;
+    delete sub_graph;
     MS_LOG(ERROR) << "Init sub_graph error.";
     return RET_ERROR;
   }
   MS_LOG(INFO) << "Run SubGraphOpenCLKernel.";
   ret = sub_graph->Run();
   if (ret != RET_OK) {
+    delete sub_graph;
     MS_LOG(ERROR) << "Run SubGraphOpenCLKernel error.";
     return RET_ERROR;
   }
@@ -130,7 +137,7 @@ int RunSubGraphOpenCLKernel(const std::vector<lite::tensor::Tensor *> &inputs,
 }
 
 TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
-  MS_LOG(INFO) << "Begin test:";
+  MS_LOG(INFO) << "Begin test!";
   auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
   ocl_runtime->Init();
   auto allocator = ocl_runtime->GetAllocator();
@@ -140,11 +147,21 @@ TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
   auto data_type = kNumberTypeFloat32;
   auto tensor_type = schema::NodeType_ValueNode;
-  auto *input_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
-  auto *output_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
+  auto *input_tensor =
+    new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
+  if (input_tensor == nullptr) {
+    MS_LOG(ERROR) << "new input tensor error!";
+    return;
+  }
+  auto *output_tensor =
+    new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
+  if (output_tensor == nullptr) {
+    MS_LOG(ERROR) << "new output tensor error!";
+    delete input_tensor;
+    return;
+  }
   std::vector<lite::tensor::Tensor *> inputs{input_tensor};
   std::vector<lite::tensor::Tensor *> outputs{output_tensor};
-  // freamework to do!!! allocate memory by hand
   inputs[0]->MallocData(allocator);
   std::map<std::string, int> Test_Activation_Type;
@@ -175,13 +192,11 @@ TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
     MS_LOG(INFO) << "==================output data================";
     printf_tensor(outputs[0]);
     CompareRes(output_tensor, Test_Res_File[it->first]);
+    delete kernel;
     it++;
   }
   delete input_tensor;
   delete output_tensor;
   lite::opencl::OpenCLRuntime::DeleteInstance();
-  return;
 }
 } // namespace mindspore
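After this change every early return in the tests repeats the full delete list, which is easy to get wrong as the list grows. A common alternative is a small scope guard that runs the cleanup on every exit path; a self-contained sketch, not used in this commit:

#include <functional>
#include <utility>

// Runs the stored callable when it leaves scope, on any return path.
class ScopeGuard {
 public:
  explicit ScopeGuard(std::function<void()> f) : f_(std::move(f)) {}
  ~ScopeGuard() {
    if (f_) f_();
  }
  ScopeGuard(const ScopeGuard &) = delete;
  ScopeGuard &operator=(const ScopeGuard &) = delete;

 private:
  std::function<void()> f_;
};

// Hypothetical use in a test body:
//   ScopeGuard cleanup([&] { delete input_tensor; delete output_tensor; });
//   ...
//   if (ret != RET_OK) return;  // tensors are freed by the guard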
@@ -93,15 +93,29 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
   std::vector<int> output_shape = {1, 4, 3, 9};
   auto data_type = kNumberTypeFloat32;
   auto tensor_type = schema::NodeType_ValueNode;
-  auto *input_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
+  auto *input_tensor =
+    new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
+  if (input_tensor == nullptr) {
+    MS_LOG(ERROR) << "new input tensor error";
+    return;
+  }
   auto *output_tensor = new lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type);
-  auto *weight_tensor =
-    new lite::tensor::Tensor(data_type, std::vector<int>{input_shape[3]}, schema::Format_NHWC, tensor_type);
+  if (output_tensor == nullptr) {
+    MS_LOG(ERROR) << "new output_tensor error";
+    delete input_tensor;
+    return;
+  }
+  auto *weight_tensor = new (std::nothrow)
+    lite::tensor::Tensor(data_type, std::vector<int>{input_shape[3]}, schema::Format_NHWC, tensor_type);
+  if (weight_tensor == nullptr) {
+    MS_LOG(ERROR) << "new weight_tensor error";
+    delete input_tensor;
+    delete output_tensor;
+    return;
+  }
   std::vector<lite::tensor::Tensor *> inputs{input_tensor, weight_tensor};
   std::vector<lite::tensor::Tensor *> outputs{output_tensor};
-  std::cout << input_tensor->ElementsNum() << std::endl;
-  std::cout << input_tensor->ElementsC4Num() << std::endl;
-  // freamework to do!!! allocate memory by hand
   inputs[0]->MallocData(allocator);
   inputs[1]->MallocData(allocator);
   std::cout << input_tensor->Size() << std::endl;
@@ -113,17 +127,33 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
   MS_LOG(INFO) << "CaffePRelu==================weight data================";
   printf_tensor_caffeprelu(inputs[1], weight_tensor->ElementsNum());
-  auto param = new CaffePReluParameter();
+  auto param = new (std::nothrow) CaffePReluParameter();
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "new param error!";
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    return;
+  }
   param->channel_num_ = input_shape[3];
   auto *caffeprelu_kernel =
     new (std::nothrow) kernel::CaffePReluOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs);
   if (caffeprelu_kernel == nullptr) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
     MS_LOG(ERROR) << "Create caffe prelu kernel error.";
     return;
   }
   auto ret = caffeprelu_kernel->Init();
   if (ret != RET_OK) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete caffeprelu_kernel;
     MS_LOG(ERROR) << "caffeprelu_kernel init error.";
     return;
   }
@@ -132,24 +162,42 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
   std::vector<kernel::LiteKernel *> kernels{caffeprelu_kernel};
   auto *sub_graph = new (std::nothrow) kernel::SubGraphOpenCLKernel({input_tensor}, outputs, kernels, kernels, kernels);
   if (sub_graph == nullptr) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete caffeprelu_kernel;
     MS_LOG(ERROR) << "Create sub_graph kernel error.";
     return;
   }
   ret = sub_graph->Init();
   if (ret != RET_OK) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete caffeprelu_kernel;
+    delete sub_graph;
     MS_LOG(ERROR) << "sub_graph init error.";
     return;
   }
   MS_LOG(INFO) << "Sub graph begin running!";
   ret = sub_graph->Run();
   if (ret != RET_OK) {
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete sub_graph;
     MS_LOG(ERROR) << "sub_graph run error.";
     return;
   }
   MS_LOG(INFO) << "CaffePRelu==================output data================";
   printf_tensor_caffeprelu(outputs[0], output_tensor->ElementsC4Num());
-  std::cout << "output date size:" << output_tensor->Size() << std::endl;
   CompareOutCaffePRelu(output_tensor, standard_answer_file);
+  delete input_tensor;
+  delete output_tensor;
+  delete weight_tensor;
+  delete sub_graph;
 }
 } // namespace mindspore
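The allocate-then-null-check ladder repeats once per tensor in these tests and could be factored into a helper. A sketch with a hypothetical MakeTensor function (the constructor signature matches the calls in the diff; the TypeId parameter type is assumed from kNumberTypeFloat32):

// Hypothetical helper, not in this commit: allocate a tensor, log on failure.
// The caller still owns the result and must delete it (or guard it).
lite::tensor::Tensor *MakeTensor(TypeId data_type, const std::vector<int> &shape,
                                 schema::Format format, schema::NodeType tensor_type) {
  auto *tensor = new (std::nothrow) lite::tensor::Tensor(data_type, shape, format, tensor_type);
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "new Tensor failed";
  }
  return tensor;
}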