magicwindyyd / mindspore (forked from MindSpore / mindspore)
Commit 2cedb2ca
Authored Aug 19, 2020 by liuzhongkai

memory leak

Parent: b4b76b61

Showing 4 changed files with 81 additions and 16 deletions (+81 -16)
mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc  +2 -2
mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc  +2 -0
mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc  +21 -6
mindspore/lite/test/ut/src/runtime/kernel/opencl/caffe_prelu_tests.cc  +56 -8
mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc (+2 -2)

@@ -201,9 +201,9 @@ kernel::LiteKernel *OpenCLDepthwiseConv2dKernelCreator(const std::vector<lite::t
     return nullptr;
   }
   auto ret = kernel->Init();
-  if (0 != ret) {
-    MS_LOG(ERROR) << "Init DepthwiseConv2dOpenCLKernel failed!";
+  if (ret != RET_OK) {
     delete kernel;
+    MS_LOG(ERROR) << "Init DepthwiseConv2dOpenCLKernel failed!";
     return nullptr;
   }
   return kernel;
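The change above is the pattern this commit applies throughout: compare the return code against RET_OK and free the partially constructed kernel before every early return. A minimal, self-contained sketch of that flow, using hypothetical `Widget`/`CreateWidget` names rather than the MindSpore classes:

```cpp
#include <iostream>
#include <new>

constexpr int RET_OK = 0;

struct Widget {
  // Init can fail; on failure the caller is responsible for cleanup.
  int Init(bool ok) { return ok ? RET_OK : -1; }
};

// Creator in the style of the OpenCL kernel creators: every early-return
// path releases what was already allocated, so a failed Init no longer leaks.
Widget *CreateWidget(bool init_ok) {
  auto *w = new (std::nothrow) Widget();
  if (w == nullptr) {
    std::cerr << "new Widget failed\n";
    return nullptr;
  }
  auto ret = w->Init(init_ok);
  if (ret != RET_OK) {
    delete w;  // the fix: free before bailing out
    std::cerr << "Init Widget failed\n";
    return nullptr;
  }
  return w;
}

int main() {
  Widget *good = CreateWidget(true);
  Widget *bad = CreateWidget(false);  // returns nullptr, nothing leaked
  delete good;
  return bad == nullptr ? 0 : 1;
}
```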
mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc (+2 -0)

@@ -175,6 +175,8 @@ kernel::LiteKernel *OpenCLSoftMaxKernelCreator(const std::vector<lite::tensor::T
   }
   if (inputs[0]->shape()[0] > 1) {
     MS_LOG(ERROR) << "Init `Softmax` kernel failed: Unsupported multi-batch.";
+    delete kernel;
+    return nullptr;
   }
   auto ret = kernel->Init();
   if (0 != ret) {
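The softmax creator now releases the kernel when it rejects multi-batch input instead of falling through with a leak. An alternative way to get the same guarantee, shown here only as a sketch of what a smart-pointer version could look like and not as anything this commit does, is to hold the object in a `std::unique_ptr` so every early return frees it automatically:

```cpp
#include <iostream>
#include <memory>

constexpr int RET_OK = 0;

// Hypothetical stand-in for an OpenCL kernel object.
struct Kernel {
  int batch;
  explicit Kernel(int b) : batch(b) {}
  int Init() { return RET_OK; }
};

// Same control flow as the patched creator, but ownership is held by
// std::unique_ptr, so each early `return nullptr;` releases the kernel
// without an explicit delete.
Kernel *CreateKernel(int batch) {
  auto kernel = std::make_unique<Kernel>(batch);
  if (kernel->batch > 1) {
    std::cerr << "Unsupported multi-batch.\n";
    return nullptr;  // unique_ptr destructor frees the kernel here
  }
  if (kernel->Init() != RET_OK) {
    std::cerr << "Init failed.\n";
    return nullptr;
  }
  return kernel.release();  // hand raw ownership back to the caller
}

int main() {
  delete CreateKernel(1);          // success path
  Kernel *none = CreateKernel(4);  // rejected, nothing leaked
  return none == nullptr ? 0 : 1;
}
```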
mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc (+21 -6)

@@ -88,11 +88,14 @@ kernel::ActivationOpenClKernel *create_kernel(lite::opencl::OpenCLAllocator *all
   auto *kernel =
     new (std::nothrow) kernel::ActivationOpenClKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs);
   if (kernel == nullptr) {
+    delete param;
     MS_LOG(ERROR) << "Kernel:" << test_name << " create fail.";
     return nullptr;
   }
   auto ret = kernel->Init();
   if (ret != RET_OK) {
+    delete param;
+    delete kernel;
     MS_LOG(ERROR) << "Init " << test_name << " fail.";
     return nullptr;
   }
@@ -110,18 +113,22 @@ int RunSubGraphOpenCLKernel(const std::vector<lite::tensor::Tensor *> &inputs,
   std::vector<kernel::LiteKernel *> kernels{kernel};
   auto *sub_graph = new (std::nothrow) kernel::SubGraphOpenCLKernel(inputs, outputs, kernels, kernels, kernels);
   if (sub_graph == nullptr) {
+    delete kernel;
     MS_LOG(ERROR) << "Kernel SubGraphOpenCLKernel create fail.";
     return RET_ERROR;
   }
   MS_LOG(INFO) << "Initialize sub_graph.";
   auto ret = sub_graph->Init();
   if (ret != RET_OK) {
+    delete kernel;
+    delete sub_graph;
     MS_LOG(ERROR) << "Init sub_graph error.";
     return RET_ERROR;
   }
   MS_LOG(INFO) << "Run SubGraphOpenCLKernel.";
   ret = sub_graph->Run();
   if (ret != RET_OK) {
+    delete sub_graph;
     MS_LOG(ERROR) << "Run SubGraphOpenCLKernel error.";
     return RET_ERROR;
   }
@@ -130,7 +137,7 @@ int RunSubGraphOpenCLKernel(const std::vector<lite::tensor::Tensor *> &inputs,
 }

 TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
-  MS_LOG(INFO) << "Begin test:";
+  MS_LOG(INFO) << "Begin test!";
   auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
   ocl_runtime->Init();
   auto allocator = ocl_runtime->GetAllocator();
@@ -140,11 +147,21 @@ TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
   auto data_type = kNumberTypeFloat32;
   auto tensor_type = schema::NodeType_ValueNode;
-  auto *input_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
-  auto *output_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
+  auto *input_tensor =
+    new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
+  if (input_tensor == nullptr) {
+    MS_LOG(ERROR) << "new input tensor error!";
+    return;
+  }
+  auto *output_tensor =
+    new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
+  if (output_tensor == nullptr) {
+    MS_LOG(ERROR) << "new output tensor error!";
+    delete input_tensor;
+    return;
+  }
   std::vector<lite::tensor::Tensor *> inputs{input_tensor};
   std::vector<lite::tensor::Tensor *> outputs{output_tensor};
   // freamework to do!!! allocate memory by hand
   inputs[0]->MallocData(allocator);
   std::map<std::string, int> Test_Activation_Type;
@@ -175,13 +192,11 @@ TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
     MS_LOG(INFO) << "==================output data================";
     printf_tensor(outputs[0]);
     CompareRes(output_tensor, Test_Res_File[it->first]);
+    delete kernel;
     it++;
   }
   delete input_tensor;
   delete output_tensor;
   lite::opencl::OpenCLRuntime::DeleteInstance();
-  return;
 }
 }  // namespace mindspore
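The test changes switch the tensor allocations to `new (std::nothrow)`, check each result, and free the earlier allocations when a later one fails, so a half-built test fixture does not leak. A standalone sketch of that cascading cleanup, with a hypothetical `Tensor`/`SetUpTensors` rather than the `lite::tensor` API:

```cpp
#include <iostream>
#include <new>

// Minimal stand-in for the tensors allocated in the test.
struct Tensor {
  int size;
  explicit Tensor(int s) : size(s) {}
};

// Mirrors the patched test setup: each allocation is checked, and a failure
// frees everything allocated before it, so nothing leaks even when
// construction stops halfway.
bool SetUpTensors(Tensor **input, Tensor **output) {
  *input = new (std::nothrow) Tensor(16);
  if (*input == nullptr) {
    std::cerr << "new input tensor error!\n";
    return false;
  }
  *output = new (std::nothrow) Tensor(16);
  if (*output == nullptr) {
    std::cerr << "new output tensor error!\n";
    delete *input;  // the earlier allocation must not leak
    *input = nullptr;
    return false;
  }
  return true;
}

int main() {
  Tensor *in = nullptr;
  Tensor *out = nullptr;
  if (!SetUpTensors(&in, &out)) {
    return 1;
  }
  // ... run the kernel under test ...
  delete in;
  delete out;
  return 0;
}
```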
mindspore/lite/test/ut/src/runtime/kernel/opencl/caffe_prelu_tests.cc (+56 -8)

@@ -93,15 +93,29 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
   std::vector<int> output_shape = {1, 4, 3, 9};
   auto data_type = kNumberTypeFloat32;
   auto tensor_type = schema::NodeType_ValueNode;
-  auto *input_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
+  auto *input_tensor =
+    new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
+  if (input_tensor == nullptr) {
+    MS_LOG(ERROR) << "new input tensor error";
+    return;
+  }
   auto *output_tensor = new lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type);
+  if (output_tensor == nullptr) {
+    MS_LOG(ERROR) << "new output_tensor error";
+    delete input_tensor;
+    return;
+  }
   auto *weight_tensor =
-    new lite::tensor::Tensor(data_type, std::vector<int>{input_shape[3]}, schema::Format_NHWC, tensor_type);
+    new (std::nothrow) lite::tensor::Tensor(data_type, std::vector<int>{input_shape[3]}, schema::Format_NHWC, tensor_type);
+  if (weight_tensor == nullptr) {
+    MS_LOG(ERROR) << "new weight_tensor error";
+    delete input_tensor;
+    delete output_tensor;
+    return;
+  }
   std::vector<lite::tensor::Tensor *> inputs{input_tensor, weight_tensor};
   std::vector<lite::tensor::Tensor *> outputs{output_tensor};
-  std::cout << input_tensor->ElementsNum() << std::endl;
-  std::cout << input_tensor->ElementsC4Num() << std::endl;
   // freamework to do!!! allocate memory by hand
   inputs[0]->MallocData(allocator);
   inputs[1]->MallocData(allocator);
   std::cout << input_tensor->Size() << std::endl;
@@ -113,17 +127,33 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
   MS_LOG(INFO) << "CaffePRelu==================weight data================";
   printf_tensor_caffeprelu(inputs[1], weight_tensor->ElementsNum());
-  auto param = new CaffePReluParameter();
+  auto param = new (std::nothrow) CaffePReluParameter();
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "new param error!";
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    return;
+  }
   param->channel_num_ = input_shape[3];
   auto *caffeprelu_kernel =
     new (std::nothrow) kernel::CaffePReluOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs);
   if (caffeprelu_kernel == nullptr) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
     MS_LOG(ERROR) << "Create caffe prelu kernel error.";
     return;
   }
   auto ret = caffeprelu_kernel->Init();
   if (ret != RET_OK) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete caffeprelu_kernel;
     MS_LOG(ERROR) << "caffeprelu_kernel init error.";
     return;
   }
@@ -132,24 +162,42 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
   std::vector<kernel::LiteKernel *> kernels{caffeprelu_kernel};
   auto *sub_graph = new (std::nothrow) kernel::SubGraphOpenCLKernel({input_tensor}, outputs, kernels, kernels, kernels);
   if (sub_graph == nullptr) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete caffeprelu_kernel;
     MS_LOG(ERROR) << "Create sub_graph kernel error.";
     return;
   }
   ret = sub_graph->Init();
   if (ret != RET_OK) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete caffeprelu_kernel;
+    delete sub_graph;
     MS_LOG(ERROR) << "sub_graph init error.";
     return;
   }
   MS_LOG(INFO) << "Sub graph begin running!";
   ret = sub_graph->Run();
   if (ret != RET_OK) {
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete sub_graph;
     MS_LOG(ERROR) << "sub_graph run error.";
     return;
   }
   MS_LOG(INFO) << "CaffePRelu==================output data================";
   printf_tensor_caffeprelu(outputs[0], output_tensor->ElementsC4Num());
+  std::cout << "output date size:" << output_tensor->Size() << std::endl;
   CompareOutCaffePRelu(output_tensor, standard_answer_file);
+  delete input_tensor;
+  delete output_tensor;
+  delete weight_tensor;
+  delete sub_graph;
 }
 }  // namespace mindspore
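This test now repeats the same chain of `delete` statements on every error branch. One way to state the cleanup once, sketched here as a possible refactor rather than anything done in this commit, is a small scope guard that runs on every exit path (the `ScopeGuard`, `Tensor`, and `RunTest` names are hypothetical):

```cpp
#include <functional>
#include <iostream>
#include <new>
#include <utility>

// A tiny scope guard: runs its callback when the enclosing scope exits.
// Lets the repeated "delete input_tensor; delete output_tensor; ..." chains
// be written exactly once.
class ScopeGuard {
 public:
  explicit ScopeGuard(std::function<void()> fn) : fn_(std::move(fn)) {}
  ~ScopeGuard() {
    if (fn_) fn_();
  }
  void Dismiss() { fn_ = nullptr; }  // cancel cleanup if ownership is handed off

 private:
  std::function<void()> fn_;
};

struct Tensor {
  int n = 0;
};

int RunTest(bool fail_midway) {
  auto *input = new (std::nothrow) Tensor();
  auto *weight = new (std::nothrow) Tensor();
  ScopeGuard cleanup([&] {
    delete input;   // deleting nullptr is safe
    delete weight;
  });
  if (input == nullptr || weight == nullptr) {
    std::cerr << "allocation failed\n";
    return 1;  // guard frees whatever was allocated
  }
  if (fail_midway) {
    std::cerr << "kernel init failed\n";
    return 1;  // guard frees both tensors
  }
  // success path: the guard also frees both tensors at scope exit
  return 0;
}

int main() {
  RunTest(true);          // failure path, nothing leaks
  return RunTest(false);  // success path
}
```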