magicwindyyd / mindspore · Commit 42594daf
Forked from MindSpore / mindspore (in sync with the upstream project)
Authored on Aug 03, 2020 by mindspore-ci-bot
Committed by Gitee on Aug 03, 2020
!3898 matmul bug
Merge pull request !3898 from ling/conv1x1
Parents: 0f4f343e 60d7506c
Showing 7 changed files with 12 additions and 21 deletions (+12 -21):
mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h            +3  -4
mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h         +3  -4
mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h         +1  -2
mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h                 +1  -1
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/matmul.cc              +0  -6
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc  +3  -3
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc  +1  -1
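Two changes are visible across these seven files: every kernel constructor and unit test that previously read the thread count as ctx->threadNum now reads ctx->thread_num_, and the fp32 MatMul wrapper in opclib drops its aarch64-only fast path. As orientation only, the rename implies a lite::Context shaped roughly like the sketch below; the real definition is not part of this diff, so the member layout and default shown here are assumptions inferred from the call sites that follow.

namespace mindspore {
namespace lite {
class Context {
 public:
  // Formerly read by the ARM kernels as `threadNum`; after this commit the
  // call sites below read `thread_num_` instead.
  int thread_num_ = 2;  // default value is illustrative only
};
}  // namespace lite
}  // namespace mindspore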
mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h

@@ -26,12 +26,10 @@ class ArgMinMaxBaseCPUKernel : public LiteKernel {
   ArgMinMaxBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                          const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
       : LiteKernel(parameter, inputs, outputs), context_(ctx), data_from_allocator_(false) {
-    opParameter->thread_num_ = ctx->threadNum;
+    opParameter->thread_num_ = ctx->thread_num_;
   }
   virtual ~ArgMinMaxBaseCPUKernel() { FreeTmpMemory(); }
   int Init() override;

@@ -40,6 +38,7 @@ class ArgMinMaxBaseCPUKernel : public LiteKernel {
   int Run() override;
   void FreeTmpMemory();
  private:
   const lite::Context *context_;
   bool data_from_allocator_;
mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h

@@ -27,7 +27,7 @@ class BatchToSpaceBaseCPUKernel : public LiteKernel {
   BatchToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                             const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
       : LiteKernel(parameter, inputs, outputs) {
-    opParameter->thread_num_ = ctx->threadNum;
+    opParameter->thread_num_ = ctx->thread_num_;
   }
   virtual ~BatchToSpaceBaseCPUKernel() = default;

@@ -38,9 +38,8 @@ class BatchToSpaceBaseCPUKernel : public LiteKernel {
   int Run() override { return 0; }
   bool IsNoCrop() const { return no_crop_; }
  private:
   bool no_crop_;
 };
mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h

@@ -27,7 +27,7 @@ class DepthToSpaceBaseCPUKernel : public LiteKernel {
   DepthToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                             const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
       : LiteKernel(parameter, inputs, outputs) {
-    opParameter->thread_num_ = ctx->threadNum;
+    opParameter->thread_num_ = ctx->thread_num_;
   }
   virtual ~DepthToSpaceBaseCPUKernel() = default;

@@ -39,5 +39,4 @@ class DepthToSpaceBaseCPUKernel : public LiteKernel {
   int Run() override { return 0; }
 };
 }  // namespace mindspore::kernel
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_DEPTH_TO_SPACE_BASE_H_
mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h

@@ -29,7 +29,7 @@ class MatmulBaseCPUKernel : public LiteKernel {
  public:
   MatmulBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                       const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
-      : LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
+      : LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
     params_ = reinterpret_cast<MatMulParameter *>(opParameter);
   }
   ~MatmulBaseCPUKernel() = default;
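MatmulBaseCPUKernel now caches ctx->thread_num_ in thread_count_, which a matmul kernel typically uses to split output rows across worker threads. None of that scheduling code appears in this commit, so the helper below is only a generic, hypothetical sketch of such a partition (RowsForThread is not a MindSpore function).

#include <algorithm>

// Hypothetical helper: number of output rows a given worker handles when
// total_rows are divided into equal-sized chunks across thread_count workers.
int RowsForThread(int total_rows, int thread_count, int thread_id) {
  int stride = (total_rows + thread_count - 1) / thread_count;  // ceiling division
  int start = stride * thread_id;
  return std::max(0, std::min(stride, total_rows - start));
}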
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/matmul.cc

@@ -76,12 +76,6 @@ void MatMul8x8(const float *a, const float *b, float *c, const float *bias, ActT
 void MatMul(const float *a, const float *b, float *c, const float *bias, ActType act_type, int deep, int row_8_,
             int col_8_) {
-#ifdef __aarch64__
-  float minf = (act_type == ActType_No) ? FLT_MIN : 0.f;
-  float maxf = (act_type == ActType_Relu6) ? 6.0f : FLT_MAX;
-  MatMulFloatNeon64(a, b, c, bias, maxf, minf, deep, row_8_, col_8_);
-#else
   MatMul8x8(a, b, c, bias, act_type, deep, row_8_, col_8_);
-#endif
   return;
 }
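With the #ifdef removed, MatMul dispatches to the generic MatMul8x8 path on every target, including aarch64, instead of calling MatMulFloatNeon64. One plausible reading of the deleted lines (not stated in the commit message) is that the NEON path's clamp bounds were wrong: FLT_MIN is the smallest positive normal float, not a large negative value, so using it as the lower bound when no activation is requested would squash all negative outputs. A hedged sketch of bounds matching the usual ReLU/ReLU6 semantics, assuming the ActType enumerators seen above:

#include <cfloat>

// Sketch only: activation-dependent clamp bounds. -FLT_MAX (rather than FLT_MIN)
// leaves negative outputs untouched when no activation is applied.
static void ActClampBounds(ActType act_type, float *minf, float *maxf) {
  *minf = (act_type == ActType_No) ? -FLT_MAX : 0.0f;
  *maxf = (act_type == ActType_Relu6) ? 6.0f : FLT_MAX;
}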
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc

@@ -67,7 +67,7 @@ TEST_F(TestMatMulFp32, simple) {
   std::vector<int> c_shape = {1, 2, 3};
   int total_size = MMTestInit(&inputs_, &outputs_, a, b, a_shape, b_shape, c_shape);
   auto ctx = new lite::Context;
-  ctx->threadNum = 2;
+  ctx->thread_num_ = 2;
   auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx);
   mm->Init();
   mm->Run();

@@ -98,7 +98,7 @@ TEST_F(TestMatMulFp32, simple_transb) {
   std::vector<int> c_shape = {1, 2, 3};
   int total_size = MMTestInit(&inputs_, &outputs_, a, b, a_shape, b_shape, c_shape);
   auto ctx = new lite::Context;
-  ctx->threadNum = 2;
+  ctx->thread_num_ = 2;
   auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx);
   mm->Init();
   mm->Run();

@@ -148,7 +148,7 @@ TEST_F(TestMatMulFp32, batch) {
   std::vector<int> c_shape = {3, 2, 3};
   int total_size = MMTestInit(&inputs_, &outputs_, a, b, a_shape, b_shape, c_shape);
   auto ctx = new lite::Context;
-  ctx->threadNum = 1;
+  ctx->thread_num_ = 1;
   auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx);
   mm->Init();
   mm->Run();
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc

@@ -106,7 +106,7 @@ TEST_F(TestMatmulInt8, mmint8) {
   int output_zp;
   int total_size = MMInt8TestInit(&inputs_, &outputs_, matmul_param, &correct, &output_scale, &output_zp);
   auto ctx = new lite::Context;
-  ctx->threadNum = 2;
+  ctx->thread_num_ = 2;
   kernel::MatmulInt8CPUKernel *mm =
     new kernel::MatmulInt8CPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx);
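The int8 test obtains an output scale and zero point from MMInt8TestInit along with the expected result. For reference, an affine-quantized int8 value maps back to a real value as real = scale * (q - zero_point); the helper below is a minimal illustrative sketch, not part of the MindSpore sources.

#include <cstdint>

// Hypothetical helper: dequantize a single int8 value given its scale and zero point.
inline float Dequantize(int8_t q, float scale, int zero_point) {
  return scale * static_cast<float>(static_cast<int>(q) - zero_point);
}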