Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
magicwindyyd
mindspore
提交
df6707bd
M
mindspore
项目概览
magicwindyyd
/
mindspore
与 Fork 源项目一致
Fork自
MindSpore / mindspore
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
mindspore
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
df6707bd
编写于
8月 03, 2020
作者:
M
mindspore-ci-bot
提交者:
Gitee
8月 03, 2020
浏览文件
操作
浏览文件
下载
差异文件
!3785 [lite] batch_to_space, depth_to_space,argmin,argmax support int8
Merge pull request !3785 from chenjianping/lite_dev
上级
0a89563f
131cad16
变更
34
展开全部
隐藏空白更改
内联
并排
Showing
34 changed files
with
2048 additions
and
226 deletions
+2048
-226
mindspore/lite/src/ops/argmax.cc
mindspore/lite/src/ops/argmax.cc
+1
-1
mindspore/lite/src/ops/argmin.cc
mindspore/lite/src/ops/argmin.cc
+1
-1
mindspore/lite/src/populate_parameter.cc
mindspore/lite/src/populate_parameter.cc
+2
-2
mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc
...pore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc
+149
-0
mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h
...spore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h
+49
-0
mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc
...e/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc
+98
-0
mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h
...re/lite/src/runtime/kernel/arm/base/batch_to_space_base.h
+49
-0
mindspore/lite/src/runtime/kernel/arm/base/concat_base.h
mindspore/lite/src/runtime/kernel/arm/base/concat_base.h
+1
-1
mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc
...e/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc
+114
-0
mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h
...re/lite/src/runtime/kernel/arm/base/depth_to_space_base.h
+43
-0
mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.cc
mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.cc
+9
-59
mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.h
mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.h
+4
-7
mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.cc
mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.cc
+3
-44
mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.h
mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.h
+4
-9
mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.cc
mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.cc
+8
-39
mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.h
mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.h
+4
-4
mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc
mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc
+42
-0
mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h
mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h
+37
-0
mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc
...e/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc
+49
-0
mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h
...re/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h
+37
-0
mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.cc
...e/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.cc
+54
-0
mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h
...re/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h
+37
-0
mindspore/lite/src/runtime/kernel/arm/opclib/arg_min_max.cc
mindspore/lite/src/runtime/kernel/arm/opclib/arg_min_max.cc
+158
-0
mindspore/lite/src/runtime/kernel/arm/opclib/arg_min_max.h
mindspore/lite/src/runtime/kernel/arm/opclib/arg_min_max.h
+6
-12
mindspore/lite/src/runtime/kernel/arm/opclib/arg_min_max_parameter.h
...ite/src/runtime/kernel/arm/opclib/arg_min_max_parameter.h
+46
-0
mindspore/lite/src/runtime/kernel/arm/opclib/batch_to_space.cc
...pore/lite/src/runtime/kernel/arm/opclib/batch_to_space.cc
+1
-1
mindspore/lite/src/runtime/kernel/arm/opclib/batch_to_space.h
...spore/lite/src/runtime/kernel/arm/opclib/batch_to_space.h
+2
-2
mindspore/lite/src/runtime/kernel/arm/opclib/depth_to_space.cc
...pore/lite/src/runtime/kernel/arm/opclib/depth_to_space.cc
+42
-0
mindspore/lite/src/runtime/kernel/arm/opclib/depth_to_space.h
...spore/lite/src/runtime/kernel/arm/opclib/depth_to_space.h
+33
-0
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/arg_min_max.cc
...re/lite/src/runtime/kernel/arm/opclib/fp32/arg_min_max.cc
+430
-26
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/arg_min_max.h
...ore/lite/src/runtime/kernel/arm/opclib/fp32/arg_min_max.h
+13
-17
mindspore/lite/src/runtime/kernel/arm/opclib/int8/arg_min_max.cc
...re/lite/src/runtime/kernel/arm/opclib/int8/arg_min_max.cc
+488
-0
mindspore/lite/src/runtime/kernel/arm/opclib/int8/arg_min_max.h
...ore/lite/src/runtime/kernel/arm/opclib/int8/arg_min_max.h
+33
-0
mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc
...ite/tools/converter/parser/tflite/tflite_argmax_parser.cc
+1
-1
未找到文件。
mindspore/lite/src/ops/argmax.cc
浏览文件 @
df6707bd
...
...
@@ -38,7 +38,7 @@ int ArgMax::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
MS_LOG
(
ERROR
)
<<
"Invalid axis "
<<
argmax_prim
->
axis
()
<<
", input shape size: "
<<
input_shape_size
;
return
RET_PARAM_INVALID
;
}
if
(
argmax_prim
->
topK
()
==
-
1
)
{
if
(
argmax_prim
->
topK
()
==
1
)
{
output_shape
.
erase
(
output_shape
.
begin
()
+
axis
);
}
else
if
(
argmax_prim
->
axisType
()
==
1
)
{
output_shape
[
axis
]
=
argmax_prim
->
topK
();
...
...
mindspore/lite/src/ops/argmin.cc
浏览文件 @
df6707bd
...
...
@@ -37,7 +37,7 @@ int ArgMin::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
return
RET_PARAM_INVALID
;
}
std
::
vector
<
int
>
output_shape
(
input
->
shape
());
if
(
argmin_prim
->
topK
()
==
-
1
)
{
if
(
argmin_prim
->
topK
()
==
1
)
{
output_shape
.
erase
(
output_shape
.
begin
()
+
axis
);
}
else
if
(
argmin_prim
->
axisType
()
==
1
)
{
output_shape
[
axis
]
=
argmin_prim
->
topK
();
...
...
mindspore/lite/src/populate_parameter.cc
浏览文件 @
df6707bd
...
...
@@ -28,7 +28,7 @@
#include "src/runtime/kernel/arm/opclib/reshape_parameter.h"
#include "src/runtime/kernel/arm/opclib/fp32/stack.h"
#include "src/runtime/kernel/arm/opclib/unstack.h"
#include "src/runtime/kernel/arm/opclib/
fp32/
depth_to_space.h"
#include "src/runtime/kernel/arm/opclib/depth_to_space.h"
#include "src/runtime/kernel/arm/opclib/conv_parameter.h"
#include "src/runtime/kernel/arm/opclib/fp32/pooling.h"
#include "src/runtime/kernel/arm/opclib/matmul.h"
...
...
@@ -57,7 +57,7 @@
#include "src/runtime/kernel/arm/opclib/fp32/gatherNd.h"
#include "src/runtime/kernel/arm/opclib/resize.h"
#include "src/runtime/kernel/arm/opclib/scatter_nd.h"
#include "src/runtime/kernel/arm/opclib/
fp32/
batch_to_space.h"
#include "src/runtime/kernel/arm/opclib/batch_to_space.h"
#include "src/runtime/kernel/arm/opclib/fp32/crop.h"
#include "src/runtime/kernel/arm/fp32/flatten.h"
#include "src/runtime/kernel/arm/opclib/fp32/unsqueeze.h"
...
...
mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/arg_min_max_base.h"
#include "src/runtime/kernel/arm/opclib/arg_min_max.h"
#include "src/runtime/kernel/arm/fp32/argminmax.h"
#include "src/runtime/kernel/arm/int8/argminmax_int8.h"
#include "schema/model_generated.h"
#include "src/kernel_factory.h"
#include "include/errorcode.h"
#include "include/context.h"
using
mindspore
::
lite
::
KernelRegistrar
;
using
mindspore
::
lite
::
RET_ERROR
;
using
mindspore
::
lite
::
RET_PARAM_INVALID
;
using
mindspore
::
lite
::
RET_FORMAT_ERR
;
using
mindspore
::
lite
::
RET_OK
;
using
mindspore
::
schema
::
PrimitiveType_ArgMax
;
using
mindspore
::
schema
::
PrimitiveType_ArgMin
;
namespace
mindspore
::
kernel
{
int
ArgMinMaxBaseCPUKernel
::
Init
()
{
auto
param
=
reinterpret_cast
<
ArgMinMaxParameter
*>
(
opParameter
);
switch
(
opParameter
->
type_
)
{
case
PrimitiveType_ArgMax
:
param
->
get_max_
=
true
;
break
;
case
PrimitiveType_ArgMin
:
param
->
get_max_
=
false
;
break
;
default:
MS_LOG
(
ERROR
)
<<
"Unexpected type "
<<
opParameter
->
type_
;
return
RET_ERROR
;
}
auto
in_shape
=
inputs_
.
at
(
0
)
->
shape
();
auto
dims_size
=
in_shape
.
size
();
int
axis
=
param
->
axis_
<
0
?
param
->
axis_
+
dims_size
:
param
->
axis_
;
param
->
axis_
=
axis
;
param
->
dims_size_
=
dims_size
;
if
(
param
->
topk_
<=
0
)
{
MS_LOG
(
ERROR
)
<<
"Invalid topk "
<<
param
->
topk_
;
return
RET_PARAM_INVALID
;
}
param
->
topk_
=
MSMIN
(
param
->
topk_
,
in_shape
[
axis
]);
if
(
param
->
topk_
>
1
)
{
if
(
context_
!=
nullptr
&&
context_
->
allocator
!=
nullptr
)
{
param
->
arg_elements_
=
reinterpret_cast
<
ArgElement
*>
(
context_
->
allocator
->
Malloc
(
sizeof
(
ArgElement
)
*
in_shape
[
axis
]));
data_from_allocator_
=
true
;
}
else
{
param
->
arg_elements_
=
reinterpret_cast
<
ArgElement
*>
(
malloc
(
sizeof
(
ArgElement
)
*
in_shape
[
axis
]));
}
if
(
param
->
arg_elements_
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"malloc memroy fail!"
;
return
RET_ERROR
;
}
}
return
RET_OK
;
}
int
ArgMinMaxBaseCPUKernel
::
Run
()
{
auto
input
=
inputs_
.
at
(
0
);
auto
input_data
=
reinterpret_cast
<
const
void
*>
(
inputs_
.
at
(
0
)
->
Data
());
auto
output_data
=
outputs_
.
at
(
0
)
->
Data
();
auto
shape
=
input
->
shape
().
data
();
auto
param
=
reinterpret_cast
<
ArgMinMaxParameter
*>
(
opParameter
);
ArgMinMax
(
input_data
,
output_data
,
reinterpret_cast
<
const
int
*>
(
shape
),
param
);
return
RET_OK
;
}
// Releases the topk scratch buffer (allocator-backed or malloc'ed, depending on
// data_from_allocator_) and nulls the pointer so repeated calls are harmless.
void ArgMinMaxBaseCPUKernel::FreeTmpMemory() {
  auto arg_param = reinterpret_cast<ArgMinMaxParameter *>(opParameter);
  if (arg_param->arg_elements_ != nullptr) {
    if (data_from_allocator_) {
      context_->allocator->Free(arg_param->arg_elements_);
    } else {
      free(arg_param->arg_elements_);
    }
    arg_param->arg_elements_ = nullptr;
  }
}
// Factory for the int8 ArgMin/ArgMax CPU kernel: allocates, initializes, and
// returns the kernel, or nullptr on any failure.
kernel::LiteKernel *CpuArgMinMaxInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                  const std::vector<lite::tensor::Tensor *> &outputs,
                                                  OpParameter *op_parameter, const lite::Context *ctx,
                                                  const kernel::KernelKey &desc) {
  if (op_parameter == nullptr) {
    MS_LOG(ERROR) << "Input op_parameter is nullptr!";
    return nullptr;
  }
  auto *arg_kernel = new (std::nothrow) ArgMinMaxInt8CPUKernel(op_parameter, inputs, outputs, ctx);
  if (arg_kernel == nullptr) {
    MS_LOG(ERROR) << "new ArgMinMaxInt8CPUKernel fail!";
    return nullptr;
  }
  if (arg_kernel->Init() != RET_OK) {
    delete arg_kernel;
    MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    return nullptr;
  }
  return arg_kernel;
}
// Factory for the fp32 ArgMin/ArgMax CPU kernel: allocates, initializes, and
// returns the kernel, or nullptr on any failure.
kernel::LiteKernel *CpuArgMinMaxFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                  const std::vector<lite::tensor::Tensor *> &outputs,
                                                  OpParameter *op_parameter, const lite::Context *ctx,
                                                  const kernel::KernelKey &desc) {
  if (op_parameter == nullptr) {
    MS_LOG(ERROR) << "Input op_parameter is nullptr!";
    return nullptr;
  }
  auto *arg_kernel = new (std::nothrow) ArgMinMaxCPUKernel(op_parameter, inputs, outputs, ctx);
  if (arg_kernel == nullptr) {
    MS_LOG(ERROR) << "new ArgMinMaxCPUKernel fail!";
    return nullptr;
  }
  if (arg_kernel->Init() != RET_OK) {
    delete arg_kernel;
    MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    return nullptr;
  }
  return arg_kernel;
}

// Register one creator per (arch, dtype, primitive) combination.
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ArgMax, CpuArgMinMaxFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ArgMin, CpuArgMinMaxFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ArgMax, CpuArgMinMaxInt8KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ArgMin, CpuArgMinMaxInt8KernelCreator)
}
// namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_ARG_MIN_MAX_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_ARG_MIN_MAX_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
namespace
mindspore
::
kernel
{
// Shared base for the fp32/int8 ArgMin/ArgMax CPU kernels.
// Init() (defined in arg_min_max_base.cc) normalizes the axis/topk parameters
// and, for topk > 1, allocates a scratch ArgElement buffer either from the
// context allocator or with malloc; FreeTmpMemory() releases that buffer.
class ArgMinMaxBaseCPUKernel : public LiteKernel {
 public:
  // ctx must be non-null: the constructor dereferences it to propagate threadNum.
  ArgMinMaxBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                         const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
      : LiteKernel(parameter, inputs, outputs), context_(ctx), data_from_allocator_(false) {
    opParameter->thread_num_ = ctx->threadNum;
  }

  // Frees the topk scratch buffer if it is still held.
  virtual ~ArgMinMaxBaseCPUKernel() { FreeTmpMemory(); }

  int Init() override;

  int ReSize() override { return 0; }  // no per-shape state cached in the base class

  int Run() override;

  // Releases param->arg_elements_ (via allocator or free, per data_from_allocator_)
  // and resets it to nullptr; safe to call more than once.
  void FreeTmpMemory();

 private:
  const lite::Context *context_;  // borrowed; supplies the optional allocator
  bool data_from_allocator_;      // true when arg_elements_ came from context_->allocator
};
}
// namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_ARG_MIN_MAX_BASE_H_
mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/batch_to_space_base.h"
#include "src/runtime/kernel/arm/opclib/batch_to_space.h"
#include "src/runtime/kernel/arm/fp32/batch_to_space.h"
#include "src/runtime/kernel/arm/int8/batch_to_space_int8.h"
#include "schema/model_generated.h"
#include "src/kernel_factory.h"
#include "include/errorcode.h"
#include "include/context.h"
using
mindspore
::
lite
::
KernelRegistrar
;
using
mindspore
::
lite
::
RET_ERROR
;
using
mindspore
::
lite
::
RET_FORMAT_ERR
;
using
mindspore
::
lite
::
RET_OK
;
using
mindspore
::
schema
::
PrimitiveType_BatchToSpace
;
namespace
mindspore
::
kernel
{
int
BatchToSpaceBaseCPUKernel
::
Init
()
{
if
(
inputs_
[
0
]
->
GetFormat
()
!=
schema
::
Format_NHWC
)
{
MS_LOG
(
ERROR
)
<<
"batch_to_space only support NHWC now!"
;
return
RET_FORMAT_ERR
;
}
BatchToSpaceParameter
*
param
=
reinterpret_cast
<
BatchToSpaceParameter
*>
(
this
->
opParameter
);
for
(
int
i
=
0
;
i
<
BATCH_TO_SPACE_CROPS_SIZE
;
++
i
)
{
if
(
param
->
crops_
[
i
]
!=
0
)
{
no_crop_
=
false
;
}
}
return
RET_OK
;
}
// Factory for the int8 BatchToSpace CPU kernel: allocates, initializes, and
// returns the kernel, or nullptr on any failure.
kernel::LiteKernel *CpuBatchToSpaceInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                     const std::vector<lite::tensor::Tensor *> &outputs,
                                                     OpParameter *op_parameter, const lite::Context *ctx,
                                                     const kernel::KernelKey &desc) {
  MS_ASSERT(desc.type == schema::PrimitiveType_BatchToSpace);
  if (op_parameter == nullptr) {
    MS_LOG(ERROR) << "Input op_parameter is nullptr!";
    return nullptr;
  }
  auto *bts_kernel = new (std::nothrow) BatchToSpaceInt8CPUKernel(op_parameter, inputs, outputs, ctx);
  if (bts_kernel == nullptr) {
    MS_LOG(ERROR) << "new BatchToSpaceInt8CPUKernel fail!";
    return nullptr;
  }
  if (bts_kernel->Init() != RET_OK) {
    delete bts_kernel;
    MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    return nullptr;
  }
  return bts_kernel;
}
// Factory for the fp32 BatchToSpace CPU kernel: allocates, initializes, and
// returns the kernel, or nullptr on any failure.
kernel::LiteKernel *CpuBatchToSpaceFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                     const std::vector<lite::tensor::Tensor *> &outputs,
                                                     OpParameter *op_parameter, const lite::Context *ctx,
                                                     const kernel::KernelKey &desc) {
  MS_ASSERT(desc.type == schema::PrimitiveType_BatchToSpace);
  if (op_parameter == nullptr) {
    MS_LOG(ERROR) << "Input op_parameter is nullptr!";
    return nullptr;
  }
  auto *bts_kernel = new (std::nothrow) BatchToSpaceCPUKernel(op_parameter, inputs, outputs, ctx);
  if (bts_kernel == nullptr) {
    MS_LOG(ERROR) << "new BatchToSpaceCPUKernel fail!";
    return nullptr;
  }
  if (bts_kernel->Init() != RET_OK) {
    delete bts_kernel;
    MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    return nullptr;
  }
  return bts_kernel;
}

// Register one creator per (arch, dtype, primitive) combination.
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_BatchToSpace, CpuBatchToSpaceInt8KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BatchToSpace, CpuBatchToSpaceFp32KernelCreator)
}
// namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_BATCH_TO_SPACE_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_BATCH_TO_SPACE_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/opclib/concat_parameter.h"
namespace
mindspore
::
kernel
{
// Shared base for the fp32 and int8 BatchToSpace CPU kernels. Init() (in
// batch_to_space_base.cc) validates the NHWC layout and computes whether the
// no-crop fast path applies; subclasses query it via IsNoCrop().
class BatchToSpaceBaseCPUKernel : public LiteKernel {
 public:
  // ctx must be non-null: the constructor dereferences it to propagate threadNum.
  BatchToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                            const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
      : LiteKernel(parameter, inputs, outputs) {
    opParameter->thread_num_ = ctx->threadNum;
  }

  virtual ~BatchToSpaceBaseCPUKernel() = default;

  int Init() override;

  int ReSize() override { return 0; }  // no per-shape state in the base class

  // The base class does no computation; concrete subclasses implement Run.
  int Run() override { return 0; }

  // True when every crops_ entry is zero, enabling the no-crop fast path.
  bool IsNoCrop() const { return no_crop_; }

 private:
  // Fix: was left uninitialized; Init() only ever clears this flag, so reading
  // it before a non-zero crop was found yielded an indeterminate value.
  // "No crop" is the correct default until Init() proves otherwise.
  bool no_crop_ = true;
};
}
// namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_BATCH_TO_SPACE_BASE_H_
mindspore/lite/src/runtime/kernel/arm/base/concat_base.h
浏览文件 @
df6707bd
...
...
@@ -34,7 +34,7 @@ class ConcatBaseCPUKernel : public LiteKernel {
concat_param_
=
reinterpret_cast
<
ConcatParameter
*>
(
opParameter
);
}
~
ConcatBaseCPUKernel
()
=
default
;
virtual
~
ConcatBaseCPUKernel
()
=
default
;
int
Init
()
override
;
...
...
mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/depth_to_space_base.h"
#include "src/runtime/kernel/arm/opclib/depth_to_space.h"
#include "src/runtime/kernel/arm/fp32/depth_to_space.h"
#include "src/runtime/kernel/arm/int8/depth_to_space_int8.h"
#include "src/runtime/kernel/arm/opclib/arithmetic_common.h"
#include "schema/model_generated.h"
#include "src/kernel_factory.h"
#include "include/errorcode.h"
#include "include/context.h"
using
mindspore
::
lite
::
KernelRegistrar
;
using
mindspore
::
lite
::
RET_ERROR
;
using
mindspore
::
lite
::
RET_PARAM_INVALID
;
using
mindspore
::
lite
::
RET_FORMAT_ERR
;
using
mindspore
::
lite
::
RET_OK
;
using
mindspore
::
schema
::
PrimitiveType_DepthToSpace
;
namespace
mindspore
::
kernel
{
// Validates the input (NHWC layout, positive block size, 4-D shape) and caches
// the input/output strides of the three outer dimensions into the
// DepthToSpaceParameter for use by the concrete kernels.
// Returns RET_OK on success, RET_FORMAT_ERR / RET_PARAM_INVALID on failure.
int DepthToSpaceBaseCPUKernel::Init() {
  if (inputs_[0]->GetFormat() != schema::Format_NHWC) {
    MS_LOG(ERROR) << "depth_to_space only support NHWC now!";
    return RET_FORMAT_ERR;
  }
  DepthToSpaceParameter *param = reinterpret_cast<DepthToSpaceParameter *>(opParameter);
  if (param->block_size_ <= 0) {
    MS_LOG(ERROR) << "Input block_size should > 0!";
    return RET_PARAM_INVALID;
  }
  auto shape_size = inputs_[0]->shape().size();
  if (shape_size != DIMENSION_4D) {
    MS_LOG(ERROR) << "Input shape size should be " << DIMENSION_4D;
    return RET_PARAM_INVALID;
  }
  // NOTE(review): assumes Tensor::shape() returns a (reference to a) vector
  // whose .data() pointer stays valid for the ComputeStrides call — confirm in
  // the tensor API. The const_cast suggests ComputeStrides takes a non-const
  // int* but does not modify the shape — verify.
  int32_t in_strides[DIMENSION_4D];
  ComputeStrides(const_cast<int *>(inputs_[0]->shape().data()), in_strides, shape_size);
  // Only the outer three strides are stored; the innermost dim is contiguous.
  param->in_stride_dim0_ = in_strides[0];
  param->in_stride_dim1_ = in_strides[1];
  param->in_stride_dim2_ = in_strides[2];
  int32_t out_strides[DIMENSION_4D];
  ComputeStrides(const_cast<int *>(outputs_[0]->shape().data()), out_strides, shape_size);
  param->out_stride_dim0_ = out_strides[0];
  param->out_stride_dim1_ = out_strides[1];
  param->out_stride_dim2_ = out_strides[2];
  return RET_OK;
}
// Factory for the int8 DepthToSpace CPU kernel: allocates, initializes, and
// returns the kernel, or nullptr on any failure.
kernel::LiteKernel *CpuDepthToSpaceInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                     const std::vector<lite::tensor::Tensor *> &outputs,
                                                     OpParameter *op_parameter, const lite::Context *ctx,
                                                     const kernel::KernelKey &desc) {
  MS_ASSERT(desc.type == schema::PrimitiveType_DepthToSpace);
  if (op_parameter == nullptr) {
    MS_LOG(ERROR) << "Input op_parameter is nullptr!";
    return nullptr;
  }
  auto *kernel = new (std::nothrow) DepthToSpaceInt8CPUKernel(op_parameter, inputs, outputs, ctx);
  if (kernel == nullptr) {
    // fix: message previously named BatchToSpaceInt8CPUKernel (copy-paste error)
    MS_LOG(ERROR) << "new DepthToSpaceInt8CPUKernel fail!";
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    delete kernel;
    MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    return nullptr;
  }
  return kernel;
}
// Factory for the fp32 DepthToSpace CPU kernel: allocates, initializes, and
// returns the kernel, or nullptr on any failure.
kernel::LiteKernel *CpuDepthToSpaceFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                     const std::vector<lite::tensor::Tensor *> &outputs,
                                                     OpParameter *op_parameter, const lite::Context *ctx,
                                                     const kernel::KernelKey &desc) {
  MS_ASSERT(desc.type == schema::PrimitiveType_DepthToSpace);
  if (op_parameter == nullptr) {
    MS_LOG(ERROR) << "Input op_parameter is nullptr!";
    return nullptr;
  }
  auto *dts_kernel = new (std::nothrow) DepthToSpaceCPUKernel(op_parameter, inputs, outputs, ctx);
  if (dts_kernel == nullptr) {
    MS_LOG(ERROR) << "new DepthToSpaceCPUKernel fail!";
    return nullptr;
  }
  if (dts_kernel->Init() != RET_OK) {
    delete dts_kernel;
    MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    return nullptr;
  }
  return dts_kernel;
}

// Register one creator per (arch, dtype, primitive) combination.
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_DepthToSpace, CpuDepthToSpaceFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_DepthToSpace, CpuDepthToSpaceInt8KernelCreator)
}
// namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_DEPTH_TO_SPACE_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_DEPTH_TO_SPACE_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/opclib/depth_to_space.h"
namespace
mindspore
::
kernel
{
// Format/stride validation shared by the fp32 and int8 DepthToSpace kernels.
// Init() (in depth_to_space_base.cc) checks NHWC layout, block_size, and the
// 4-D input shape, then caches input/output strides in DepthToSpaceParameter.
class DepthToSpaceBaseCPUKernel : public LiteKernel {
 public:
  // ctx must be non-null: the constructor dereferences it to propagate threadNum.
  DepthToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                            const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
      : LiteKernel(parameter, inputs, outputs) {
    opParameter->thread_num_ = ctx->threadNum;
  }

  virtual ~DepthToSpaceBaseCPUKernel() = default;

  int Init() override;

  int ReSize() override { return 0; }  // no per-shape state in the base class

  // The base class does no computation; concrete subclasses implement Run.
  int Run() override { return 0; }
};
}
// namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_DEPTH_TO_SPACE_BASE_H_
mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.cc
浏览文件 @
df6707bd
...
...
@@ -18,7 +18,7 @@
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/opclib/
fp32/
arg_min_max.h"
#include "src/runtime/kernel/arm/opclib/arg_min_max.h"
#include "include/errorcode.h"
using
mindspore
::
kernel
::
KERNEL_ARCH
::
kCPU
;
...
...
@@ -29,69 +29,19 @@ using mindspore::schema::PrimitiveType_ArgMax;
using
mindspore
::
schema
::
PrimitiveType_ArgMin
;
namespace
mindspore
::
kernel
{
namespace
{
constexpr
int
kInputNum
=
1
;
constexpr
int
kOutputNum
=
1
;
}
// namespace
int
ArgMinMaxCPUKernel
::
Init
()
{
switch
(
opParameter
->
type_
)
{
case
PrimitiveType_ArgMax
:
get_max_
=
true
;
break
;
case
PrimitiveType_ArgMin
:
get_max_
=
false
;
break
;
default:
MS_LOG
(
ERROR
)
<<
"Unexpected type "
<<
opParameter
->
type_
;
return
RET_ERROR
;
auto
ret
=
ArgMinMaxBaseCPUKernel
::
Init
();
if
(
ret
!=
RET_OK
)
{
return
ret
;
}
auto
dims_size
=
inputs_
.
at
(
0
)
->
shape
().
size
();
axis_
=
reinterpret_cast
<
ArgMinMaxParameter
*>
(
opParameter
)
->
axis_
;
axis_
=
axis_
<
0
?
axis_
+
dims_size
:
axis_
;
auto
param
=
reinterpret_cast
<
ArgMinMaxParameter
*>
(
opParameter
);
param
->
data_type_
=
kNumberTypeFloat32
;
return
RET_OK
;
}
int
ArgMinMaxCPUKernel
::
Run
()
{
auto
input
=
inputs_
.
at
(
0
);
auto
input_data
=
reinterpret_cast
<
float
*>
(
inputs_
.
at
(
0
)
->
Data
());
auto
output_data
=
reinterpret_cast
<
float
*>
(
outputs_
.
at
(
0
)
->
Data
());
auto
shape
=
input
->
shape
().
data
();
int
dims_number
=
input
->
shape
().
size
();
bool
out_value
=
reinterpret_cast
<
ArgMinMaxParameter
*>
(
opParameter
)
->
out_value_
;
if
(
get_max_
)
{
ArgMax
(
input_data
,
shape
,
dims_number
,
axis_
,
out_value
,
output_data
);
}
else
{
ArgMin
(
input_data
,
shape
,
dims_number
,
axis_
,
out_value
,
output_data
);
}
return
RET_OK
;
auto
ret
=
ArgMinMaxBaseCPUKernel
::
Run
();
ArgMinMaxBaseCPUKernel
::
FreeTmpMemory
();
return
ret
;
}
kernel
::
LiteKernel
*
CpuArgMinMaxFp32KernelCreator
(
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
OpParameter
*
opParameter
,
const
lite
::
Context
*
ctx
,
const
kernel
::
KernelKey
&
desc
)
{
if
(
opParameter
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"Input opParameter is nullptr!"
;
return
nullptr
;
}
auto
*
kernel
=
new
(
std
::
nothrow
)
ArgMinMaxCPUKernel
(
opParameter
,
inputs
,
outputs
);
if
(
kernel
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"new ArgMinMaxCPUKernel fail!"
;
return
nullptr
;
}
auto
ret
=
kernel
->
Init
();
if
(
ret
!=
RET_OK
)
{
delete
kernel
;
MS_LOG
(
ERROR
)
<<
"Init kernel failed, name: "
<<
opParameter
->
name_
<<
", type: "
<<
schema
::
EnumNamePrimitiveType
(
static_cast
<
schema
::
PrimitiveType
>
(
opParameter
->
type_
));
return
nullptr
;
}
return
kernel
;
}
REG_KERNEL
(
kCPU
,
kNumberTypeFloat32
,
PrimitiveType_ArgMax
,
CpuArgMinMaxFp32KernelCreator
)
REG_KERNEL
(
kCPU
,
kNumberTypeFloat32
,
PrimitiveType_ArgMin
,
CpuArgMinMaxFp32KernelCreator
)
}
// namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.h
浏览文件 @
df6707bd
...
...
@@ -17,23 +17,20 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_ARGMINMAX_H_
#include <vector>
#include "src/
lite_kernel
.h"
#include "src/
runtime/kernel/arm/base/arg_min_max_base
.h"
namespace
mindspore
::
kernel
{
class
ArgMinMaxCPUKernel
:
public
Lite
Kernel
{
class
ArgMinMaxCPUKernel
:
public
ArgMinMaxBaseCPU
Kernel
{
public:
ArgMinMaxCPUKernel
(
OpParameter
*
parameter
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
)
:
LiteKernel
(
parameter
,
inputs
,
outputs
)
{}
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
const
lite
::
Context
*
ctx
)
:
ArgMinMaxBaseCPUKernel
(
parameter
,
inputs
,
outputs
,
ctx
)
{}
~
ArgMinMaxCPUKernel
()
=
default
;
int
Init
()
override
;
int
ReSize
()
override
{
return
0
;
}
int
Run
()
override
;
private:
int
axis_
;
bool
get_max_
;
};
}
// namespace mindspore::kernel
...
...
mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.cc
浏览文件 @
df6707bd
...
...
@@ -17,28 +17,14 @@
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/opclib/
fp32/
batch_to_space.h"
#include "src/runtime/kernel/arm/opclib/batch_to_space.h"
#include "include/errorcode.h"
using
mindspore
::
lite
::
KernelRegistrar
;
using
mindspore
::
lite
::
RET_FORMAT_ERR
;
using
mindspore
::
lite
::
RET_OK
;
using
mindspore
::
schema
::
PrimitiveType_BatchToSpace
;
namespace
mindspore
::
kernel
{
int
BatchToSpaceCPUKernel
::
Init
()
{
if
(
inputs_
[
0
]
->
GetFormat
()
!=
schema
::
Format_NHWC
)
{
MS_LOG
(
ERROR
)
<<
"batch_to_space only support NHWC now!"
;
return
RET_FORMAT_ERR
;
}
BatchToSpaceParameter
*
param
=
reinterpret_cast
<
BatchToSpaceParameter
*>
(
this
->
opParameter
);
for
(
int
i
=
0
;
i
<
BATCH_TO_SPACE_CROPS_SIZE
;
++
i
)
{
if
(
param
->
crops_
[
i
]
!=
0
)
{
no_crop_
=
false
;
}
}
return
RET_OK
;
return
BatchToSpaceBaseCPUKernel
::
Init
();
}
int
BatchToSpaceCPUKernel
::
Run
()
{
...
...
@@ -50,7 +36,7 @@ int BatchToSpaceCPUKernel::Run() {
auto
out_shape
=
output
->
shape
();
BatchToSpaceParameter
*
param
=
reinterpret_cast
<
BatchToSpaceParameter
*>
(
this
->
opParameter
);
if
(
no_crop_
)
{
if
(
IsNoCrop
()
)
{
BatchToSpaceNoCropForNHWC
(
input_data
,
output_data
,
in_shape
.
data
(),
out_shape
[
0
],
param
->
block_shape_
,
sizeof
(
float
));
}
else
{
...
...
@@ -60,31 +46,4 @@ int BatchToSpaceCPUKernel::Run() {
return
RET_OK
;
}
kernel
::
LiteKernel
*
CpuBatchToSpaceFp32KernelCreator
(
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
OpParameter
*
op_parameter
,
const
lite
::
Context
*
ctx
,
const
kernel
::
KernelKey
&
desc
)
{
if
(
op_parameter
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"Input op_parameter is nullptr!"
;
return
nullptr
;
}
MS_ASSERT
(
desc
.
type
==
schema
::
PrimitiveType_BatchToSpace
);
auto
*
kernel
=
new
(
std
::
nothrow
)
BatchToSpaceCPUKernel
(
op_parameter
,
inputs
,
outputs
);
if
(
kernel
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"new BatchToSpaceCPUKernel fail!"
;
return
nullptr
;
}
auto
ret
=
kernel
->
Init
();
if
(
ret
!=
RET_OK
)
{
delete
kernel
;
MS_LOG
(
ERROR
)
<<
"Init kernel failed, name: "
<<
op_parameter
->
name_
<<
", type: "
<<
schema
::
EnumNamePrimitiveType
(
static_cast
<
schema
::
PrimitiveType
>
(
op_parameter
->
type_
));
return
nullptr
;
}
return
kernel
;
}
REG_KERNEL
(
kCPU
,
kNumberTypeFloat32
,
PrimitiveType_BatchToSpace
,
CpuBatchToSpaceFp32KernelCreator
)
}
// namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.h
浏览文件 @
df6707bd
...
...
@@ -15,26 +15,21 @@
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_BATCH_TO_SPACE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_BATCH_TO_SPACE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/batch_to_space_base.h"
namespace
mindspore
::
kernel
{
class
BatchToSpaceCPUKernel
:
public
Lite
Kernel
{
class
BatchToSpaceCPUKernel
:
public
BatchToSpaceBaseCPU
Kernel
{
public:
BatchToSpaceCPUKernel
(
OpParameter
*
parameter
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
)
:
LiteKernel
(
parameter
,
inputs
,
outputs
),
no_crop_
(
true
)
{}
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
const
lite
::
Context
*
ctx
)
:
BatchToSpaceBaseCPUKernel
(
parameter
,
inputs
,
outputs
,
ctx
)
{}
~
BatchToSpaceCPUKernel
()
=
default
;
int
Init
()
override
;
int
ReSize
()
override
{
return
0
;
}
int
Run
()
override
;
private:
bool
no_crop_
;
};
}
// namespace mindspore::kernel
...
...
mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.cc
浏览文件 @
df6707bd
...
...
@@ -17,7 +17,8 @@
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/opclib/fp32/depth_to_space.h"
#include "src/runtime/kernel/arm/opclib/arithmetic_common.h"
#include "src/runtime/kernel/arm/opclib/depth_to_space.h"
#include "include/errorcode.h"
using
mindspore
::
lite
::
KernelRegistrar
;
...
...
@@ -30,15 +31,12 @@ using mindspore::schema::PrimitiveType_DepthToSpace;
namespace
mindspore
::
kernel
{
int
DepthToSpaceCPUKernel
::
Init
()
{
if
(
inputs_
[
0
]
->
GetFormat
()
!=
schema
::
Format_NHWC
)
{
MS_LOG
(
ERROR
)
<<
"depth_to_space only support NHWC now!"
;
return
RET_FORMAT_ERR
;
auto
ret
=
DepthToSpaceBaseCPUKernel
::
Init
();
if
(
ret
!=
RET_OK
)
{
return
ret
;
}
DepthToSpaceParameter
*
param
=
reinterpret_cast
<
DepthToSpaceParameter
*>
(
opParameter
);
if
(
param
->
block_size_
<=
0
)
{
MS_LOG
(
ERROR
)
<<
"Input block_size should > 0!"
;
return
RET_PARAM_INVALID
;
}
param
->
data_type_size_
=
sizeof
(
float
);
return
RET_OK
;
}
...
...
@@ -48,42 +46,13 @@ int DepthToSpaceCPUKernel::Run() {
const
float
*
input_data
=
reinterpret_cast
<
const
float
*>
(
input
->
Data
());
float
*
output_data
=
reinterpret_cast
<
float
*>
(
output
->
Data
());
auto
in_shape
=
input
->
shape
();
auto
out_shape
=
output
->
shape
();
DepthToSpaceParameter
*
param
=
reinterpret_cast
<
DepthToSpaceParameter
*>
(
opParameter
);
if
(
input
->
GetFormat
()
==
schema
::
Format_NHWC
)
{
DepthToSpaceForNHWC
(
input_data
,
output_data
,
in_shape
.
data
(),
out_shape
.
data
(),
in_shape
.
size
(),
param
->
block_size_
);
DepthToSpaceForNHWC
(
input_data
,
output_data
,
in_shape
.
data
(),
param
);
return
RET_OK
;
}
else
{
MS_LOG
(
ERROR
)
<<
"
O
nly support NHWC now!"
;
MS_LOG
(
ERROR
)
<<
"
Depth_to_space o
nly support NHWC now!"
;
return
RET_ERROR
;
}
}
kernel
::
LiteKernel
*
CpuDepthToSpaceFp32KernelCreator
(
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
OpParameter
*
opParameter
,
const
lite
::
Context
*
ctx
,
const
kernel
::
KernelKey
&
desc
)
{
if
(
opParameter
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"Input opParameter is nullptr!"
;
return
nullptr
;
}
MS_ASSERT
(
desc
.
type
==
schema
::
PrimitiveType_DepthToSpace
);
auto
*
kernel
=
new
(
std
::
nothrow
)
DepthToSpaceCPUKernel
(
opParameter
,
inputs
,
outputs
);
if
(
kernel
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"new DepthToSpaceCPUKernel fail!"
;
return
nullptr
;
}
auto
ret
=
kernel
->
Init
();
if
(
ret
!=
RET_OK
)
{
delete
kernel
;
MS_LOG
(
ERROR
)
<<
"Init kernel failed, name: "
<<
opParameter
->
name_
<<
", type: "
<<
schema
::
EnumNamePrimitiveType
(
static_cast
<
schema
::
PrimitiveType
>
(
opParameter
->
type_
));
return
nullptr
;
}
return
kernel
;
}
REG_KERNEL
(
kCPU
,
kNumberTypeFloat32
,
PrimitiveType_DepthToSpace
,
CpuDepthToSpaceFp32KernelCreator
)
}
// namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.h
浏览文件 @
df6707bd
...
...
@@ -17,14 +17,14 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_DEPTH_TO_SPACE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/depth_to_space_base.h"
namespace
mindspore
::
kernel
{
class
DepthToSpaceCPUKernel
:
public
Lite
Kernel
{
class
DepthToSpaceCPUKernel
:
public
DepthToSpaceBaseCPU
Kernel
{
public:
DepthToSpaceCPUKernel
(
OpParameter
*
parameter
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
)
:
LiteKernel
(
parameter
,
inputs
,
outputs
)
{}
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
const
lite
::
Context
*
ctx
)
:
DepthToSpaceBaseCPUKernel
(
parameter
,
inputs
,
outputs
,
ctx
)
{}
~
DepthToSpaceCPUKernel
()
=
default
;
int
Init
()
override
;
...
...
mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/int8/argminmax_int8.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/opclib/arg_min_max.h"
#include "include/errorcode.h"
using
mindspore
::
lite
::
RET_OK
;
using
mindspore
::
lite
::
RET_ERROR
;
namespace
mindspore
::
kernel
{
// Initializes the int8 arg-min/arg-max kernel.
// Runs the shared base-class setup first; only on success does it tag the
// operator parameter with the int8 type id so the opclib dispatcher picks
// the int8 code path.
int ArgMinMaxInt8CPUKernel::Init() {
  int ret = ArgMinMaxBaseCPUKernel::Init();
  if (ret == RET_OK) {
    auto *arg_param = reinterpret_cast<ArgMinMaxParameter *>(opParameter);
    arg_param->data_type_ = kNumberTypeInt8;
  }
  return ret;
}
// Executes the shared arg-min/arg-max computation, then releases the
// temporary buffers regardless of whether the run succeeded.
// FreeTmpMemory() is provided by the base class — TODO confirm it is safe
// to call after a failed Run.
int ArgMinMaxInt8CPUKernel::Run() {
  const int run_ret = ArgMinMaxBaseCPUKernel::Run();
  FreeTmpMemory();
  return run_ret;
}
}
// namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_ARGMINMAX_INT8_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_ARGMINMAX_INT8_H_
#include <vector>
#include "src/runtime/kernel/arm/base/arg_min_max_base.h"
namespace
mindspore
::
kernel
{
// Int8 CPU kernel for ArgMin/ArgMax.
// Most behavior is inherited from ArgMinMaxBaseCPUKernel; the derived
// Init() additionally marks the parameter as int8 (see argminmax_int8.cc).
class ArgMinMaxInt8CPUKernel : public ArgMinMaxBaseCPUKernel {
 public:
  // Forwards everything to the base-class constructor; no extra state.
  ArgMinMaxInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                         const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
      : ArgMinMaxBaseCPUKernel(parameter, inputs, outputs, ctx) {}
  ~ArgMinMaxInt8CPUKernel() = default;

  int Init() override;
  // No resize-time work is needed; returns success unconditionally.
  int ReSize() override { return 0; }
  int Run() override;
};
}
// namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_ARGMINMAX_INT8_H_
mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/int8/batch_to_space_int8.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/opclib/batch_to_space.h"
#include "include/errorcode.h"
using
mindspore
::
lite
::
RET_OK
;
namespace
mindspore
::
kernel
{
// Int8 batch_to_space needs no type-specific setup: all initialization
// (the same path the fp32 kernel uses) lives in the shared base class.
int BatchToSpaceInt8CPUKernel::Init() {
  return BatchToSpaceBaseCPUKernel::Init();
}
int
BatchToSpaceInt8CPUKernel
::
Run
()
{
auto
input
=
inputs_
[
0
];
auto
output
=
outputs_
[
0
];
const
int8_t
*
input_data
=
reinterpret_cast
<
const
int8_t
*>
(
input
->
Data
());
int8_t
*
output_data
=
reinterpret_cast
<
int8_t
*>
(
output
->
Data
());
auto
in_shape
=
input
->
shape
();
auto
out_shape
=
output
->
shape
();
BatchToSpaceParameter
*
param
=
reinterpret_cast
<
BatchToSpaceParameter
*>
(
this
->
opParameter
);
if
(
IsNoCrop
())
{
BatchToSpaceNoCropForNHWC
(
input_data
,
output_data
,
in_shape
.
data
(),
out_shape
[
0
],
param
->
block_shape_
,
sizeof
(
int8_t
));
}
else
{
BatchToSpaceForNHWC
(
input_data
,
output_data
,
in_shape
.
data
(),
out_shape
[
0
],
param
->
block_shape_
,
param
->
crops_
,
sizeof
(
int8_t
));
}
return
RET_OK
;
}
}
// namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_BATCH_TO_SPACE_INT8_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_BATCH_TO_SPACE_INT8_H_
#include <vector>
#include "src/runtime/kernel/arm/base/batch_to_space_base.h"
namespace
mindspore
::
kernel
{
// Int8 CPU kernel for BatchToSpace.
// Shares all setup with the fp32 kernel through BatchToSpaceBaseCPUKernel;
// Run() performs the int8 NHWC rearrangement (see batch_to_space_int8.cc).
class BatchToSpaceInt8CPUKernel : public BatchToSpaceBaseCPUKernel {
 public:
  // Forwards everything to the base-class constructor; no extra state.
  BatchToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                            const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
      : BatchToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx) {}
  ~BatchToSpaceInt8CPUKernel() = default;

  int Init() override;
  // No resize-time work is needed; returns success unconditionally.
  int ReSize() override { return 0; }
  int Run() override;
};
}
// namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_BATCH_TO_SPACE_INT8_H_
mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.cc
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/int8/depth_to_space_int8.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/opclib/depth_to_space.h"
#include "include/errorcode.h"
using
mindspore
::
lite
::
RET_OK
;
using
mindspore
::
lite
::
RET_ERROR
;
namespace
mindspore
::
kernel
{
// Initializes the int8 depth_to_space kernel.
// The shared base-class setup runs first; on success the element size is
// recorded in the parameter so the opclib copy routine uses int8 strides.
int DepthToSpaceInt8CPUKernel::Init() {
  int ret = DepthToSpaceBaseCPUKernel::Init();
  if (ret == RET_OK) {
    auto *d2s_param = reinterpret_cast<DepthToSpaceParameter *>(opParameter);
    d2s_param->data_type_size_ = sizeof(int8_t);
  }
  return ret;
}
int
DepthToSpaceInt8CPUKernel
::
Run
()
{
auto
input
=
inputs_
[
0
];
auto
output
=
outputs_
[
0
];
const
int8_t
*
input_data
=
reinterpret_cast
<
const
int8_t
*>
(
input
->
Data
());
int8_t
*
output_data
=
reinterpret_cast
<
int8_t
*>
(
output
->
Data
());
auto
in_shape
=
input
->
shape
();
DepthToSpaceParameter
*
param
=
reinterpret_cast
<
DepthToSpaceParameter
*>
(
opParameter
);
if
(
input
->
GetFormat
()
==
schema
::
Format_NHWC
)
{
DepthToSpaceForNHWC
(
input_data
,
output_data
,
in_shape
.
data
(),
param
);
return
RET_OK
;
}
else
{
MS_LOG
(
ERROR
)
<<
"Depth_to_space only support NHWC now!"
;
return
RET_ERROR
;
}
return
RET_OK
;
}
}
// namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_DEPTH_TO_SPACE_INT8_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_DEPTH_TO_SPACE_INT8_H_
#include <vector>
#include "src/runtime/kernel/arm/base/depth_to_space_base.h"
namespace
mindspore
::
kernel
{
// Int8 CPU kernel for DepthToSpace.
// Common setup lives in DepthToSpaceBaseCPUKernel::Init(); the derived
// Init() records the int8 element size and Run() performs the NHWC
// rearrangement (see depth_to_space_int8.cc).
class DepthToSpaceInt8CPUKernel : public DepthToSpaceBaseCPUKernel {
 public:
  // Forwards everything to the base-class constructor; no extra state.
  DepthToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                            const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
      : DepthToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx) {}
  ~DepthToSpaceInt8CPUKernel() = default;

  int Init() override;
  // No resize-time work is needed; returns success unconditionally.
  int ReSize() override { return 0; }
  int Run() override;
};
}
// namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_DEPTH_TO_SPACE_INT8_H_
mindspore/lite/src/runtime/kernel/arm/opclib/arg_min_max.cc
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/opclib/arg_min_max.h"
#include "src/runtime/kernel/arm/opclib/fp32/arg_min_max.h"
#include "src/runtime/kernel/arm/opclib/int8/arg_min_max.h"
#define FLOAT_DATA_TYPE 43
#define INT8_DATA_TYPE 32
// Splits a tensor shape around the reduced axis into the three products a
// flattened arg-min/arg-max walk needs:
//   *pre_axis_count   - number of slices before the axis (product of dims < axis)
//   *axis_count       - length of the reduced axis itself
//   *after_axis_count - contiguous elements after the axis (product of dims > axis)
void GetCalcParameter(const int *shape, int dims_number, int axis, int *pre_axis_count, int *axis_count,
                      int *after_axis_count) {
  int outer = 1;
  for (int dim = 0; dim < axis; ++dim) {
    outer *= shape[dim];
  }
  int inner = 1;
  for (int dim = axis + 1; dim < dims_number; ++dim) {
    inner *= shape[dim];
  }
  *pre_axis_count = outer;
  *axis_count = shape[axis];
  *after_axis_count = inner;
}
// Single-result (topk == 1) arg-min/arg-max along an arbitrary axis.
// The tensor is treated as pre_axis_count x axis_count x after_axis_count
// and the work is dispatched to the typed ArgMax/ArgMin implementations.
// Unsupported data types are a silent no-op.
void ArgMinMaxTopk1(const void *input, void *output, const int *shape, ArgMinMaxParameter *param) {
  int pre_axis_count = 1;
  int axis_count = 1;
  int after_axis_count = 1;
  GetCalcParameter(shape, param->dims_size_, param->axis_, &pre_axis_count, &axis_count, &after_axis_count);
  if (param->data_type_ == FLOAT_DATA_TYPE) {
    const float *in = reinterpret_cast<const float *>(input);
    float *out = reinterpret_cast<float *>(output);
    if (param->get_max_) {
      ArgMax(in, out, param, pre_axis_count, axis_count, after_axis_count);
    } else {
      ArgMin(in, out, param, pre_axis_count, axis_count, after_axis_count);
    }
  } else if (param->data_type_ == INT8_DATA_TYPE) {
    const int8_t *in = reinterpret_cast<const int8_t *>(input);
    int8_t *out = reinterpret_cast<int8_t *>(output);
    if (param->get_max_) {
      ArgMax(in, out, param, pre_axis_count, axis_count, after_axis_count);
    } else {
      ArgMin(in, out, param, pre_axis_count, axis_count, after_axis_count);
    }
  }
}
// Top-k (k > 1) arg-min/arg-max for fp32 data. Each axis in [0, 3] has a
// dedicated implementation; axes outside that range are a silent no-op.
// Both branches of the conditional expression return void, so the ternary
// is purely a dispatch on param->get_max_.
void ArgMinMaxTopknFp32(const float *input, float *output, const int *in_shape, ArgMinMaxParameter *param) {
  const bool want_max = param->get_max_;
  switch (param->axis_) {
    case 0:
      want_max ? ArgMaxDim0(input, output, in_shape, param) : ArgMinDim0(input, output, in_shape, param);
      break;
    case 1:
      want_max ? ArgMaxDim1(input, output, in_shape, param) : ArgMinDim1(input, output, in_shape, param);
      break;
    case 2:
      want_max ? ArgMaxDim2(input, output, in_shape, param) : ArgMinDim2(input, output, in_shape, param);
      break;
    case 3:
      want_max ? ArgMaxDim3(input, output, in_shape, param) : ArgMinDim3(input, output, in_shape, param);
      break;
  }
}
// Top-k (k > 1) arg-min/arg-max for int8 data. Each axis in [0, 3] has a
// dedicated implementation; axes outside that range are a silent no-op.
// Both branches of the conditional expression return void, so the ternary
// is purely a dispatch on param->get_max_.
void ArgMinMaxTopknInt8(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param) {
  const bool want_max = param->get_max_;
  switch (param->axis_) {
    case 0:
      want_max ? ArgMaxDim0(input, output, in_shape, param) : ArgMinDim0(input, output, in_shape, param);
      break;
    case 1:
      want_max ? ArgMaxDim1(input, output, in_shape, param) : ArgMinDim1(input, output, in_shape, param);
      break;
    case 2:
      want_max ? ArgMaxDim2(input, output, in_shape, param) : ArgMinDim2(input, output, in_shape, param);
      break;
    case 3:
      want_max ? ArgMaxDim3(input, output, in_shape, param) : ArgMinDim3(input, output, in_shape, param);
      break;
  }
}
// Entry point for arg-min/arg-max over an NCHW/NHWC-agnostic flat shape.
// topk == 1 takes the generic single-result path; larger topk dispatches to
// the per-type, per-axis top-k implementations. Unsupported data types are
// a silent no-op.
void ArgMinMax(const void *input, void *output, const int *in_shape, ArgMinMaxParameter *param) {
  if (param->topk_ == 1) {
    ArgMinMaxTopk1(input, output, in_shape, param);
    return;
  }
  if (param->data_type_ == FLOAT_DATA_TYPE) {
    ArgMinMaxTopknFp32(reinterpret_cast<const float *>(input), reinterpret_cast<float *>(output), in_shape, param);
  } else if (param->data_type_ == INT8_DATA_TYPE) {
    ArgMinMaxTopknInt8(reinterpret_cast<const int8_t *>(input), reinterpret_cast<int8_t *>(output), in_shape, param);
  }
}
#undef FLOAT_DATA_TYPE
#undef INT8_DATA_TYPE
mindspore/lite/src/runtime/kernel/arm/opclib/
fp32/depth_to_space
.h
→
mindspore/lite/src/runtime/kernel/arm/opclib/
arg_min_max
.h
浏览文件 @
df6707bd
...
...
@@ -13,17 +13,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_DEPTH_TO_SPACE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_DEPTH_TO_SPACE_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
struct
DepthToSpaceParameter
{
OpParameter
op_parameter_
;
int32_t
block_size_
;
};
void
DepthToSpaceForNHWC
(
const
float
*
input
,
float
*
output
,
int
*
in_shape
,
int
*
out_shape
,
int
shape_size
,
int
block_size
);
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_DEPTH_TO_SPACE_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_ARG_MIN_MAX_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_ARG_MIN_MAX_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
#include "src/runtime/kernel/arm/opclib/arg_min_max_parameter.h"
void
ArgMinMax
(
const
void
*
input
,
void
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_ARG_MIN_MAX_H_
mindspore/lite/src/runtime/kernel/arm/opclib/arg_min_max_parameter.h
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_ARG_MIN_MAX_PARAMETER_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_ARG_MIN_MAX_PARAMETER_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
// One candidate element considered while selecting min/max values along an
// axis: its index within the reduced axis plus its value, stored in the
// union member matching the tensor's data type.
struct ArgElement {
  uint32_t index_;  // position of the element along the reduced axis
  union ArgData {
    int8_t i8_data_;
    int32_t i_data_;
    float f_data_;
  } data_;  // element value; the active member depends on data_type_ of the op
};
// Parameter block shared by the fp32 and int8 arg-min/arg-max kernels.
struct ArgMinMaxParameter {
  OpParameter op_parameter_;  // common header; must stay first for the reinterpret_cast pattern
  bool out_value_;            // presumably: emit values instead of indices — TODO confirm against kernel
  bool keep_dims_;            // presumably: keep the reduced axis with size 1 — TODO confirm
  bool get_max_;              // true selects arg-max, false selects arg-min
  int32_t axis_;              // axis to reduce over
  int32_t topk_;              // number of results per slice; 1 uses the fast single-result path
  int32_t axis_type_;
  int32_t dims_size_;         // number of dimensions in the input shape
  int32_t data_type_;         // equals to type_id
  int32_t in_strides_[DIMENSION_4D];   // input strides, one per dimension
  int32_t out_strides_[DIMENSION_4D];  // output strides, one per dimension
  ArgElement *arg_elements_;  // scratch buffer for candidate elements — owner/lifetime managed by the kernel, TODO confirm
};
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_ARG_MIN_MAX_PARAMETER_H_
mindspore/lite/src/runtime/kernel/arm/opclib/
fp32/
batch_to_space.cc
→
mindspore/lite/src/runtime/kernel/arm/opclib/batch_to_space.cc
浏览文件 @
df6707bd
...
...
@@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "src/runtime/kernel/arm/opclib/
fp32/
batch_to_space.h"
#include "src/runtime/kernel/arm/opclib/batch_to_space.h"
#include "src/runtime/kernel/arm/opclib/arithmetic_common.h"
void
BatchToSpaceNoCropForNHWC
(
const
void
*
input
,
void
*
output
,
const
int
*
in_shape
,
int
out_n
,
const
int
*
block
,
...
...
mindspore/lite/src/runtime/kernel/arm/opclib/
fp32/
batch_to_space.h
→
mindspore/lite/src/runtime/kernel/arm/opclib/batch_to_space.h
浏览文件 @
df6707bd
...
...
@@ -13,8 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_
FP32_
BATCH_TO_SPACE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_
FP32_
BATCH_TO_SPACE_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_BATCH_TO_SPACE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_BATCH_TO_SPACE_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
#define BATCH_TO_SPACE_BLOCK_SHAPE_SIZE 2
...
...
mindspore/lite/src/runtime/kernel/arm/opclib/
fp32/
depth_to_space.cc
→
mindspore/lite/src/runtime/kernel/arm/opclib/depth_to_space.cc
浏览文件 @
df6707bd
...
...
@@ -13,31 +13,30 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/opclib/
fp32/
depth_to_space.h"
#include
"src/runtime/kernel/arm/opclib/arithmetic_common.h"
#include "src/runtime/kernel/arm/opclib/depth_to_space.h"
#include
<string.h>
void
DepthToSpaceForNHWC
(
const
float
*
input
,
float
*
output
,
int
*
in_shape
,
int
*
out_shape
,
int
shape_size
,
int
block_size
)
{
int
*
in_strides
=
(
int
*
)(
malloc
(
sizeof
(
int
)
*
shape_size
));
ComputeStrides
(
in_shape
,
in_strides
,
shape_size
);
int
*
out_strides
=
(
int
*
)(
malloc
(
sizeof
(
int
)
*
shape_size
));
ComputeStrides
(
out_shape
,
out_strides
,
shape_size
);
void
DepthToSpaceForNHWC
(
const
void
*
input
,
void
*
output
,
int
*
in_shape
,
DepthToSpaceParameter
*
param
)
{
int32_t
block_size
=
param
->
block_size_
;
int32_t
in_shape_dim2
=
in_shape
[
2
];
int32_t
in_shape_dim1
=
in_shape
[
1
];
size_t
copy_size
=
block_size
*
param
->
out_stride_dim2_
*
param
->
data_type_size_
;
for
(
int
i
=
0
;
i
<
in_shape
[
0
];
++
i
)
{
size_t
in_offset_n
=
i
*
in_strides
[
0
]
;
size_t
out_offset_n
=
i
*
out_strides
[
0
]
;
for
(
int
j
=
0
;
j
<
in_shape
[
1
]
;
++
j
)
{
size_t
in_offset_h
=
in_offset_n
+
j
*
in_strides
[
1
]
;
size_t
out_offset_h
=
out_offset_n
+
j
*
block_size
*
out_strides
[
1
]
;
for
(
int
k
=
0
;
k
<
in_shape
[
2
]
;
++
k
)
{
size_t
in_offset_w
=
in_offset_h
+
k
*
in_strides
[
2
]
;
size_t
out_offset_w
=
out_offset_h
+
k
*
block_size
*
out_strides
[
2
]
;
size_t
in_offset_n
=
i
*
param
->
in_stride_dim0_
;
size_t
out_offset_n
=
i
*
param
->
out_stride_dim0_
;
for
(
int
j
=
0
;
j
<
in_shape
_dim1
;
++
j
)
{
size_t
in_offset_h
=
in_offset_n
+
j
*
param
->
in_stride_dim1_
;
size_t
out_offset_h
=
out_offset_n
+
j
*
block_size
*
param
->
out_stride_dim1_
;
for
(
int
k
=
0
;
k
<
in_shape
_dim2
;
++
k
)
{
size_t
in_offset_w
=
in_offset_h
+
k
*
param
->
in_stride_dim2_
;
size_t
out_offset_w
=
out_offset_h
+
k
*
block_size
*
param
->
out_stride_dim2_
;
for
(
int
l
=
0
;
l
<
block_size
;
++
l
)
{
memcpy
(
output
+
out_offset_w
+
l
*
out_strides
[
1
],
input
+
in_offset_w
+
l
*
block_size
*
out_strides
[
2
],
block_size
*
out_strides
[
2
]
*
4
);
size_t
out_offset
=
(
out_offset_w
+
l
*
param
->
out_stride_dim1_
)
*
param
->
data_type_size_
;
size_t
in_offset
=
(
in_offset_w
+
l
*
block_size
*
param
->
out_stride_dim2_
)
*
param
->
data_type_size_
;
memcpy
(
reinterpret_cast
<
int8_t
*>
(
output
)
+
out_offset
,
reinterpret_cast
<
const
int8_t
*>
(
input
)
+
in_offset
,
copy_size
);
}
}
}
}
free
(
out_strides
);
free
(
in_strides
);
}
mindspore/lite/src/runtime/kernel/arm/opclib/depth_to_space.h
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_DEPTH_TO_SPACE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_DEPTH_TO_SPACE_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
// Parameter block for the depth_to_space kernels (fp32 and int8).
// Strides are in elements; byte offsets are formed by multiplying with
// data_type_size_ inside DepthToSpaceForNHWC.
struct DepthToSpaceParameter {
  OpParameter op_parameter_;  // common header; must stay first for the reinterpret_cast pattern
  int32_t block_size_;        // spatial block edge length; validated > 0 by the fp32 kernel's Init
  int32_t in_stride_dim0_;    // input stride for dim 0 (N)
  int32_t in_stride_dim1_;    // input stride for dim 1 (H)
  int32_t in_stride_dim2_;    // input stride for dim 2 (W)
  int32_t out_stride_dim0_;   // output stride for dim 0 (N)
  int32_t out_stride_dim1_;   // output stride for dim 1 (H)
  int32_t out_stride_dim2_;   // output stride for dim 2 (W)
  uint8_t data_type_size_;    // bytes per element: sizeof(float) for fp32, sizeof(int8_t) for int8
};
void
DepthToSpaceForNHWC
(
const
void
*
input
,
void
*
output
,
int
*
in_shape
,
DepthToSpaceParameter
*
param
);
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_DEPTH_TO_SPACE_H_
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/arg_min_max.cc
浏览文件 @
df6707bd
此差异已折叠。
点击以展开。
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/arg_min_max.h
浏览文件 @
df6707bd
...
...
@@ -16,22 +16,18 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_ARG_MIN_MAX_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_ARG_MIN_MAX_H_
#ifdef ENABLE_NEON
#include <arm_neon.h>
#endif
#include "src/runtime/kernel/arm/opclib/op_base.h"
#include "src/runtime/kernel/arm/opclib/arg_min_max_parameter.h"
// For arg min, arg max.
struct
ArgMinMaxParameter
{
OpParameter
op_parameter_
;
int
axis_
;
int
topk_
;
int
axis_type_
;
bool
out_value_
;
bool
keep_dims_
;
}
;
void
ArgMax
(
const
float
*
input
,
const
int
*
shape
,
int
dims_number
,
int
axis
,
bool
out_value
,
float
*
output
);
void
ArgMin
(
const
float
*
input
,
const
int
*
shape
,
int
dims_number
,
int
axis
,
bool
out_value
,
float
*
output
);
void
ArgMax
(
const
float
*
input
,
float
*
output
,
ArgMinMaxParameter
*
param
,
int
pre_axis_count
,
int
axis_count
,
int
after_axis_count
);
void
ArgMin
(
const
float
*
input
,
float
*
output
,
ArgMinMaxParameter
*
param
,
int
pre_axis_count
,
int
axis_count
,
int
after_axis_count
)
;
void
ArgMaxDim0
(
const
float
*
input
,
float
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
)
;
void
ArgMinDim0
(
const
float
*
input
,
float
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
)
;
void
ArgMaxDim1
(
const
float
*
input
,
float
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
)
;
void
ArgMinDim1
(
const
float
*
input
,
float
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
)
;
void
ArgMaxDim2
(
const
float
*
input
,
float
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
)
;
void
ArgMinDim2
(
const
float
*
input
,
float
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
void
ArgMax
Dim3
(
const
float
*
input
,
float
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
void
ArgMin
Dim3
(
const
float
*
input
,
float
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_ARG_MIN_MAX_H_
mindspore/lite/src/runtime/kernel/arm/opclib/int8/arg_min_max.cc
0 → 100644
浏览文件 @
df6707bd
此差异已折叠。
点击以展开。
mindspore/lite/src/runtime/kernel/arm/opclib/int8/arg_min_max.h
0 → 100644
浏览文件 @
df6707bd
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_ARG_MIN_MAX_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_ARG_MIN_MAX_H_
#include "src/runtime/kernel/arm/opclib/arg_min_max_parameter.h"
void
ArgMax
(
const
int8_t
*
input
,
int8_t
*
output
,
ArgMinMaxParameter
*
param
,
int
pre_axis_count
,
int
axis_count
,
int
after_axis_count
);
void
ArgMin
(
const
int8_t
*
input
,
int8_t
*
output
,
ArgMinMaxParameter
*
param
,
int
pre_axis_count
,
int
axis_count
,
int
after_axis_count
);
void
ArgMaxDim0
(
const
int8_t
*
input
,
int8_t
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
void
ArgMinDim0
(
const
int8_t
*
input
,
int8_t
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
void
ArgMaxDim1
(
const
int8_t
*
input
,
int8_t
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
void
ArgMinDim1
(
const
int8_t
*
input
,
int8_t
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
void
ArgMaxDim2
(
const
int8_t
*
input
,
int8_t
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
void
ArgMinDim2
(
const
int8_t
*
input
,
int8_t
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
void
ArgMaxDim3
(
const
int8_t
*
input
,
int8_t
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
void
ArgMinDim3
(
const
int8_t
*
input
,
int8_t
*
output
,
const
int
*
in_shape
,
ArgMinMaxParameter
*
param
);
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_ARG_MIN_MAX_H_
mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc
浏览文件 @
df6707bd
...
...
@@ -32,7 +32,7 @@ STATUS TfliteArgmaxParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
// These are caffe attributes, set to default value.
attr
->
axisType
=
1
;
attr
->
outMaxValue
=
false
;
attr
->
topK
=
-
1
;
attr
->
topK
=
1
;
attr
->
keepDims
=
false
;
if
(
op
!=
nullptr
)
{
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录