magicwindyyd / mindspore (forked from MindSpore / mindspore)

Commit babff262
Authored on Aug 22, 2020 by chenjianping

stack support int32

Parent: 50877b58
Showing 4 changed files with 53 additions and 13 deletions.
mindspore/lite/nnacl/fp32/stack.c    +28  -2
mindspore/lite/nnacl/fp32/stack.h    +2   -0
mindspore/lite/src/ops/stack.cc      +8   -6
mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc    +15  -5
mindspore/lite/nnacl/fp32/stack.c
@@ -17,7 +17,7 @@
 #include "nnacl/fp32/stack.h"
 #include "nnacl/arithmetic_common.h"
 
-void DoStack(const float *const *inputs, size_t input_num, int *in_shape, size_t shape_size, int axis, float *output) {
+size_t GetStackCopyNum(int axis, int *in_shape, size_t shape_size) {
   size_t one_input_size = 1;
   for (size_t i = 0; i < shape_size; ++i) {
     one_input_size *= in_shape[i];
@@ -26,11 +26,37 @@ void DoStack(const float *const *inputs, size_t input_num, int *in_shape, size_t
   ComputeStrides(in_shape, in_strides, shape_size);
   size_t copy_num = axis > 0 ? in_strides[axis - 1] : one_input_size;
-  size_t copy_size = copy_num * sizeof(float);
+  return copy_num;
+}
+
+size_t GetStackPreAxisCount(const int *in_shape, int axis) {
   size_t pre_axis_count = 1;
   for (size_t i = 0; i < axis; ++i) {
     pre_axis_count *= in_shape[i];
   }
+  return pre_axis_count;
+}
+
+void DoStack(const float *const *inputs, size_t input_num, int *in_shape, size_t shape_size, int axis, float *output) {
+  size_t copy_num = GetStackCopyNum(axis, in_shape, shape_size);
+  size_t copy_size = copy_num * sizeof(float);
+  size_t pre_axis_count = GetStackPreAxisCount(in_shape, axis);
   size_t in_offset = 0;
   size_t out_offset = 0;
   for (size_t i = 0; i < pre_axis_count; ++i) {
     for (size_t j = 0; j < input_num; ++j) {
       memcpy(output + out_offset, inputs[j] + in_offset, copy_size);
       out_offset += copy_num;
     }
     in_offset += copy_num;
   }
 }
+
+void DoStackInt32(const int32_t *const *inputs, size_t input_num, int *in_shape, size_t shape_size, int axis, int32_t *output) {
+  size_t copy_num = GetStackCopyNum(axis, in_shape, shape_size);
+  size_t copy_size = copy_num * sizeof(int32_t);
+  size_t pre_axis_count = GetStackPreAxisCount(in_shape, axis);
+  size_t in_offset = 0;
+  size_t out_offset = 0;
+  for (size_t i = 0; i < pre_axis_count; ++i) {
...
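For reference, the following is a minimal standalone driver, not part of the commit, that exercises the new DoStackInt32 by stacking two 2x3 int32 tensors along axis 0. It assumes the nnacl headers are on the include path and that stack.c is compiled and linked in; the file layout and values are made up for illustration.

// Hypothetical driver (not from the commit): stack two 2x3 int32 tensors
// along axis 0 using the new DoStackInt32 entry point.
#include <cstdint>
#include <cstdio>
#include "nnacl/fp32/stack.h"

int main() {
  int32_t a[6] = {1, 2, 3, 4, 5, 6};
  int32_t b[6] = {7, 8, 9, 10, 11, 12};
  const int32_t *inputs[2] = {a, b};
  int in_shape[2] = {2, 3};   // every input tensor is 2x3
  int32_t output[12] = {0};   // the stacked result has shape 2x2x3

  // With axis == 0, copy_num equals one whole input and pre_axis_count is 1,
  // so the inputs are copied back to back into the output buffer.
  DoStackInt32(inputs, 2, in_shape, 2, 0, output);

  for (int i = 0; i < 12; ++i) {
    printf("%d ", output[i]);  // prints 1 2 3 4 5 6 7 8 9 10 11 12
  }
  printf("\n");
  return 0;
}

The same call pattern with float buffers and DoStack covers the existing fp32 path.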
mindspore/lite/nnacl/fp32/stack.h
@@ -27,6 +27,8 @@ typedef struct StackParameter {
 extern "C" {
 #endif
 void DoStack(const float *const *inputs, size_t input_num, int *in_shape, size_t shape_size, int axis, float *output);
+void DoStackInt32(const int32_t *const *inputs, size_t input_num, int *in_shape, size_t shape_size, int axis, int32_t *output);
 #ifdef __cplusplus
 }
 #endif
mindspore/lite/src/ops/stack.cc
@@ -56,7 +56,8 @@ int Stack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::
     return RET_PARAM_INVALID;
   }
   auto input = inputs.at(0);
-  outputs[0]->set_data_type(input->data_type());
+  auto input0_data_type = input->data_type();
+  outputs[0]->set_data_type(input0_data_type);
   outputs[0]->SetFormat(input->GetFormat());
   if (!GetInferFlag()) {
     return RET_OK;
@@ -69,12 +70,8 @@ int Stack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::
     MS_LOG(ERROR) << "Invalid axis " << GetAxis();
     return RET_PARAM_INVALID;
   }
   schema::Format input0_format = input->GetFormat();
   for (size_t i = 1; i < inputs.size(); ++i) {
-    if (inputs[i]->GetFormat() != input0_format) {
-      MS_LOG(ERROR) << "All inputs should have the same format!";
-      return RET_PARAM_INVALID;
-    }
     auto input_shape_tmp = inputs[i]->shape();
     if (input_shape_tmp.size() != input_shape.size()) {
       MS_LOG(ERROR) << "All input shape size should be the same!";
@@ -86,6 +83,11 @@ int Stack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::
         return RET_PARAM_INVALID;
       }
     }
+    if (inputs[i]->data_type() != input0_data_type) {
+      MS_LOG(ERROR) << "All inputs should have the same data type! input[" << i << "] data type = " << inputs[i]->data_type();
+      return RET_PARAM_INVALID;
+    }
   }
   output_shape.insert(output_shape.begin() + axis, inputs.size());
   outputs[0]->set_shape(output_shape);
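To make the inferred output shape concrete, here is a small hypothetical sketch, not code from the commit (StackOutputShape is an invented helper), that mirrors the output_shape.insert(...) rule above: stacking input_num same-shaped tensors at axis inserts input_num into the common shape at that position.

// Hypothetical helper mirroring the shape rule used in Stack::InferShape.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int32_t> StackOutputShape(const std::vector<int32_t> &input_shape, std::size_t input_num, int32_t axis) {
  std::vector<int32_t> output_shape = input_shape;
  output_shape.insert(output_shape.begin() + axis, static_cast<int32_t>(input_num));
  return output_shape;
}

int main() {
  // Stacking three 4x5 tensors at axis 1 yields a 4x3x5 tensor.
  for (int32_t dim : StackOutputShape({4, 5}, 3, 1)) {
    std::cout << dim << " ";  // prints 4 3 5
  }
  std::cout << std::endl;
  return 0;
}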
mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc
@@ -49,12 +49,21 @@ int StackCPUKernel::Run() {
   }
   size_t inputs_num = in_tensors_.size();
   auto input0_shape = in_tensors_[0]->shape();
-  auto *output_data = reinterpret_cast<float *>(out_tensors_[0]->Data());
-  float *inputs[inputs_num];
-  for (size_t i = 0; i < inputs_num; ++i) {
-    inputs[i] = reinterpret_cast<float *>(in_tensors_[i]->Data());
+  if (in_tensors_[0]->data_type() == kNumberTypeFloat32 || in_tensors_[0]->data_type() == kNumberTypeFloat) {
+    auto *output_data = reinterpret_cast<float *>(out_tensors_[0]->Data());
+    float *inputs[inputs_num];
+    for (size_t i = 0; i < inputs_num; ++i) {
+      inputs[i] = reinterpret_cast<float *>(in_tensors_[i]->Data());
+    }
+    DoStack(inputs, inputs_num, input0_shape.data(), input0_shape.size(), axis_, output_data);
+  } else {
+    auto *output_data = reinterpret_cast<int32_t *>(out_tensors_[0]->Data());
+    int32_t *inputs[inputs_num];
+    for (size_t i = 0; i < inputs_num; ++i) {
+      inputs[i] = reinterpret_cast<int32_t *>(in_tensors_[i]->Data());
+    }
+    DoStackInt32(inputs, inputs_num, input0_shape.data(), input0_shape.size(), axis_, output_data);
   }
-  DoStack(inputs, inputs_num, input0_shape.data(), input0_shape.size(), axis_, output_data);
   return RET_OK;
 }
@@ -85,4 +94,5 @@ kernel::LiteKernel *CpuStackFp32KernelCreator(const std::vector<lite::tensor::Te
 }
 REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Stack, CpuStackFp32KernelCreator)
+REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Stack, CpuStackFp32KernelCreator)
 }  // namespace mindspore::kernel