magicwindyyd / mindspore (forked from MindSpore / mindspore, in sync with the fork source)
Commit bf4874fa
Authored on Jul 31, 2020 by mindspore-ci-bot
Committed by Gitee on Jul 31, 2020
!3772 pad modify
Merge pull request !3772 from zhaozhenlong/lite/issue/pad_assign_param
Parents: b9d60c56, 8e3eaae2
Showing 12 changed files with 34 additions and 65 deletions (+34, -65)
mindspore/lite/src/ops/ops.cc                                  +4   -0
mindspore/lite/src/ops/pad.cc                                  +0   -2
mindspore/lite/src/populate_parameter.cc                       +2   -0
mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc              +6   -17
mindspore/lite/src/runtime/kernel/arm/fp32/pad.h               +6   -3
mindspore/lite/src/runtime/kernel/arm/opclib/common_func.h     +10  -1
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/pad.cc       +4   -4
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/pad.h        +0   -1
mindspore/lite/src/runtime/kernel/arm/opclib/int8/pad.cc       +1   -0
mindspore/lite/src/runtime/kernel/arm/opclib/int8/pad.h        +0   -1
mindspore/lite/src/runtime/kernel/arm/opclib/offset_utils.h    +0   -34
mindspore/lite/src/runtime/kernel/arm/opclib/resize.cc         +1   -2
mindspore/lite/src/ops/ops.cc

@@ -127,6 +127,10 @@ Primitive *Primitive::CreatePrimitive(schema::Primitive *primitive) {
       return new lite::Flatten(const_cast<schema::Primitive *>(primitive));
     case schema::PrimitiveType_StridedSlice:
       return new lite::StridedSlice(const_cast<schema::Primitive *>(primitive));
+    case schema::PrimitiveType_Resize:
+      return new lite::Resize(const_cast<schema::Primitive *>(primitive));
+    case schema::PrimitiveType_OneHot:
+      return new lite::OneHot(const_cast<schema::Primitive *>(primitive));
     default:
       break;
   }
mindspore/lite/src/ops/pad.cc

@@ -37,14 +37,12 @@ int Pad::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Te
   if (paddings == nullptr) {
     return RET_NULL_PTR;
   }
-  MS_ASSERT(paddings->size() == kPaddingsSize);
   auto input = inputs.front();
   if (input == nullptr) {
     return RET_NULL_PTR;
   }
   auto input_shape = input->shape();
-  MS_ASSERT(input_shape.size() == kInputRank);
   std::vector<int> output_shape;
   for (size_t i = 0; i < input_shape.size(); i++) {
     auto shape = input_shape[i] + (*paddings)[2 * i] + (*paddings)[2 * i + 1];
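For reference, the shape arithmetic in Pad::InferShape above is simply output_dim[i] = input_dim[i] + paddings[2*i] + paddings[2*i+1]. The following minimal sketch (the helper name and main() are hypothetical, not part of the source) illustrates the same computation:

    #include <cstdio>
    #include <vector>

    // Illustrative sketch of the InferShape arithmetic: each output dimension grows
    // by the leading and trailing pad amounts chosen for that axis.
    std::vector<int> InferPaddedShape(const std::vector<int> &input_shape, const std::vector<int> &paddings) {
      std::vector<int> output_shape;
      for (size_t i = 0; i < input_shape.size(); i++) {
        output_shape.push_back(input_shape[i] + paddings[2 * i] + paddings[2 * i + 1]);
      }
      return output_shape;
    }

    int main() {
      // NHWC input 1x4x4x3 padded by 1 on each side of H and W.
      std::vector<int> in = {1, 4, 4, 3};
      std::vector<int> pads = {0, 0, 1, 1, 1, 1, 0, 0};
      for (int d : InferPaddedShape(in, pads)) printf("%d ", d);  // prints: 1 6 6 3
      return 0;
    }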
mindspore/lite/src/populate_parameter.cc

@@ -383,12 +383,14 @@ PadParameter *PopulatePadParameter(const lite::Primitive *primitive) {
     pad_param->constant_value_ = pad_node->constantValue();
   } else {
     MS_LOG(ERROR) << "Invalid padding mode: " << pad_param->pad_mode_;
     delete (pad_param);
     return nullptr;
   }
   auto size = pad_node->paddings()->size();
   if (size > MAX_PAD_SIZE) {
     MS_LOG(ERROR) << "Invalid padding size: " << size;
     delete (pad_param);
     return nullptr;
   }
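The size check above guards a fixed-capacity parameter struct. A standalone sketch of that guard-then-copy pattern (the struct, the capacity of 8, and the function names here are assumptions made for illustration, not the library's definitions):

    #include <cstdio>
    #include <vector>

    // Sketch only: a fixed-capacity struct like PadParameter can hold at most
    // kMaxPadSize padding values, so the serialized size must be validated first.
    constexpr int kMaxPadSize = 8;  // assumed value for the sketch

    struct PadParamSketch {
      int paddings[kMaxPadSize] = {0};
    };

    bool PopulatePaddings(const std::vector<int> &src, PadParamSketch *param) {
      if (src.size() > kMaxPadSize) {
        printf("Invalid padding size: %zu\n", src.size());
        return false;  // caller discards the parameter, as the real code does
      }
      for (size_t i = 0; i < src.size(); i++) {
        param->paddings[i] = src[i];
      }
      return true;
    }

    int main() {
      PadParamSketch param;
      bool ok = PopulatePaddings({0, 0, 1, 1, 1, 1, 0, 0}, &param);
      printf("ok=%d first H pad=%d\n", ok, param.paddings[2]);
      return 0;
    }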
mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc

@@ -33,8 +33,6 @@ namespace mindspore::kernel {
 namespace {
 constexpr int kInputNum = 1;
 constexpr int kOutputNum = 1;
-constexpr int kInputRank = 4;
-constexpr int kPaddingsSize = 8;
 }  // namespace

 int PadCPUKernel::Init() {

@@ -52,21 +50,14 @@ int PadCPUKernel::Init() {
   }
   auto rank = input->shape().size();
-  if (rank != kInputRank) {
-    MS_LOG(ERROR) << "Pad input rank should be " << kInputRank << ", got " << rank;
+  if (rank > DEFAULT_PAD_NDIMS) {
+    MS_LOG(ERROR) << "Pad input rank should <= " << DEFAULT_PAD_NDIMS << ", got " << rank;
     return RET_ERROR;
   }
-  if (paddings_size_ != kPaddingsSize) {
-    MS_LOG(ERROR) << "Pad op paddings size should be 2*input_rank: " << 2 * rank << " but got " << paddings_size_;
-    return RET_ERROR;
-  }
-  for (auto pad : paddings_) {
-    if (pad < 0) {
-      MS_LOG(ERROR) << "Pad op paddings should be >= 0, but got " << pad;
-      return RET_ERROR;
-    }
+  for (int i = 0; i < rank; i++) {
+    in_[DEFAULT_PAD_NDIMS - rank + i] = input->shape()[i];
+    out_[DEFAULT_PAD_NDIMS - rank + i] = output->shape()[i];
   }
   return RET_OK;
 }

@@ -87,10 +78,8 @@ int PadCPUKernel::RunImpl(int task_id) {
   auto input_data = reinterpret_cast<float *>(input->Data());
   auto output_data = reinterpret_cast<float *>(output->Data());
-  auto input_shape = input->shape().data();
-  auto output_shape = output->shape().data();
-  Pad(input_data, output_data, input_shape, output_shape, paddings_.data(), task_id, context_->threadNum);
+  Pad(input_data, output_data, in_, out_, pad_param_->paddings_, task_id, context_->threadNum);
   return RET_OK;
 }
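The Init() change above packs any input of rank up to DEFAULT_PAD_NDIMS into the trailing slots of a fixed 4-element shape array, leaving the leading slots at 1; a size-1 leading dimension adds no elements, so the memory layout is unchanged. A minimal sketch of that alignment (the constant and helper name are stand-ins, not the library's):

    #include <cstdio>
    #include <vector>

    // Sketch of the rank alignment done in PadCPUKernel::Init(): copy the shape into
    // the trailing slots of a fixed 4-element array, keep leading dimensions at 1.
    constexpr int kDefaultPadNdims = 4;  // stands in for DEFAULT_PAD_NDIMS

    void AlignTo4D(const std::vector<int> &shape, int aligned[kDefaultPadNdims]) {
      for (int i = 0; i < kDefaultPadNdims; i++) aligned[i] = 1;
      int rank = static_cast<int>(shape.size());
      for (int i = 0; i < rank; i++) {
        aligned[kDefaultPadNdims - rank + i] = shape[i];
      }
    }

    int main() {
      int aligned[kDefaultPadNdims];
      AlignTo4D({5, 7}, aligned);              // a rank-2 shape
      for (int d : aligned) printf("%d ", d);  // prints: 1 1 5 7
      return 0;
    }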
mindspore/lite/src/runtime/kernel/arm/fp32/pad.h

@@ -27,7 +27,9 @@ class PadCPUKernel : public LiteKernel {
  public:
   PadCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
-      : LiteKernel(parameter, inputs, outputs), context_(ctx) {}
+      : LiteKernel(parameter, inputs, outputs), context_(ctx) {
+    pad_param_ = reinterpret_cast<PadParameter *>(parameter);
+  }
   ~PadCPUKernel() {}

@@ -37,9 +39,10 @@ class PadCPUKernel : public LiteKernel {
   int RunImpl(int task_id);

  private:
-  std::vector<int> paddings_;
-  size_t paddings_size_;
   const lite::Context *context_;
+  const PadParameter *pad_param_;
+  int in_[4] = {1, 1, 1, 1};
+  int out_[4] = {1, 1, 1, 1};
 };
 }  // namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/opclib/common_func.h

@@ -41,6 +41,16 @@ void IndirectGemmFp32_Comm(float *output, const float *input, const float *weigh
 void IndirectGemmFp32(float *output, const float *input, const float *weight, const float *bias, size_t step,
                       int ic4, int output_channel, size_t offset, size_t relu, size_t relu6);
+
+inline int offset(const int *shape, const int dim0, const int dim1, const int dim2, const int dim3) {
+  return ((dim0 * shape[1] + dim1) * shape[2] + dim2) * shape[3] + dim3;
+}
+
+inline int offsetComm(const int *shape, const int dim0, const int dim1, const int dim2) {
+  return ((dim0 * shape[1] + dim1) * shape[2] + dim2) * shape[3];
+}
+
+inline int offset4d(const int *shape, const int *dims) { return offset(shape, dims[0], dims[1], dims[2], dims[3]); }
+
 #ifdef ENABLE_ARM64
 void BiasAdd(const float *bias, float *data, size_t oc4, size_t plan_size);
 void BiasAddRelu6(const float *bias, float *data, size_t oc4, size_t plan_size);

@@ -54,4 +64,3 @@ void Relu(float *data, size_t element4);
 #endif
 #endif  /* MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_COMMON_FUNC_H_ */
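The helpers added above compute flat indices into a row-major 4-D buffer: offsetComm covers the first three coordinates and offset adds the innermost one. A self-contained check of that arithmetic (the helpers are copied into the snippet only so it compiles on its own):

    #include <cassert>
    #include <cstdio>

    // Row-major 4-D index helpers, same formulas as in common_func.h above.
    inline int offset(const int *shape, int dim0, int dim1, int dim2, int dim3) {
      return ((dim0 * shape[1] + dim1) * shape[2] + dim2) * shape[3] + dim3;
    }
    inline int offsetComm(const int *shape, int dim0, int dim1, int dim2) {
      return ((dim0 * shape[1] + dim1) * shape[2] + dim2) * shape[3];
    }
    inline int offset4d(const int *shape, const int *dims) {
      return offset(shape, dims[0], dims[1], dims[2], dims[3]);
    }

    int main() {
      int shape[4] = {2, 3, 4, 5};  // e.g. an NHWC buffer
      int dims[4] = {1, 2, 3, 4};
      // Flat index in a row-major 2x3x4x5 buffer: ((1*3 + 2)*4 + 3)*5 + 4 = 119.
      assert(offset4d(shape, dims) == 119);
      // offset4d is offsetComm plus the innermost coordinate.
      assert(offset4d(shape, dims) == offsetComm(shape, 1, 2, 3) + 4);
      printf("offset4d = %d\n", offset4d(shape, dims));
      return 0;
    }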
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/pad.cc

@@ -15,6 +15,7 @@
  */
 #include "src/runtime/kernel/arm/opclib/fp32/pad.h"
+#include "src/runtime/kernel/arm/opclib/common_func.h"

 void Pad(const float *input_data, float *output_data, const int *input_shape, const int *output_shape,
          const int *paddings, const int tid, const int thread_num) {

@@ -25,10 +26,9 @@ void Pad(const float *input_data, float *output_data, const int *input_shape, co
     out[1] = in[1] + paddings[2];
     for (in[2] = 0; in[2] < input_shape[2]; in[2]++) {
       out[2] = in[2] + paddings[4];
-      for (in[3] = 0; in[3] < input_shape[3]; in[3]++) {
-        out[3] = in[3] + paddings[6];
-        output_data[offset4d(output_shape, out)] = input_data[offset4d(input_shape, in)];
-      }
+      float *dst = output_data + offset(output_shape, out[0], out[1], out[2], paddings[6]);
+      const float *src = input_data + offset(input_shape, in[0], in[1], in[2], 0);
+      memcpy(dst, src, input_shape[3] * sizeof(float));
     }
   }
 }
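The rewrite above replaces the per-element inner loop with a single memcpy: elements along the last dimension are contiguous in a row-major buffer, so one copy of input_shape[3] floats lands exactly where the removed loop would have written. A standalone sketch (hypothetical shapes, not library code) that checks both paths produce the same output:

    #include <cassert>
    #include <cstring>
    #include <vector>

    // Same row-major index formula as the offset() helper in common_func.h.
    inline int offset(const int *shape, int d0, int d1, int d2, int d3) {
      return ((d0 * shape[1] + d1) * shape[2] + d2) * shape[3] + d3;
    }

    int main() {
      const int in_shape[4] = {1, 2, 2, 3};
      const int out_shape[4] = {1, 4, 4, 5};
      const int paddings[8] = {0, 0, 1, 1, 1, 1, 1, 1};  // pad H, W, C by 1 on each side
      std::vector<float> input(1 * 2 * 2 * 3);
      for (size_t i = 0; i < input.size(); i++) input[i] = static_cast<float>(i);
      std::vector<float> by_loop(1 * 4 * 4 * 5, 0.0f), by_memcpy(1 * 4 * 4 * 5, 0.0f);

      for (int n = 0; n < in_shape[0]; n++) {
        for (int h = 0; h < in_shape[1]; h++) {
          for (int w = 0; w < in_shape[2]; w++) {
            // Element-by-element version (the removed inner loop).
            for (int c = 0; c < in_shape[3]; c++) {
              by_loop[offset(out_shape, n + paddings[0], h + paddings[2], w + paddings[4], c + paddings[6])] =
                  input[offset(in_shape, n, h, w, c)];
            }
            // Row-at-a-time version (the new code path).
            float *dst = by_memcpy.data() +
                         offset(out_shape, n + paddings[0], h + paddings[2], w + paddings[4], paddings[6]);
            const float *src = input.data() + offset(in_shape, n, h, w, 0);
            memcpy(dst, src, in_shape[3] * sizeof(float));
          }
        }
      }
      assert(by_loop == by_memcpy);  // both strategies fill the padded buffer identically
      return 0;
    }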
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/pad.h

@@ -21,7 +21,6 @@
 #endif
 #include <memory.h>
 #include <float.h>
-#include "src/runtime/kernel/arm/opclib/offset_utils.h"
 #include "src/runtime/kernel/arm/opclib/op_base.h"
 #include "src/runtime/kernel/arm/opclib/pad_parameter.h"
mindspore/lite/src/runtime/kernel/arm/opclib/int8/pad.cc

@@ -15,6 +15,7 @@
  */
 #include "src/runtime/kernel/arm/opclib/int8/pad.h"
+#include "src/runtime/kernel/arm/opclib/common_func.h"

 void PadConstant4D(const int8_t *in_data, int8_t *out_data, const int32_t *in_dims, const int32_t *out_dims,
                    const int32_t *paddings) {
mindspore/lite/src/runtime/kernel/arm/opclib/int8/pad.h

@@ -19,7 +19,6 @@
 #include <string.h>
 #include "src/runtime/kernel/arm/opclib/op_base.h"
-#include "src/runtime/kernel/arm/opclib/offset_utils.h"
 #include "src/runtime/kernel/arm/opclib/pad_parameter.h"

 void PadConstant4D(const int8_t *in_data, int8_t *out_data, const int32_t *in_dims, const int32_t *out_dims,
mindspore/lite/src/runtime/kernel/arm/opclib/offset_utils.h (deleted, 100644 → 0)

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_OFFSET_UTILS_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_OFFSET_UTILS_H_

#ifdef ENABLE_NEON
#include <arm_neon.h>
#endif

inline int offset(const int *shape, const int dim0, const int dim1, const int dim2, const int dim3) {
  return ((dim0 * shape[1] + dim1) * shape[2] + dim2) * shape[3] + dim3;
}

inline int offsetComm(const int *shape, const int dim0, const int dim1, const int dim2) {
  return ((dim0 * shape[1] + dim1) * shape[2] + dim2) * shape[3];
}

inline int offset4d(const int *shape, const int *dims) { return offset(shape, dims[0], dims[1], dims[2], dims[3]); }

#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_OFFSET_UTILS_H_
mindspore/lite/src/runtime/kernel/arm/opclib/resize.cc

@@ -15,8 +15,7 @@
  */
 #include <math.h>
 #include "src/runtime/kernel/arm/opclib/resize.h"
-#include "src/runtime/kernel/arm/opclib/offset_utils.h"
-#include "src/runtime/kernel/arm/opclib/op_base.h"
+#include "src/runtime/kernel/arm/opclib/common_func.h"

 int ResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape,
                    bool align_corners, int tid, int thread_num) {