Commit 59c4f31f
Authored on Aug 10, 2020 by yangruoqi713
[MS][LITE] fix bug of arm cpu int8 op: conv_depthwise
Parent 99ffe64b
Showing 2 changed files with 17 additions and 18 deletions (+17, -18).
mindspore/lite/src/runtime/kernel/arm/nnacl/int8/conv_depthwise_int8.cc  (+16, -17)
mindspore/lite/tools/common/node_util.cc  (+1, -1)
mindspore/lite/src/runtime/kernel/arm/nnacl/int8/conv_depthwise_int8.cc
...
...
@@ -159,18 +159,17 @@ void ConvDwInt8(int8_t *output_data, const int16_t *input_data, const int16_t *w
   if (sliding->right_ > sliding->left_ && sliding->bottom_ > sliding->top_) {
     int in_h_start = sliding->top_ * conv_param->stride_h_ - conv_param->pad_h_;
     int in_w_start = sliding->left_ * conv_param->stride_w_ - conv_param->pad_w_;
-    const int16_t *in_t = src_data + in_h_start * sliding->in_h_step_ + in_w_start * C4NUM;
-    int8_t *out_t = dst_data + sliding->top_ * sliding->out_h_step_ + sliding->left_ * C4NUM;
+    const int16_t *in_t = src_data + in_h_start * sliding->in_h_step_ + in_w_start * sliding->block_channel_;
+    int8_t *out_t = dst_data + sliding->top_ * sliding->out_h_step_ + sliding->left_ * sliding->block_channel_;
 #ifdef ENABLE_ARM64
-    ConvDwInt8Center(out_t, in_t, weight, bias, sliding->bottom_ - sliding->top_, sliding->right_ - sliding->left_,
-                     conv_param->kernel_h_, conv_param->kernel_w_, sliding->out_h_step_ * sizeof(int8_t),
-                     sliding->block_channel_ * sizeof(int8_t), sliding->in_sh_step_ * sizeof(int16_t),
-                     sliding->in_sw_step_ * sizeof(int16_t), sliding->in_kh_step_ * sizeof(int16_t),
-                     sliding->in_kw_step_ * sizeof(int16_t), conv_param->conv_quant_arg_.quant_multiplier_[0],
-                     conv_param->conv_quant_arg_.left_shift_[0], conv_param->conv_quant_arg_.right_shift_[0],
-                     conv_param->conv_quant_arg_.quant_args_[2][0].zp_, conv_param->conv_quant_arg_.out_act_min_[0],
-                     conv_param->conv_quant_arg_.out_act_max_[0]);
+    ConvDwInt8Center(out_t, in_t, weight, bias, sliding->bottom_ - sliding->top_, sliding->right_ - sliding->left_,
+                     conv_param->kernel_h_, conv_param->kernel_w_, sliding->out_h_step_ * sizeof(int8_t),
+                     sliding->block_channel_ * sizeof(int8_t), sliding->in_sh_step_ * sizeof(int16_t),
+                     sliding->in_sw_step_ * sizeof(int16_t), sliding->in_kh_step_ * sizeof(int16_t),
+                     sliding->in_kw_step_ * sizeof(int16_t), conv_param->conv_quant_arg_.quant_multiplier_[0],
+                     conv_param->conv_quant_arg_.left_shift_[0], conv_param->conv_quant_arg_.right_shift_[0],
+                     conv_param->conv_quant_arg_.quant_args_[2][0].zp_, conv_param->conv_quant_arg_.out_act_min_[0],
+                     conv_param->conv_quant_arg_.out_act_max_[0]);
 #else
     DepthwiseCenterInt8(out_t, in_t, weight, bias, sliding->bottom_ - sliding->top_, sliding->right_ - sliding->left_, ...
...
@@ -315,15 +314,15 @@ void DeconvDwInt8(int8_t *output_data, int32_t *output_buffer, const int16_t *in
   if (sliding->right_ > sliding->left_ && sliding->bottom_ > sliding->top_) {
     int oh_h_start = sliding->top_ * conv_param->stride_h_ - conv_param->pad_h_;
     int oh_w_start = sliding->left_ * conv_param->stride_w_ - conv_param->pad_w_;
-    int32_t *out_t = output_buffer + oh_h_start * sliding->in_h_step_ + oh_w_start * C4NUM;
+    int32_t *out_t = output_buffer + oh_h_start * sliding->in_h_step_ + oh_w_start * sliding->block_channel_;
     const int16_t *in_t = src_data + sliding->top_ * sliding->out_h_step_ + sliding->left_ * sliding->block_channel_;
 #ifdef ENABLE_ARM64
-    DeconvDwInt8Center(out_t, in_t, weight, sliding->bottom_ - sliding->top_, sliding->right_ - sliding->left_,
-                       conv_param->kernel_h_, conv_param->kernel_w_, sliding->out_h_step_ * sizeof(int16_t),
-                       sliding->block_channel_ * sizeof(int16_t), sliding->in_sh_step_ * sizeof(int32_t),
-                       sliding->in_sw_step_ * sizeof(int32_t), sliding->in_kh_step_ * sizeof(int32_t),
-                       sliding->in_kw_step_ * sizeof(int32_t));
+    DeconvDwInt8Center(out_t, in_t, weight, sliding->bottom_ - sliding->top_, sliding->right_ - sliding->left_,
+                       conv_param->kernel_h_, conv_param->kernel_w_, sliding->out_h_step_ * sizeof(int16_t),
+                       sliding->block_channel_ * sizeof(int16_t), sliding->in_sh_step_ * sizeof(int32_t),
+                       sliding->in_sw_step_ * sizeof(int32_t), sliding->in_kh_step_ * sizeof(int32_t),
+                       sliding->in_kw_step_ * sizeof(int32_t));
 #else
     DeconvDepthwiseCenterInt8(out_t, in_t, weight, sliding->bottom_ - sliding->top_, sliding->right_ - sliding->left_,
                               conv_param->kernel_h_, conv_param->kernel_w_, ...
...
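Both hunks make the same correction: the row base pointers into the input and output buffers were advanced by the fixed four-element block C4NUM, while the rest of the kernel steps through memory in units of sliding->block_channel_; the fix uses block_channel_ consistently. Below is a minimal, self-contained sketch of why the two strides diverge; the sizes kBlockChannel and kInHStep are invented for illustration and are not taken from the MindSpore sources.

#include <cstdint>
#include <cstdio>

// Hypothetical sizes for illustration only (not from the MindSpore sources).
constexpr int C4NUM = 4;                      // fixed 4-channel block
constexpr int kBlockChannel = 8;              // runtime channel block width
constexpr int kInHStep = 16 * kBlockChannel;  // elements per padded input row

int main() {
  static int16_t src_data[64 * kInHStep] = {};
  const int in_h_start = 2;
  const int in_w_start = 3;

  // Old indexing: the column offset advances by the fixed 4-element block.
  const int16_t *in_old = src_data + in_h_start * kInHStep + in_w_start * C4NUM;
  // Fixed indexing: the column offset advances by the actual channel block width,
  // matching the layout the rest of the kernel assumes.
  const int16_t *in_new = src_data + in_h_start * kInHStep + in_w_start * kBlockChannel;

  // Whenever the channel block is wider than 4, the two pointers differ,
  // so the old code started each row at the wrong column.
  std::printf("offset old=%td new=%td\n", in_old - src_data, in_new - src_data);
  return 0;
}

With kBlockChannel = 8, the old base pointer lands 3 * 4 = 12 elements into the row instead of 3 * 8 = 24, so every column after the first reads shifted data.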
mindspore/lite/tools/common/node_util.cc
...
...
@@ -96,7 +96,7 @@ static const std::vector<schema::PrimitiveType> nhwcOpList = {
     schema::PrimitiveType_Conv2D,          schema::PrimitiveType_DeConv2D,
     schema::PrimitiveType_DepthwiseConv2D, schema::PrimitiveType_DeDepthwiseConv2D,
     schema::PrimitiveType_Pooling,         schema::PrimitiveType_Resize,
-    schema::PrimitiveType_FusedBatchNorm};
+    schema::PrimitiveType_BatchNorm};
 static const std::vector<schema::PrimitiveType> fp32FullOpList = {
     schema::PrimitiveType_Concat, schema::PrimitiveType_Add, ...
...
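The node_util.cc change swaps which batch-norm primitive sits in nhwcOpList. The diff does not show how the list is consulted, so the following is only a generic, hypothetical sketch of a membership check over such a list; IsNhwcOp and the trimmed-down enum are invented names, not MindSpore's API.

#include <algorithm>
#include <vector>

// Hypothetical, trimmed-down enum standing in for schema::PrimitiveType.
enum class PrimitiveType { Conv2D, DepthwiseConv2D, BatchNorm, FusedBatchNorm, Concat };

// Hypothetical helper: true if the op type appears in the NHWC op list.
bool IsNhwcOp(PrimitiveType type, const std::vector<PrimitiveType> &nhwc_list) {
  return std::find(nhwc_list.begin(), nhwc_list.end(), type) != nhwc_list.end();
}

int main() {
  // After this commit, BatchNorm (rather than FusedBatchNorm) is listed.
  const std::vector<PrimitiveType> nhwc_list = {PrimitiveType::Conv2D, PrimitiveType::DepthwiseConv2D,
                                                PrimitiveType::BatchNorm};
  return IsNhwcOp(PrimitiveType::BatchNorm, nhwc_list) ? 0 : 1;
}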