magicwindyyd/mindspore (forked from MindSpore/mindspore)

Commit 73ab2433
Authored Sep 04, 2020 by zhaozhenlong
Parent: fbf8a3bb

fix codex

Showing 12 changed files with 44 additions and 23 deletions (+44 −23)
mindspore/lite/nnacl/fp32/resize.c                                                    +2 −2
mindspore/lite/nnacl/fp32/roi_pooling.c                                               +1 −0
mindspore/lite/nnacl/int8/leaky_relu_int8.c                                           +9 −3
mindspore/lite/nnacl/int8/leaky_relu_int8.h                                           +1 −1
mindspore/lite/nnacl/int8/resize.c                                                    +13 −3
mindspore/lite/nnacl/int8/resize.h                                                    +1 −1
mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc                        +1 −0
mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc                         +6 −1
mindspore/lite/src/runtime/thread_pool.c                                              +2 −3
model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.cpp           +2 −0
model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h             +2 −5
model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp  +4 −4
mindspore/lite/nnacl/fp32/resize.c

@@ -91,13 +91,13 @@ int ResizeBilinear(const float *input_data, float *output_data, const int *input
       int y_bottom = y_bottoms[h];
       int y_top = y_tops[h];
       float y_bottom_weight = y_bottom_weights[h];
-      float y_top_weight = 1.0f - y_bottom_weight;
+      const float y_top_weight = 1.0f - y_bottom_weight;
       for (w = 0; w < new_width; w++) {
         int x_left = x_lefts[w];
         int x_right = x_rights[w];
         float x_left_weight = x_left_weights[w];
-        float x_right_weight = 1.0f - x_left_weight;
+        const float x_right_weight = 1.0f - x_left_weight;
         float top_left_weight = y_top_weight * x_left_weight;
         float top_right_weight = y_top_weight * x_right_weight;
         float bottom_left_weight = y_bottom_weight * x_left_weight;
mindspore/lite/nnacl/fp32/roi_pooling.c

@@ -41,6 +41,7 @@ int ROIPooling(float *in_ptr, float *out_ptr, float *roi, int tid, ROIPoolingPar
   for (int i = roi_st; i < roi_end; ++i) {
     int roi_batch_ind = (int)roi[roi_ind_st];  // batch_index
     if (roi_batch_ind >= batch_size) {
+      free(max_c);
       return NNACL_ERRCODE_INDEX_OUT_OF_RANGE;
     }
     int roi_start_h = (int)roundf(roi[roi_ind_st + 1] * scale);  // top-left x1
mindspore/lite/nnacl/int8/leaky_relu_int8.c

@@ -15,17 +15,21 @@
  */
 #include "nnacl/int8/leaky_relu_int8.h"
+#include "nnacl/errorcode.h"

-void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_prelu_parm, int task_id) {
+int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_prelu_parm, int task_id) {
   if (quant_prelu_parm == NULL) {
-    return;
+    return NNACL_NULL_PTR;
   }
   float output_scale = quant_prelu_parm->quant_arg.out_args_.scale_;
   int output_zp = quant_prelu_parm->quant_arg.out_args_.zp_;
   const float output_inverse_scale = 1.f / output_scale;
   int output_dim = quant_prelu_parm->input_dim_;
-  QuantArg *input_quant = NULL;
+  QuantArg *input_quant = malloc(sizeof(QuantArg) * output_dim);
+  if (input_quant == NULL) {
+    return NNACL_NULL_PTR;
+  }
   for (int i = 0; i < output_dim; i++) {
     input_quant[i].scale_ = quant_prelu_parm->quant_arg.in_args_.scale_;
     input_quant[i].zp_ = quant_prelu_parm->quant_arg.in_args_.zp_;
@@ -56,4 +60,6 @@ void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant
       }
     }
   }
+  free(input_quant);
+  return NNACL_OK;
 }
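The hunk above is representative of the whole commit: kernels that used to return void now report an NNACL status code, and every temporary buffer is released on each exit path. Below is a minimal standalone sketch of that pattern; the function name, status values, and buffer are illustrative only, not part of the nnacl API.

#include <cstdlib>

// Illustrative status codes, standing in for NNACL_OK / NNACL_NULL_PTR.
enum { kOk = 0, kNullPtr = 1 };

// Doubles each input byte through a temporary int buffer.
// Returns a status code and frees the buffer on every path that allocated it.
static int ScaleBuffer(const signed char *in, signed char *out, int n) {
  if (in == NULL || out == NULL) {
    return kNullPtr;  // report the bad argument instead of a silent void return
  }
  int *tmp = (int *)malloc(sizeof(int) * n);
  if (tmp == NULL) {
    return kNullPtr;  // allocation failure is surfaced to the caller
  }
  for (int i = 0; i < n; ++i) {
    tmp[i] = in[i] * 2;
    out[i] = (signed char)tmp[i];
  }
  free(tmp);  // released on the success path as well
  return kOk;
}

Callers are expected to check the returned code, as the leaky_relu_int8.cc hunk further down does with DoLeakReluInt8.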
mindspore/lite/nnacl/int8/leaky_relu_int8.h

@@ -23,7 +23,7 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_Prelu_parm, int task_id);
+int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_Prelu_parm, int task_id);
 #ifdef __cplusplus
 }
 #endif
mindspore/lite/nnacl/int8/resize.c

@@ -101,8 +101,14 @@ int ResizeBilinearInt8WithFloatWeight(const int8_t *input_data, int8_t *output_d
   int32_t new_height = output_shape[1];
   int32_t new_width = output_shape[2];
   float height_scale, width_scale;
-  ComputeScaleFloat(in_h, new_height, align_corners, &height_scale);
-  ComputeScaleFloat(in_w, new_width, align_corners, &width_scale);
+  int ret = ComputeScaleFloat(in_h, new_height, align_corners, &height_scale);
+  if (ret != NNACL_OK) {
+    return ret;
+  }
+  ret = ComputeScaleFloat(in_w, new_width, align_corners, &width_scale);
+  if (ret != NNACL_OK) {
+    return ret;
+  }

   int n, h, w, c;
   for (n = 0; n < in_n; n++) {
@@ -189,11 +195,15 @@ void ComputeInterpolationArgs(const int32_t pos, const int32_t scale, const int3
   *scaled_high_weight = *scaled_pos - (1 << 10) * (*low);
 }

-void ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale) {
+int ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale) {
+  if (out_value == 0) {
+    return NNACL_ERRCODE_DIVISOR_ZERO;
+  }
   *scale = (float)in_value / out_value;
   if (align_corners && out_value > 1) {
     *scale = (float)(in_value - 1) / (out_value - 1);
  }
+  return NNACL_OK;
 }

 void ComputeInterpolationArgsFloatWeight(const int32_t pos, const float scale, const int32_t size, float *actual_pos,
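For reference, the scale that ComputeScaleFloat produces is in_value / out_value, or (in_value − 1) / (out_value − 1) when align_corners is set, and the new guard rejects out_value == 0 before that division happens. A small self-contained sketch with concrete numbers (not the library function itself; the -1 error value stands in for NNACL_ERRCODE_DIVISOR_ZERO):

#include <cstdio>

// Mirrors the semantics shown in the hunk above.
static int ComputeScaleSketch(int in_value, int out_value, bool align_corners, float *scale) {
  if (out_value == 0) {
    return -1;  // would otherwise divide by zero below
  }
  *scale = (float)in_value / out_value;
  if (align_corners && out_value > 1) {
    *scale = (float)(in_value - 1) / (out_value - 1);  // endpoints map exactly onto endpoints
  }
  return 0;
}

int main() {
  float s = 0.0f;
  ComputeScaleSketch(4, 8, false, &s);
  printf("default:       %.4f\n", s);  // 4 / 8 = 0.5000
  ComputeScaleSketch(4, 8, true, &s);
  printf("align_corners: %.4f\n", s);  // 3 / 7 ≈ 0.4286
  return 0;
}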
mindspore/lite/nnacl/int8/resize.h

@@ -40,7 +40,7 @@ void ComputeScale(const int32_t in_value, const int32_t out_value, const bool al
 void ComputeInterpolationArgs(const int32_t pos, const int32_t scale, const int32_t size, int32_t *scaled_pos,
                               int32_t *low, int32_t *scaled_low_weight, int32_t *high, int32_t *scaled_high_weight);

-void ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale);
+int ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale);

 void ComputeInterpolationArgsFloatWeight(const int32_t pos, const float scale, const int32_t size, float *actual_pos,
                                          int32_t *low, float *low_weight, int32_t *high, float *high_weight);
mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc

@@ -348,6 +348,7 @@ int ConvolutionBaseCPUKernel::RestoreFilter(lite::tensor::Tensor *input_tensor)
   size_t channels = static_cast<size_t>(input_tensor->Batch());
   if (input_tensor->GetQuantParams().size() != channels) {
     MS_LOG(ERROR) << "Quant param not equal channel num " << input_tensor->GetQuantParams().size() << channels;
+    free(dequant_data);
     return RET_ERROR;
   }
   size_t per_channel_size = input_tensor->DataSize() / channels;
mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc

@@ -20,6 +20,7 @@
 #include "src/runtime/runtime_api.h"
 #include "src/kernel_registry.h"
 #include "include/errorcode.h"
+#include "nnacl/errorcode.h"

 using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
@@ -105,7 +106,11 @@ int LeakyReluInt8CPUKernel::DoExecute(int task_id) {
   auto out_tensor = out_tensors_.at(kOutputIndex);
   int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->Data());
   int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->Data());
-  DoLeakReluInt8(input_data, output_data, &quant_prelu_parm_, task_id);
+  auto ret = DoLeakReluInt8(input_data, output_data, &quant_prelu_parm_, task_id);
+  if (ret != NNACL_OK) {
+    MS_LOG(ERROR) << "DoLeakReluInt8 failed";
+    return RET_ERROR;
+  }
   return RET_OK;
 }
mindspore/lite/src/runtime/thread_pool.c

@@ -500,12 +500,11 @@ int DistributeTask(int thread_pool_id, Task *task, int task_num) {
     } while (!k_success_flag);
   }
-
   // master thread
-  task->func(task->content, size - 1);
   if (task->func == NULL) {
     LOG_ERROR("task->func is nullptr");
     return RET_TP_ERROR;
   }
+  task->func(task->content, size - 1);
   // wait
   WaitAllThread(thread_pool_id);
   return RET_TP_OK;
@@ -547,11 +546,11 @@ void ThreadRun(Thread *thread) {
   while (thread_pool->is_alive) {
     while (thread->activate) {
       if (PopTaskFromQueue(thread, &task)) {
-        task->func(task->content, thread_id);
         if (task->func == NULL) {
          LOG_ERROR("task->func is nullptr");
          return;
        }
+        task->func(task->content, thread_id);
        atomic_fetch_sub_explicit(&thread->task_size, 1, memory_order_relaxed);
        // atomic_store_explicit(&thread->task_size, thread->task_size - 1, memory_order_relaxed);
        spin_count = 0;
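Both thread_pool.c hunks reorder an existing null check so that task->func is validated before it is first invoked, which is exactly the use-before-check pattern a static-analysis pass flags. A minimal sketch of the required ordering (not the MindSpore thread pool itself; names are illustrative):

#include <cstdio>

typedef int (*TaskFunc)(void *content, int thread_id);

// Invokes a task callback only after confirming the pointer is non-null;
// calling first and checking afterwards would dereference a possibly-null pointer.
static int RunTask(TaskFunc func, void *content, int thread_id) {
  if (func == NULL) {
    fprintf(stderr, "task func is null\n");
    return -1;  // stands in for RET_TP_ERROR
  }
  return func(content, thread_id);
}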
model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.cpp

@@ -54,6 +54,8 @@ int MSNetWork::ReleaseNets(void) {
   return 0;
 }
+
+const int MSNetWork::RET_CATEGORY_SUM = 601;

 const char *MSNetWork::labels_name_map[MSNetWork::RET_CATEGORY_SUM] = {
   {"Tortoise"}, {"Container"}, {"Magpie"}, {"Seaturtle"}, {"Football"}, {"Ambulance"}, {"Ladder"},
   {"Toothbrush"}, {"Syringe"}, {"Sink"}, {"Toy"}, {"Organ(MusicalInstrument) "}, {"Cassettedeck"},
model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h

@@ -32,7 +32,6 @@
 #include <memory>
 #include <utility>

-
 struct ImgDims {
   int channel = 0;
   int width = 0;
@@ -43,8 +42,6 @@ struct ImgDims {
   std::shared_ptr<mindspore::session::LiteSession> sess = nullptr;
 };*/

-
-
 class MSNetWork {
  public:
   MSNetWork();
@@ -55,10 +52,10 @@ class MSNetWork {
   int ReleaseNets(void);

  private:
   mindspore::session::LiteSession *session;
   mindspore::lite::Model *model;
-  static const int RET_CATEGORY_SUM = 601;
+  static const int RET_CATEGORY_SUM;
   static const char *labels_name_map[RET_CATEGORY_SUM];
 };

 #endif
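This header change pairs with the MSNetWork.cpp hunk above: the value of the static member now lives in a single out-of-class definition rather than in the header. The general declaration/definition split looks like the sketch below; the Categories class and its members are made up for illustration and are not the app's code.

// categories.h -- hypothetical header illustrating the pattern
class Categories {
 public:
  static const int kSum;          // declared here, value supplied by the definition
  static const char *kNames[];    // incomplete array type; the definition fixes its size
};

// categories.cpp -- the one translation unit that defines both members
const int Categories::kSum = 3;
const char *Categories::kNames[] = {"cat", "dog", "bird"};

In this sketch, changing the values only touches the .cpp that holds the definitions.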
model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp

@@ -76,10 +76,10 @@ cv::Mat PreProcessImageData(cv::Mat input) {
   imgFloatTmp.convertTo(imgResized256, CV_32FC3, normalizMin / normalizMax);

-  int offsetX = 16;
-  int offsetY = 16;
-  int cropWidth = 224;
-  int cropHeight = 224;
+  const int offsetX = 16;
+  const int offsetY = 16;
+  const int cropWidth = 224;
+  const int cropHeight = 224;

   // Standardization processing.
   float meanR = 0.485;