PaddlePaddle / PaddleX
Commit 151ae40b
Authored Aug 29, 2020 by syyxsxx

change chinese annotation to english

Parent commit: 2c94ac53
Showing 14 changed files with 80 additions and 79 deletions.
deploy/cpp/src/paddlex.cpp            +34  -34
deploy/cpp/src/transforms.cpp          +7   -5
deploy/cpp/src/visualize.cpp           +4   -4
deploy/openvino/demo/classifier.cpp    +2   -2
deploy/openvino/demo/detector.cpp      +3   -3
deploy/openvino/demo/segmenter.cpp     +1   -3
deploy/openvino/src/paddlex.cpp        +8   -7
deploy/openvino/src/transforms.cpp     +3   -3
deploy/openvino/src/visualize.cpp      +4   -4
deploy/raspberry/demo/classifier.cpp   +2   -2
deploy/raspberry/demo/detector.cpp     +3   -3
deploy/raspberry/demo/segmenter.cpp    +1   -1
deploy/raspberry/src/paddlex.cpp       +5   -5
deploy/raspberry/src/transforms.cpp    +3   -3
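The demo programs touched by this commit (classifier.cpp, detector.cpp and segmenter.cpp under deploy/openvino/demo and deploy/raspberry/demo) all share the load-model / predict / visualize flow that the new English comments name. The sketch below is an illustrative reconstruction of that flow based only on the calls visible in the hunks that follow (PaddleX::Model, Model::Init, Model::predict, PaddleX::GenerateColorMap, PaddleX::Visualize); the header paths, literal argument values and result handling are assumptions for this summary, not code from the commit (the real demos read these values from gflags such as FLAGS_model_dir and FLAGS_threshold).

    // Illustrative sketch only: header paths and literals are assumed.
    #include <iostream>
    #include <string>

    #include <opencv2/opencv.hpp>

    #include "include/paddlex/paddlex.h"    // assumed location of PaddleX::Model
    #include "include/paddlex/visualize.h"  // assumed location of PaddleX::Visualize

    int main() {
      // Hypothetical values standing in for FLAGS_model_dir, FLAGS_cfg_file,
      // FLAGS_device and FLAGS_threshold used by the real demos.
      std::string model_dir = "./inference_model";
      std::string cfg_file = "./inference_model/model.yml";

      // load model
      PaddleX::Model model;
      model.Init(model_dir, cfg_file, "CPU");

      // predict
      cv::Mat im = cv::imread("./test.jpg", 1);
      PaddleX::DetResult result;
      if (!model.predict(im, &result)) {
        std::cerr << "Predict failed!" << std::endl;
        return -1;
      }

      // visualize
      auto colormap = PaddleX::GenerateColorMap(model.labels.size());
      cv::Mat vis_img =
          PaddleX::Visualize(im, result, model.labels, colormap, 0.5);
      cv::imwrite("./vis_result.jpg", vis_img);
      return 0;
    }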
deploy/cpp/src/paddlex.cpp

@@ -40,7 +40,7 @@ void Model::create_predictor(const std::string& model_dir,
  }
#endif
  if (yaml_input == "") {
-    // 读取配置文件
+    // read yaml file
    std::ifstream yaml_fin(yaml_file);
    yaml_fin.seekg(0, std::ios::end);
    size_t yaml_file_size = yaml_fin.tellg();
@@ -48,7 +48,7 @@ void Model::create_predictor(const std::string& model_dir,
    yaml_fin.seekg(0);
    yaml_fin.read(&yaml_input[0], yaml_file_size);
  }
-  // 读取配置文件内容
+  // load yaml file
  if (!load_config(yaml_input)) {
    std::cerr << "Parse file 'model.yml' failed!" << std::endl;
    exit(-1);
@@ -64,13 +64,13 @@ void Model::create_predictor(const std::string& model_dir,
  }
  config.SwitchUseFeedFetchOps(false);
  config.SwitchSpecifyInputNames(true);
-  // 开启图优化
+  // enable graph Optim
#if defined(__arm__) || defined(__aarch64__)
  config.SwitchIrOptim(false);
#else
  config.SwitchIrOptim(use_ir_optim);
#endif
-  // 开启内存优化
+  // enable Memory Optim
  config.EnableMemoryOptim();
  if (use_trt) {
    config.EnableTensorRtEngine(
@@ -108,9 +108,9 @@ bool Model::load_config(const std::string& yaml_input) {
      return false;
    }
  }
-  // 构建数据处理流
+  // build data preprocess stream
  transforms_.Init(config["Transforms"], to_rgb);
-  // 读入label list
+  // read label list
  labels.clear();
  for (const auto& item : config["_Attributes"]["labels"]) {
    int index = labels.size();
@@ -152,19 +152,19 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
                 "to function predict()!" << std::endl;
    return false;
  }
-  // 处理输入图像
+  // im preprocess
  if (!preprocess(im, &inputs_)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
-  // 使用加载的模型进行预测
+  // predict
  auto in_tensor = predictor_->GetInputTensor("image");
  int h = inputs_.new_im_size_[0];
  int w = inputs_.new_im_size_[1];
  in_tensor->Reshape({1, 3, h, w});
  in_tensor->copy_from_cpu(inputs_.im_data_.data());
  predictor_->ZeroCopyRun();
-  // 取出模型的输出结果
+  // get result
  auto output_names = predictor_->GetOutputNames();
  auto output_tensor = predictor_->GetOutputTensor(output_names[0]);
  std::vector<int> output_shape = output_tensor->shape();
@@ -174,7 +174,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
  }
  outputs_.resize(size);
  output_tensor->copy_to_cpu(outputs_.data());
-  // 对模型输出结果进行后处理
+  // postprocess
  auto ptr = std::max_element(std::begin(outputs_), std::end(outputs_));
  result->category_id = std::distance(std::begin(outputs_), ptr);
  result->score = *ptr;
@@ -198,12 +198,12 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
    return false;
  }
  inputs_batch_.assign(im_batch.size(), ImageBlob());
-  // 处理输入图像
+  // preprocess
  if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
-  // 使用加载的模型进行预测
+  // predict
  int batch_size = im_batch.size();
  auto in_tensor = predictor_->GetInputTensor("image");
  int h = inputs_batch_[0].new_im_size_[0];
@@ -218,7 +218,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
  in_tensor->copy_from_cpu(inputs_data.data());
  // in_tensor->copy_from_cpu(inputs_.im_data_.data());
  predictor_->ZeroCopyRun();
-  // 取出模型的输出结果
+  // get result
  auto output_names = predictor_->GetOutputNames();
  auto output_tensor = predictor_->GetOutputTensor(output_names[0]);
  std::vector<int> output_shape = output_tensor->shape();
@@ -228,7 +228,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
  }
  outputs_.resize(size);
  output_tensor->copy_to_cpu(outputs_.data());
-  // 对模型输出结果进行后处理
+  // postprocess
  (*results).clear();
  (*results).resize(batch_size);
  int single_batch_size = size / batch_size;
@@ -258,7 +258,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
    return false;
  }
-  // 处理输入图像
+  // preprocess
  if (!preprocess(im, &inputs_)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
@@ -288,7 +288,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
    im_info_tensor->copy_from_cpu(im_info);
    im_shape_tensor->copy_from_cpu(im_shape);
  }
-  // 使用加载的模型进行预测
+  // predict
  predictor_->ZeroCopyRun();
  std::vector<float> output_box;
@@ -306,7 +306,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
    return true;
  }
  int num_boxes = size / 6;
-  // 解析预测框box
+  // box postprocess
  for (int i = 0; i < num_boxes; ++i) {
    Box box;
    box.category_id = static_cast<int>(round(output_box[i * 6]));
@@ -321,7 +321,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
    box.coordinate = {xmin, ymin, w, h};
    result->boxes.push_back(std::move(box));
  }
-  // 实例分割需解析mask
+  // mask postprocess
  if (name == "MaskRCNN") {
    std::vector<float> output_mask;
    auto output_mask_tensor = predictor_->GetOutputTensor(output_names[1]);
@@ -366,12 +366,12 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
  inputs_batch_.assign(im_batch.size(), ImageBlob());
  int batch_size = im_batch.size();
-  // 处理输入图像
+  // preprocess
  if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
-  // 对RCNN类模型做批量padding
+  // RCNN model padding
  if (batch_size > 1) {
    if (name == "FasterRCNN" || name == "MaskRCNN") {
      int max_h = -1;
@@ -452,10 +452,10 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
    im_info_tensor->copy_from_cpu(im_info.data());
    im_shape_tensor->copy_from_cpu(im_shape.data());
  }
-  // 使用加载的模型进行预测
+  // predict
  predictor_->ZeroCopyRun();
-  // 读取所有box
+  // get all box
  std::vector<float> output_box;
  auto output_names = predictor_->GetOutputNames();
  auto output_box_tensor = predictor_->GetOutputTensor(output_names[0]);
@@ -472,7 +472,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
  }
  auto lod_vector = output_box_tensor->lod();
  int num_boxes = size / 6;
-  // 解析预测框box
+  // box postprocess
  (*results).clear();
  (*results).resize(batch_size);
  for (int i = 0; i < lod_vector[0].size() - 1; ++i) {
@@ -492,7 +492,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
    }
  }
-  // 实例分割需解析mask
+  // mask postprocess
  if (name == "MaskRCNN") {
    std::vector<float> output_mask;
    auto output_mask_tensor = predictor_->GetOutputTensor(output_names[1]);
@@ -537,7 +537,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
    return false;
  }
-  // 处理输入图像
+  // preprocess
  if (!preprocess(im, &inputs_)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
@@ -549,10 +549,10 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
  im_tensor->Reshape({1, 3, h, w});
  im_tensor->copy_from_cpu(inputs_.im_data_.data());
-  // 使用加载的模型进行预测
+  // predict
  predictor_->ZeroCopyRun();
-  // 获取预测置信度,经过argmax后的labelmap
+  // get labelmap
  auto output_names = predictor_->GetOutputNames();
  auto output_label_tensor = predictor_->GetOutputTensor(output_names[0]);
  std::vector<int> output_label_shape = output_label_tensor->shape();
@@ -565,7 +565,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
  result->label_map.data.resize(size);
  output_label_tensor->copy_to_cpu(result->label_map.data.data());
-  // 获取预测置信度scoremap
+  // get scoremap
  auto output_score_tensor = predictor_->GetOutputTensor(output_names[1]);
  std::vector<int> output_score_shape = output_score_tensor->shape();
  size = 1;
@@ -577,7 +577,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
  result->score_map.data.resize(size);
  output_score_tensor->copy_to_cpu(result->score_map.data.data());
-  // 解析输出结果到原图大小
+  // get origin image result
  std::vector<uint8_t> label_map(result->label_map.data.begin(),
                                 result->label_map.data.end());
  cv::Mat mask_label(result->label_map.shape[1],
@@ -647,7 +647,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
    return false;
  }
-  // 处理输入图像
+  // preprocess
  inputs_batch_.assign(im_batch.size(), ImageBlob());
  if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
    std::cerr << "Preprocess failed!" << std::endl;
@@ -670,10 +670,10 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
  im_tensor->copy_from_cpu(inputs_data.data());
  // im_tensor->copy_from_cpu(inputs_.im_data_.data());
-  // 使用加载的模型进行预测
+  // predict
  predictor_->ZeroCopyRun();
-  // 获取预测置信度,经过argmax后的labelmap
+  // get labelmap
  auto output_names = predictor_->GetOutputNames();
  auto output_label_tensor = predictor_->GetOutputTensor(output_names[0]);
  std::vector<int> output_label_shape = output_label_tensor->shape();
@@ -698,7 +698,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
                                (*results)[i].label_map.data.data());
  }
-  // 获取预测置信度scoremap
+  // get scoremap
  auto output_score_tensor = predictor_->GetOutputTensor(output_names[1]);
  std::vector<int> output_score_shape = output_score_tensor->shape();
  size = 1;
@@ -722,7 +722,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
                               (*results)[i].score_map.data.data());
  }
-  // 解析输出结果到原图大小
+  // get origin image result
  for (int i = 0; i < batch_size; ++i) {
    std::vector<uint8_t> label_map((*results)[i].label_map.data.begin(),
                                   (*results)[i].label_map.data.end());
deploy/cpp/src/transforms.cpp

@@ -12,12 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include "include/paddlex/transforms.h"
+
+#include <math.h>
+
#include <iostream>
#include <string>
#include <vector>
-#include <math.h>
-#include "include/paddlex/transforms.h"

namespace PaddleX {
@@ -195,7 +197,7 @@ std::shared_ptr<Transform> Transforms::CreateTransform(
}

bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
-  // 按照transforms中预处理算子顺序处理图像
+  // do all preprocess ops by order
  if (to_rgb_) {
    cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
  }
@@ -211,8 +213,8 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
    }
  }
-  // 将图像由NHWC转为NCHW格式
-  // 同时转为连续的内存块存储到ImageBlob
+  // data format NHWC to NCHW
+  // img data save to ImageBlob
  int h = im->rows;
  int w = im->cols;
  int c = im->channels();
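The transforms.cpp hunks above only rename the comments around the NHWC-to-NCHW step; the transpose itself sits in the context that the diff view collapses. For reference, the following standalone sketch shows one common way such an HWC-to-CHW repack is written; it is an illustration for this summary, not the PaddleX implementation.

    #include <vector>

    #include <opencv2/opencv.hpp>

    // Repack an HWC float image (the output of the preprocess ops) into a
    // contiguous CHW buffer, the layout an inference input tensor expects.
    // Assumes im is CV_32F with an arbitrary number of channels.
    std::vector<float> HwcToChw(const cv::Mat& im) {
      int h = im.rows;
      int w = im.cols;
      int c = im.channels();
      std::vector<float> chw(static_cast<size_t>(c) * h * w);
      for (int ci = 0; ci < c; ++ci) {
        for (int y = 0; y < h; ++y) {
          const float* row = im.ptr<float>(y);  // interleaved HWC row
          for (int x = 0; x < w; ++x) {
            // destination index is channel-major: (ci, y, x)
            chw[(static_cast<size_t>(ci) * h + y) * w + x] = row[x * c + ci];
          }
        }
      }
      return chw;
    }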
deploy/cpp/src/visualize.cpp

@@ -47,7 +47,7 @@ cv::Mat Visualize(const cv::Mat& img,
                      boxes[i].coordinate[2],
                      boxes[i].coordinate[3]);
-    // 生成预测框和标题
+    // draw box and title
    std::string text = boxes[i].category;
    int c1 = colormap[3 * boxes[i].category_id + 0];
    int c2 = colormap[3 * boxes[i].category_id + 1];
@@ -63,13 +63,13 @@ cv::Mat Visualize(const cv::Mat& img,
    origin.x = roi.x;
    origin.y = roi.y;
-    // 生成预测框标题的背景
+    // background
    cv::Rect text_back = cv::Rect(boxes[i].coordinate[0],
                                  boxes[i].coordinate[1] - text_size.height,
                                  text_size.width,
                                  text_size.height);
-    // 绘图和文字
+    // draw
    cv::rectangle(vis_img, roi, roi_color, 2);
    cv::rectangle(vis_img, text_back, roi_color, -1);
    cv::putText(vis_img,
@@ -80,7 +80,7 @@ cv::Mat Visualize(const cv::Mat& img,
                cv::Scalar(255, 255, 255),
                thickness);
-    // 生成实例分割mask
+    // mask
    if (boxes[i].mask.data.size() == 0) {
      continue;
    }
deploy/openvino/demo/classifier.cpp

@@ -44,11 +44,11 @@ int main(int argc, char** argv) {
    return -1;
  }
-  // 加载模型
+  // load model
  PaddleX::Model model;
  model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_device);
-  // 进行预测
+  // predict
  if (FLAGS_image_list != "") {
    std::ifstream inf(FLAGS_image_list);
    if (!inf) {
deploy/openvino/demo/detector.cpp

@@ -54,13 +54,13 @@ int main(int argc, char** argv) {
    return -1;
  }
-  //
+  // load model
  PaddleX::Model model;
  model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_device);
  int imgs = 1;
  auto colormap = PaddleX::GenerateColorMap(model.labels.size());
-  // 进行预测
+  // predict
  if (FLAGS_image_list != "") {
    std::ifstream inf(FLAGS_image_list);
    if (!inf) {
@@ -96,7 +96,7 @@ int main(int argc, char** argv) {
                << result.boxes[i].coordinate[3] << ")" << std::endl;
    }
    if (FLAGS_save_dir != "") {
-      // 可视化
+      // visualize
      cv::Mat vis_img = PaddleX::Visualize(
          im, result, model.labels, colormap, FLAGS_threshold);
      std::string save_path =
浏览文件 @
151ae40b
...
...
@@ -48,11 +48,9 @@ int main(int argc, char** argv) {
return
-
1
;
}
//
std
::
cout
<<
"init start"
<<
std
::
endl
;
// load model
PaddleX
::
Model
model
;
model
.
Init
(
FLAGS_model_dir
,
FLAGS_cfg_file
,
FLAGS_device
);
std
::
cout
<<
"init done"
<<
std
::
endl
;
int
imgs
=
1
;
auto
colormap
=
PaddleX
::
GenerateColorMap
(
model
.
labels
.
size
());
...
...
deploy/openvino/src/paddlex.cpp

@@ -67,9 +67,9 @@ bool Model::load_config(const std::string& cfg_file) {
      return false;
    }
  }
-  // 构建数据处理流
+  // init preprocess ops
  transforms_.Init(config["Transforms"], type, to_rgb);
-  // 读入label list
+  // read label list
  for (const auto& item : config["_Attributes"]["labels"]) {
    int index = labels.size();
    labels[index] = item.as<std::string>();
@@ -98,7 +98,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
              << std::endl;
    return false;
  }
-  // 处理输入图像
+  // preprocess
  InferenceEngine::InferRequest infer_request =
      executable_network_.CreateInferRequest();
  std::string input_name = network_.getInputsInfo().begin()->first;
@@ -109,6 +109,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
    return false;
  }
+  // predict
  infer_request.Infer();
  std::string output_name = network_.getOutputsInfo().begin()->first;
@@ -118,7 +119,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
  auto moutputHolder = moutput->rmap();
  float* outputs_data = moutputHolder.as<float*>();
-  // 对模型输出结果进行后处理
+  // post process
  auto ptr = std::max_element(outputs_data, outputs_data + sizeof(outputs_data));
  result->category_id = std::distance(outputs_data, ptr);
  result->score = *ptr;
@@ -206,20 +207,20 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
                 "function predict()!" << std::endl;
    return false;
  }
-  //
+  // init infer
  InferenceEngine::InferRequest infer_request =
      executable_network_.CreateInferRequest();
  std::string input_name = network_.getInputsInfo().begin()->first;
  inputs_.blob = infer_request.GetBlob(input_name);
-  //
+  // preprocess
  cv::Mat im_clone = im.clone();
  if (!preprocess(&im_clone, &inputs_)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
-  //
+  // predict
  infer_request.Infer();
  InferenceEngine::OutputsDataMap out_map = network_.getOutputsInfo();
deploy/openvino/src/transforms.cpp

@@ -201,7 +201,7 @@ std::shared_ptr<Transform> Transforms::CreateTransform(
}

bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
-  // 按照transforms中预处理算子顺序处理图像
+  // preprocess by order
  if (to_rgb_) {
    cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
  }
@@ -224,8 +224,8 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
    }
  }
-  // 将图像由NHWC转为NCHW格式
-  // 同时转为连续的内存块存储到Blob
+  // image format NHWC to NCHW
+  // img data save to ImageBlob
  InferenceEngine::SizeVector blobSize = data->blob->getTensorDesc().getDims();
  const size_t width = blobSize[3];
  const size_t height = blobSize[2];
deploy/openvino/src/visualize.cpp

@@ -47,7 +47,7 @@ cv::Mat Visualize(const cv::Mat& img,
                      boxes[i].coordinate[2],
                      boxes[i].coordinate[3]);
-    // 生成预测框和标题
+    // draw box and title
    std::string text = boxes[i].category;
    int c1 = colormap[3 * boxes[i].category_id + 0];
    int c2 = colormap[3 * boxes[i].category_id + 1];
@@ -63,13 +63,13 @@ cv::Mat Visualize(const cv::Mat& img,
    origin.x = roi.x;
    origin.y = roi.y;
-    // 生成预测框标题的背景
+    // background
    cv::Rect text_back = cv::Rect(boxes[i].coordinate[0],
                                  boxes[i].coordinate[1] - text_size.height,
                                  text_size.width,
                                  text_size.height);
-    // 绘图和文字
+    // draw
    cv::rectangle(vis_img, roi, roi_color, 2);
    cv::rectangle(vis_img, text_back, roi_color, -1);
    cv::putText(vis_img,
@@ -80,7 +80,7 @@ cv::Mat Visualize(const cv::Mat& img,
                cv::Scalar(255, 255, 255),
                thickness);
-    // 生成实例分割mask
+    // mask
    if (boxes[i].mask.data.size() == 0) {
      continue;
    }
deploy/raspberry/demo/classifier.cpp

@@ -44,11 +44,11 @@ int main(int argc, char** argv) {
    return -1;
  }
-  // 加载模型
+  // load model
  PaddleX::Model model;
  model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_thread_num);
  std::cout << "init is done" << std::endl;
-  // 进行预测
+  // predict
  if (FLAGS_image_list != "") {
    std::ifstream inf(FLAGS_image_list);
    if (!inf) {
deploy/raspberry/demo/detector.cpp

@@ -54,13 +54,13 @@ int main(int argc, char** argv) {
    return -1;
  }
-  //
+  // load model
  PaddleX::Model model;
  model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_thread_num);
  int imgs = 1;
  auto colormap = PaddleX::GenerateColorMap(model.labels.size());
-  // 进行预测
+  // predict
  if (FLAGS_image_list != "") {
    std::ifstream inf(FLAGS_image_list);
    if (!inf) {
@@ -97,7 +97,7 @@ int main(int argc, char** argv) {
                << result.boxes[i].coordinate[3] << ")" << std::endl;
    }
    if (FLAGS_save_dir != "") {
-      // 可视化
+      // visualize
      cv::Mat vis_img = PaddleX::Visualize(
          im, result, model.labels, colormap, FLAGS_threshold);
      std::string save_path =
deploy/raspberry/demo/segmenter.cpp

@@ -47,7 +47,7 @@ int main(int argc, char** argv) {
    return -1;
  }
-  //
+  // load model
  std::cout << "init start" << std::endl;
  PaddleX::Model model;
  model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_thread_num);
deploy/raspberry/src/paddlex.cpp

@@ -46,9 +46,9 @@ bool Model::load_config(const std::string& cfg_file) {
      return false;
    }
  }
-  // 构建数据处理流
+  // init preprocess ops
  transforms_.Init(config["Transforms"], to_rgb);
-  // 读入label list
+  // read label list
  for (const auto& item : config["_Attributes"]["labels"]) {
    int index = labels.size();
    labels[index] = item.as<std::string>();
@@ -77,14 +77,14 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
              << std::endl;
    return false;
  }
-  // 处理输入图像
+  // preprocess
  inputs_.input_tensor_ = std::move(predictor_->GetInput(0));
  cv::Mat im_clone = im.clone();
  if (!preprocess(&im_clone, &inputs_)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
+  // predict
  predictor_->Run();
  std::unique_ptr<const paddle::lite_api::Tensor> output_tensor(
@@ -92,7 +92,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
  const float* outputs_data = output_tensor->mutable_data<float>();
-  // 对模型输出结果进行后处理
+  // postprocess
  auto ptr = std::max_element(outputs_data, outputs_data + sizeof(outputs_data));
  result->category_id = std::distance(outputs_data, ptr);
  result->score = *ptr;
deploy/raspberry/src/transforms.cpp

@@ -201,7 +201,7 @@ std::shared_ptr<Transform> Transforms::CreateTransform(
}

bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
-  // 按照transforms中预处理算子顺序处理图像
+  // preprocess by order
  if (to_rgb_) {
    cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
  }
@@ -218,8 +218,8 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
    }
  }
-  // 将图像由NHWC转为NCHW格式
-  // 同时转为连续的内存块存储到Blob
+  // image format NHWC to NCHW
+  // img data save to ImageBlob
  int height = im->rows;
  int width = im->cols;
  int channels = im->channels();