Commit d44249da
Authored on Jul 22, 2020 by PowersMrLi
1. Changed batch predict result variable names; 2. Pre-allocate memory for the result vector.
Parent: 5c6e671e
Showing 2 changed files with 33 additions and 29 deletions.
deploy/cpp/include/paddlex/paddlex.h (+2, -2)
deploy/cpp/src/paddlex.cpp (+31, -27)
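The second change in this commit follows a standard pre-allocation pattern: the output vector is cleared and resized to the batch size before the per-image loop, so each image writes into its own slot by index instead of growing the vector during post-processing. Below is a minimal, self-contained sketch of that pattern (not PaddleX code; BatchResult and postprocess_batch are hypothetical stand-ins for DetResult/SegResult and the per-image decoding).

#include <vector>

// Hypothetical per-image result type, standing in for DetResult / SegResult.
struct BatchResult {
  int label = 0;
};

// Pre-allocate one entry per image, then fill each slot by index.
void postprocess_batch(int batch_size, std::vector<BatchResult>* results) {
  results->clear();             // drop results from any previous call
  results->resize(batch_size);  // allocate all entries up front
  for (int i = 0; i < batch_size; ++i) {
    (*results)[i].label = i;    // each image touches only its own slot
  }
}

int main() {
  std::vector<BatchResult> results;
  postprocess_batch(4, &results);
  return 0;
}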
deploy/cpp/include/paddlex/paddlex.h
@@ -175,7 +175,7 @@ class Model {
    * @return true if predict successfully
    * */
   bool predict(const std::vector<cv::Mat>& im_batch,
-               std::vector<DetResult>* result,
+               std::vector<DetResult>* results,
                int thread_num = 1);
 
   /*
@@ -201,7 +201,7 @@ class Model {
    * @return true if predict successfully
    * */
   bool predict(const std::vector<cv::Mat>& im_batch,
-               std::vector<SegResult>* result,
+               std::vector<SegResult>* results,
                int thread_num = 1);
 
   // model type, include 3 type: classifier, detector, segmenter
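For context, a caller-side sketch of the renamed batch interface follows. It is illustrative only: the include path, the PaddleX namespace, the image file names, and the omitted model-initialization step are assumptions rather than part of this diff; only the predict signature and the boxes field of DetResult come from the code above and below.

#include <vector>
#include <opencv2/opencv.hpp>
#include "paddlex/paddlex.h"  // assumed include path, per deploy/cpp/include

int main() {
  PaddleX::Model model;
  // Model loading/initialization is elided; only the batch predict call is shown.
  std::vector<cv::Mat> im_batch = {cv::imread("1.jpg"), cv::imread("2.jpg")};
  std::vector<PaddleX::DetResult> results;  // renamed from `result` by this commit
  if (!model.predict(im_batch, &results, /*thread_num=*/2)) {
    return 1;
  }
  for (const auto& det : results) {
    for (const auto& box : det.boxes) {
      // box.coordinate holds {xmin, ymin, w, h}, as built in the .cpp diff below
      (void)box;
    }
  }
  return 0;
}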
deploy/cpp/src/paddlex.cpp
@@ -225,6 +225,8 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
   outputs_.resize(size);
   output_tensor->copy_to_cpu(outputs_.data());
   // post-process the model output
+  (*results).clear();
+  (*results).resize(batch_size);
   int single_batch_size = size / batch_size;
   for (int i = 0; i < batch_size; ++i) {
     auto start_ptr = std::begin(outputs_);
@@ -343,7 +345,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
 }
 
 bool Model::predict(const std::vector<cv::Mat>& im_batch,
-                    std::vector<DetResult>* result,
+                    std::vector<DetResult>* results,
                     int thread_num) {
   for (auto& inputs : inputs_batch_) {
     inputs.clear();
@@ -467,6 +469,8 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
   auto lod_vector = output_box_tensor->lod();
   int num_boxes = size / 6;
   // parse the predicted boxes
+  (*results).clear();
+  (*results).resize(batch_size);
   for (int i = 0; i < lod_vector[0].size() - 1; ++i) {
     for (int j = lod_vector[0][i]; j < lod_vector[0][i + 1]; ++j) {
       Box box;
@@ -480,7 +484,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
       float w = xmax - xmin + 1;
       float h = ymax - ymin + 1;
       box.coordinate = {xmin, ymin, w, h};
-      (*result)[i].boxes.push_back(std::move(box));
+      (*results)[i].boxes.push_back(std::move(box));
     }
   }
@@ -499,9 +503,9 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
     output_mask_tensor->copy_to_cpu(output_mask.data());
     int mask_idx = 0;
     for (int i = 0; i < lod_vector[0].size() - 1; ++i) {
-      (*result)[i].mask_resolution = output_mask_shape[2];
-      for (int j = 0; j < (*result)[i].boxes.size(); ++j) {
-        Box* box = &(*result)[i].boxes[j];
+      (*results)[i].mask_resolution = output_mask_shape[2];
+      for (int j = 0; j < (*results)[i].boxes.size(); ++j) {
+        Box* box = &(*results)[i].boxes[j];
         int category_id = box->category_id;
         auto begin_mask = output_mask.begin() +
                           (mask_idx * classes + category_id) * mask_pixels;
@@ -624,7 +628,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
 }
 
 bool Model::predict(const std::vector<cv::Mat>& im_batch,
-                    std::vector<SegResult>* result,
+                    std::vector<SegResult>* results,
                     int thread_num) {
   for (auto& inputs : inputs_batch_) {
     inputs.clear();
@@ -647,8 +651,8 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
   }
   int batch_size = im_batch.size();
-  (*result).clear();
-  (*result).resize(batch_size);
+  (*results).clear();
+  (*results).resize(batch_size);
   int h = inputs_batch_[0].new_im_size_[0];
   int w = inputs_batch_[0].new_im_size_[1];
   auto im_tensor = predictor_->GetInputTensor("image");
@@ -680,14 +684,14 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
   int single_batch_size = size / batch_size;
   for (int i = 0; i < batch_size; ++i) {
-    (*result)[i].label_map.data.resize(single_batch_size);
-    (*result)[i].label_map.shape.push_back(1);
+    (*results)[i].label_map.data.resize(single_batch_size);
+    (*results)[i].label_map.shape.push_back(1);
     for (int j = 1; j < output_label_shape.size(); ++j) {
-      (*result)[i].label_map.shape.push_back(output_label_shape[j]);
+      (*results)[i].label_map.shape.push_back(output_label_shape[j]);
     }
     std::copy(output_labels_iter + i * single_batch_size,
               output_labels_iter + (i + 1) * single_batch_size,
-              (*result)[i].label_map.data.data());
+              (*results)[i].label_map.data.data());
   }
   // get the prediction confidence score map
@@ -704,29 +708,29 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
   int single_batch_score_size = size / batch_size;
   for (int i = 0; i < batch_size; ++i) {
-    (*result)[i].score_map.data.resize(single_batch_score_size);
-    (*result)[i].score_map.shape.push_back(1);
+    (*results)[i].score_map.data.resize(single_batch_score_size);
+    (*results)[i].score_map.shape.push_back(1);
     for (int j = 1; j < output_score_shape.size(); ++j) {
-      (*result)[i].score_map.shape.push_back(output_score_shape[j]);
+      (*results)[i].score_map.shape.push_back(output_score_shape[j]);
     }
     std::copy(output_scores_iter + i * single_batch_score_size,
               output_scores_iter + (i + 1) * single_batch_score_size,
-              (*result)[i].score_map.data.data());
+              (*results)[i].score_map.data.data());
   }
   // map the output back to the original image size
   for (int i = 0; i < batch_size; ++i) {
-    std::vector<uint8_t> label_map((*result)[i].label_map.data.begin(),
-                                   (*result)[i].label_map.data.end());
-    cv::Mat mask_label((*result)[i].label_map.shape[1],
-                       (*result)[i].label_map.shape[2],
+    std::vector<uint8_t> label_map((*results)[i].label_map.data.begin(),
+                                   (*results)[i].label_map.data.end());
+    cv::Mat mask_label((*results)[i].label_map.shape[1],
+                       (*results)[i].label_map.shape[2],
                        CV_8UC1,
                        label_map.data());
-    cv::Mat mask_score((*result)[i].score_map.shape[2],
-                       (*result)[i].score_map.shape[3],
+    cv::Mat mask_score((*results)[i].score_map.shape[2],
+                       (*results)[i].score_map.shape[3],
                        CV_32FC1,
-                       (*result)[i].score_map.data.data());
+                       (*results)[i].score_map.data.data());
     int idx = 1;
     int len_postprocess = inputs_batch_[i].im_size_before_resize_.size();
     for (std::vector<std::string>::reverse_iterator iter =
@@ -762,12 +766,12 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
       }
       ++idx;
     }
-    (*result)[i].label_map.data.assign(mask_label.begin<uint8_t>(),
+    (*results)[i].label_map.data.assign(mask_label.begin<uint8_t>(),
                                        mask_label.end<uint8_t>());
-    (*result)[i].label_map.shape = {mask_label.rows, mask_label.cols};
-    (*result)[i].score_map.data.assign(mask_score.begin<float>(),
+    (*results)[i].label_map.shape = {mask_label.rows, mask_label.cols};
+    (*results)[i].score_map.data.assign(mask_score.begin<float>(),
                                        mask_score.end<float>());
-    (*result)[i].score_map.shape = {mask_score.rows, mask_score.cols};
+    (*results)[i].score_map.shape = {mask_score.rows, mask_score.cols};
   }
   return true;
 }