Commit 55495b69
Authored Sep 24, 2021 by stephon

Recognition Serving: support det and rec pipeline

Parent: ccda54d7
Showing 5 changed files with 152 additions and 33 deletions.
deploy/paddleserving/recognition/config.yml                   +12  -2
deploy/paddleserving/recognition/label_list.txt               +2   -0
deploy/paddleserving/recognition/pipeline_http_client.py      +2   -2
deploy/paddleserving/recognition/pipeline_rpc_client.py       +1   -1
deploy/paddleserving/recognition/recognition_web_service.py   +135 -28
deploy/paddleserving/recognition/config.yml

@@ -10,7 +10,7 @@ dag:
     # op resource type: True for the thread model, False for the process model
     is_thread_op: False

 op:
-    recog:
+    rec:
         # concurrency: thread concurrency when is_thread_op=True, otherwise process concurrency
         concurrency: 1

@@ -30,4 +30,14 @@ op:
             client_type: local_predictor
             # fetch result list, based on the alias_name of fetch_var in client_config
-            fetch_list: ["features"]
\ No newline at end of file
+            fetch_list: ["features"]
+    det:
+        concurrency: 1
+        local_service_conf:
+            client_type: local_predictor
+            device_type: 1
+            devices: '0'
+            fetch_list:
+            - save_infer_model/scale_0.tmp_1
+            model_config: ../../models/ppyolov2_r50vd_dcn_mainbody_v1.0_serving/
\ No newline at end of file
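For reference (not part of the commit), a minimal Python sketch of sanity-checking the updated config, assuming PyYAML is installed and the script is run from deploy/paddleserving/recognition/ next to config.yml:

# Sketch: confirm that config.yml now declares both the "rec" and "det" ops.
import yaml

with open("config.yml") as f:
    conf = yaml.safe_load(f)

ops = conf.get("op", {})
print("configured ops:", list(ops.keys()))  # expected: ['rec', 'det']
print("det model_config:",
      ops.get("det", {}).get("local_service_conf", {}).get("model_config"))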
deploy/paddleserving/recognition/label_list.txt (new file, mode 100644)

+foreground
+background
\ No newline at end of file
deploy/paddleserving/recognition/pipeline_http_client.py

@@ -9,13 +9,13 @@ def cv2_to_base64(image):
     return base64.b64encode(image).decode('utf8')

 if __name__ == "__main__":
-    url = "http://127.0.0.1:18081/recog_service/prediction"
+    url = "http://127.0.0.1:18081/recognition/prediction"
     with open(os.path.join(".", imgpath), 'rb') as file:
         image_data1 = file.read()
     image = cv2_to_base64(image_data1)
     data = {"key": ["image"], "value": [image]}
-    for i in range(5):
+    for i in range(1):
         r = requests.post(url=url, data=json.dumps(data))
         print(r.json())
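The renamed endpoint can be exercised with any HTTP client. Below is a minimal sketch that mirrors the updated client, assuming the service from this commit is running locally on port 18081; the image path is a placeholder, not a file shipped with the repo.

# Sketch: call the renamed /recognition/prediction pipeline endpoint.
import base64
import json

import requests


def recognize(img_path, url="http://127.0.0.1:18081/recognition/prediction"):
    with open(img_path, "rb") as f:
        image = base64.b64encode(f.read()).decode("utf8")
    # The pipeline expects parallel "key"/"value" lists.
    payload = {"key": ["image"], "value": [image]}
    resp = requests.post(url=url, data=json.dumps(payload))
    return resp.json()


if __name__ == "__main__":
    print(recognize("./test.jpg"))  # placeholder image path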
deploy/paddleserving/recognition/pipeline_rpc_client.py

@@ -30,5 +30,5 @@ if __name__ == "__main__":
     image = cv2_to_base64(image_data)
     for i in range(1):
-        ret = client.predict(feed_dict={"image": image}, fetch=["label", "dist"])
+        ret = client.predict(feed_dict={"image": image}, fetch=["result"])
         print(ret)
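With the fetch key changed to "result", the server now returns a single stringified list of predictions (see RecOp.postprocess below) instead of separate "label" and "dist" fields. A minimal sketch of unpacking such a value with ast.literal_eval, using an illustrative sample rather than real service output, and assuming the scores serialize as plain numeric literals:

# Sketch: the "result" field is produced by str(results) on the server side,
# i.e. a Python-literal list of dicts with "bbox", "rec_docs" and "rec_scores".
import ast

sample_result = ("[{'bbox': [345, 95, 524, 576], "
                 "'rec_docs': 'example_product', 'rec_scores': 0.79}]")

predictions = ast.literal_eval(sample_result)
for pred in predictions:
    print(pred["rec_docs"], pred["rec_scores"], pred["bbox"])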
deploy/paddleserving/recognition/recognition_web_service.py

@@ -11,20 +11,81 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import sys
-from paddle_serving_server.web_service import WebService, Op
-from paddle_serving_app.reader import Sequential, URL2Image, Resize, CenterCrop, RGB2BGR, Transpose, Div, Normalize, Base64ToImage
+try:
+    from paddle_serving_server_gpu.web_service import WebService, Op
+except ImportError:
+    from paddle_serving_server.web_service import WebService, Op
 import logging
 import numpy as np
-import base64, cv2
+import sys
+import cv2
+from paddle_serving_app.reader import *
+import base64
 import os
 import faiss
 import pickle
+import json


-class RecogOp(Op):
+class DetOp(Op):
+    def init_op(self):
+        self.img_preprocess = Sequential([
+            BGR2RGB(), Div(255.0),
+            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], False),
+            Resize((640, 640)), Transpose((2, 0, 1))
+        ])
+        self.img_postprocess = RCNNPostprocess("label_list.txt", "output")
+        self.threshold = 0.2
+        self.max_det_results = 5
+
+    def generate_scale(self, im):
+        """
+        Args:
+            im (np.ndarray): image (np.ndarray)
+        Returns:
+            im_scale_x: the resize ratio of X
+            im_scale_y: the resize ratio of Y
+        """
+        target_size = [640, 640]
+        origin_shape = im.shape[:2]
+        resize_h, resize_w = target_size
+        im_scale_y = resize_h / float(origin_shape[0])
+        im_scale_x = resize_w / float(origin_shape[1])
+        return im_scale_y, im_scale_x
+
+    def preprocess(self, input_dicts, data_id, log_id):
+        (_, input_dict), = input_dicts.items()
+        imgs = []
+        raw_imgs = []
+        for key in input_dict.keys():
+            data = base64.b64decode(input_dict[key].encode('utf8'))
+            raw_imgs.append(data)
+            data = np.fromstring(data, np.uint8)
+            raw_im = cv2.imdecode(data, cv2.IMREAD_COLOR)
+
+            im_scale_y, im_scale_x = self.generate_scale(raw_im)
+            im = self.img_preprocess(raw_im)
+
+            imgs.append({
+                "image": im[np.newaxis, :],
+                "im_shape": np.array(list(im.shape[1:])).reshape(-1)[np.newaxis, :],
+                "scale_factor": np.array([im_scale_y, im_scale_x]).astype('float32'),
+            })
+        self.raw_img = raw_imgs
+
+        feed_dict = {
+            "image": np.concatenate([x["image"] for x in imgs], axis=0),
+            "im_shape": np.concatenate([x["im_shape"] for x in imgs], axis=0),
+            "scale_factor": np.concatenate([x["scale_factor"] for x in imgs], axis=0)
+        }
+        return feed_dict, False, None, ""
+
+    def postprocess(self, input_dicts, fetch_dict, log_id):
+        boxes = self.img_postprocess(fetch_dict, visualize=False)
+        boxes.sort(key=lambda x: x["score"], reverse=True)
+        boxes = filter(lambda x: x["score"] >= self.threshold,
+                       boxes[:self.max_det_results])
+        result = json.dumps(list(boxes))
+        res_dict = {"bbox_result": result, "image": self.raw_img}
+        return res_dict, None, ""
+
+
+class RecOp(Op):
     def init_op(self):
         self.seq = Sequential([
             Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)),
@@ -32,7 +93,6 @@ class RecogOp(Op):
                       True)
         ])
-        #load index; and return top1
         index_dir = "../../recognition_demo_data_v1.1/gallery_product/index"
         assert os.path.exists(os.path.join(
             index_dir, "vector.index")), "vector.index not found ..."
@@ -45,35 +105,82 @@ class RecogOp(Op):
         with open(os.path.join(index_dir, "id_map.pkl"), "rb") as fd:
             self.id_map = pickle.load(fd)

+        self.rec_nms_thresold = 0.05
+        self.rec_score_thres = 0.5
+
     def preprocess(self, input_dicts, data_id, log_id):
         (_, input_dict), = input_dicts.items()
-        batch_size = len(input_dict.keys())
+        raw_img = input_dict["image"][0]
+        data = np.frombuffer(raw_img, np.uint8)
+        origin_img = cv2.imdecode(data, cv2.IMREAD_COLOR)
+        dt_boxes = input_dict["bbox_result"]
+        boxes = json.loads(dt_boxes)
+        boxes.append({
+            "category_id": 0,
+            "score": 1.0,
+            "bbox": [0, 0, origin_img.shape[1], origin_img.shape[0]]
+        })
+        self.det_boxes = boxes

+        #construct batch images for rec
         imgs = []
-        for key in input_dict.keys():
-            data = base64.b64decode(input_dict[key].encode('utf8'))
-            data = np.fromstring(data, np.uint8)
-            im = cv2.imdecode(data, cv2.IMREAD_COLOR)
+        for box in boxes:
+            box = [int(x) for x in box["bbox"]]
+            im = origin_img[box[1]: box[1] + box[3], box[0]: box[0] + box[2]].copy()
             img = self.seq(im)
             imgs.append(img[np.newaxis, :].copy())

         input_imgs = np.concatenate(imgs, axis=0)
         return {"x": input_imgs}, False, None, ""

+    def nms_to_rec_results(self, results, thresh=0.1):
+        filtered_results = []
+        x1 = np.array([r["bbox"][0] for r in results]).astype("float32")
+        y1 = np.array([r["bbox"][1] for r in results]).astype("float32")
+        x2 = np.array([r["bbox"][2] for r in results]).astype("float32")
+        y2 = np.array([r["bbox"][3] for r in results]).astype("float32")
+        scores = np.array([r["rec_scores"] for r in results])
+
+        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+        order = scores.argsort()[::-1]
+        while order.size > 0:
+            i = order[0]
+            xx1 = np.maximum(x1[i], x1[order[1:]])
+            yy1 = np.maximum(y1[i], y1[order[1:]])
+            xx2 = np.minimum(x2[i], x2[order[1:]])
+            yy2 = np.minimum(y2[i], y2[order[1:]])
+
+            w = np.maximum(0.0, xx2 - xx1 + 1)
+            h = np.maximum(0.0, yy2 - yy1 + 1)
+            inter = w * h
+            ovr = inter / (areas[i] + areas[order[1:]] - inter)
+            inds = np.where(ovr <= thresh)[0]
+            order = order[inds + 1]
+            filtered_results.append(results[i])
+        return filtered_results
+
     def postprocess(self, input_dicts, fetch_dict, log_id):
         score_list = fetch_dict["features"]
-        scores, docs = self.searcher.search(score_list, 1)
+        return_top_k = 1
+        scores, docs = self.searcher.search(score_list, return_top_k)

-        result = {}
-        result["label"] = self.id_map[docs[0][0]].split()[1]
-        result["dist"] = str(scores[0][0])
-        return result, None, ""
+        results = []
+        for i in range(scores.shape[0]):
+            pred = {}
+            if scores[i][0] >= self.rec_score_thres:
+                pred["bbox"] = self.det_boxes[i]["bbox"]
+                pred["rec_docs"] = self.id_map[docs[i][0]].split()[1]
+                pred["rec_scores"] = scores[i][0]
+                results.append(pred)
+
+        #do nms
+        results = self.nms_to_rec_results(results, self.rec_nms_thresold)
+        return {"result": str(results)}, None, ""

-class ProductRecognitionService(WebService):
+class RecognitionService(WebService):
     def get_pipeline_response(self, read_op):
-        image_op = RecogOp(name="recog", input_ops=[read_op])
-        return image_op
+        det_op = DetOp(name="det", input_ops=[read_op])
+        rec_op = RecOp(name="rec", input_ops=[det_op])
+        return rec_op

-uci_service = ProductRecognitionService(name="recog_service")
-uci_service.prepare_pipeline_config("config.yml")
-uci_service.run_service()
+product_recog_service = RecognitionService(name="recognition")
+product_recog_service.prepare_pipeline_config("config.yml")
+product_recog_service.run_service()
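The core of the new RecOp.postprocess is the greedy NMS in nms_to_rec_results, which keeps the highest-scoring recognition result and drops any remaining result whose IoU with it exceeds thresh. Below is a self-contained sketch: the same logic lifted out of the Op class, run on an illustrative toy input, treating each bbox as [x1, y1, x2, y2] corner coordinates as the IoU arithmetic implies.

# Standalone sketch of the greedy NMS used by RecOp.nms_to_rec_results.
import numpy as np


def nms_to_rec_results(results, thresh=0.1):
    filtered_results = []
    x1 = np.array([r["bbox"][0] for r in results]).astype("float32")
    y1 = np.array([r["bbox"][1] for r in results]).astype("float32")
    x2 = np.array([r["bbox"][2] for r in results]).astype("float32")
    y2 = np.array([r["bbox"][3] for r in results]).astype("float32")
    scores = np.array([r["rec_scores"] for r in results])

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]          # highest score first
    while order.size > 0:
        i = order[0]
        # IoU of the best remaining box against all the others
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # keep only boxes whose overlap with the winner is at most thresh
        order = order[np.where(ovr <= thresh)[0] + 1]
        filtered_results.append(results[i])
    return filtered_results


# Toy input: two heavily overlapping boxes and one separate box.
candidates = [
    {"bbox": [0, 0, 100, 100], "rec_docs": "a", "rec_scores": 0.9},
    {"bbox": [5, 5, 105, 105], "rec_docs": "b", "rec_scores": 0.6},
    {"bbox": [300, 300, 400, 400], "rec_docs": "c", "rec_scores": 0.8},
]
print([r["rec_docs"] for r in nms_to_rec_results(candidates)])  # ['a', 'c']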