PaddlePaddle / Serving · Commit acbf70c5

add yolov4 demo

Authored on Jun 18, 2020 by MRXLT
Parents: f2008a4f, d4183012

Showing 15 changed files with 117 additions and 67 deletions (+117 −67)
Changed files:

- core/general-client/src/general_model.cpp (+22 −33)
- doc/COMPILE.md (+4 −4)
- doc/COMPILE_CN.md (+4 −4)
- doc/LATEST_PACKAGES.md (+9 −9)
- python/examples/yolov4/README.md (+23 −0)
- python/examples/yolov4/README_CN.md (+24 −0)
- python/examples/yolov4/test_client.py (+2 −2)
- python/paddle_serving_app/models/model_list.py (+1 −1)
- python/paddle_serving_app/reader/image_reader.py (+9 −1)
- python/paddle_serving_app/version.py (+1 −1)
- python/paddle_serving_client/__init__.py (+7 −1)
- python/paddle_serving_client/version.py (+3 −3)
- python/paddle_serving_server/version.py (+3 −3)
- python/paddle_serving_server_gpu/version.py (+3 −3)
- tools/python_tag.py (+2 −2)
core/general-client/src/general_model.cpp

```diff
@@ -295,25 +295,23 @@ int PredictorClient::batch_predict(
   for (auto &name : fetch_name) {
     // int idx = _fetch_name_to_idx[name];
     if (_fetch_name_to_type[name] == 0) {
-      VLOG(2) << "ferch var " << name << "type int64";
-      model._int64_value_map[name].resize(
-          output.insts(0).tensor_array(idx).int64_data_size());
+      VLOG(2) << "fetch var " << name << " type int64";
       int size = output.insts(0).tensor_array(idx).int64_data_size();
-      for (int i = 0; i < size; ++i) {
-        model._int64_value_map[name][i] =
-            output.insts(0).tensor_array(idx).int64_data(i);
-      }
+      model._int64_value_map[name] = std::vector<int64_t>(
+          output.insts(0).tensor_array(idx).int64_data().begin(),
+          output.insts(0).tensor_array(idx).int64_data().begin() + size);
     } else if (_fetch_name_to_type[name] == 1) {
-      VLOG(2) << "fetch var " << name << "type float";
-      model._float_value_map[name].resize(
-          output.insts(0).tensor_array(idx).float_data_size());
+      VLOG(2) << "fetch var " << name << " type float";
       int size = output.insts(0).tensor_array(idx).float_data_size();
-      for (int i = 0; i < size; ++i) {
-        model._float_value_map[name][i] =
-            output.insts(0).tensor_array(idx).float_data(i);
-      }
+      model._float_value_map[name] = std::vector<float>(
+          output.insts(0).tensor_array(idx).float_data().begin(),
+          output.insts(0).tensor_array(idx).float_data().begin() + size);
     } else if (_fetch_name_to_type[name] == 2) {
-      VLOG(2) << "fetch var " << name << "type int32";
+      VLOG(2) << "fetch var " << name << " type int32";
       int size = output.insts(0).tensor_array(idx).int_data_size();
+      model._int32_value_map[name] = std::vector<int32_t>(
+          output.insts(0).tensor_array(idx).int_data().begin(),
+          output.insts(0).tensor_array(idx).int_data().begin() + size);
     }
     idx += 1;
@@ -601,31 +599,22 @@ int PredictorClient::numpy_predict(
       // int idx = _fetch_name_to_idx[name];
       if (_fetch_name_to_type[name] == 0) {
         VLOG(2) << "ferch var " << name << "type int64";
-        model._int64_value_map[name].resize(
-            output.insts(0).tensor_array(idx).int64_data_size());
         int size = output.insts(0).tensor_array(idx).int64_data_size();
-        for (int i = 0; i < size; ++i) {
-          model._int64_value_map[name][i] =
-              output.insts(0).tensor_array(idx).int64_data(i);
-        }
+        model._int64_value_map[name] = std::vector<int64_t>(
+            output.insts(0).tensor_array(idx).int64_data().begin(),
+            output.insts(0).tensor_array(idx).int64_data().begin() + size);
       } else if (_fetch_name_to_type[name] == 1) {
         VLOG(2) << "fetch var " << name << "type float";
-        model._float_value_map[name].resize(
-            output.insts(0).tensor_array(idx).float_data_size());
         int size = output.insts(0).tensor_array(idx).float_data_size();
-        for (int i = 0; i < size; ++i) {
-          model._float_value_map[name][i] =
-              output.insts(0).tensor_array(idx).float_data(i);
-        }
+        model._float_value_map[name] = std::vector<float>(
+            output.insts(0).tensor_array(idx).float_data().begin(),
+            output.insts(0).tensor_array(idx).float_data().begin() + size);
       } else if (_fetch_name_to_type[name] == 2) {
         VLOG(2) << "fetch var " << name << "type int32";
-        model._int32_value_map[name].resize(
-            output.insts(0).tensor_array(idx).int_data_size());
         int size = output.insts(0).tensor_array(idx).int_data_size();
-        for (int i = 0; i < size; ++i) {
-          model._int64_value_map[name][i] =
-              output.insts(0).tensor_array(idx).int_data(i);
-        }
+        model._int32_value_map[name] = std::vector<int32_t>(
+            output.insts(0).tensor_array(idx).int_data().begin(),
+            output.insts(0).tensor_array(idx).int_data().begin() + size);
       }
       idx += 1;
     }
```
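Both hunks make the same change: instead of resizing the destination vector and copying the repeated proto field element by element, the vector is constructed directly from the field's iterator range, and the int32 path in `numpy_predict` no longer writes its results into `_int64_value_map` by mistake. On the Python side these typed buffers surface as arrays in the fetch map. A minimal sketch of the type-code mapping (the codes 0/1/2 come from the C++ above; the helper itself is illustrative, not part of the library):

```python
import numpy as np

# Type codes used by PredictorClient above: 0 -> int64, 1 -> float32, 2 -> int32.
FETCH_DTYPES = {0: np.int64, 1: np.float32, 2: np.int32}

def as_typed_array(type_code, flat_values, shape):
    # Bulk construction, mirroring the C++ switch from a per-element copy
    # loop to a single range-constructed std::vector.
    return np.asarray(flat_values, dtype=FETCH_DTYPES[type_code]).reshape(shape)
```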
doc/COMPILE.md

````diff
@@ -43,7 +43,7 @@ In the default centos7 image we provide, the Python path is `/usr/bin/python`. I
 ### Integrated CPU version paddle inference library
 ``` shell
-mkdir build && cd build
+mkdir server-build-cpu && cd server-build-cpu
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON ..
 make -j10
 ```
@@ -53,7 +53,7 @@ you can execute `make install` to put targets under directory `./output`, you ne
 ### Integrated GPU version paddle inference library
 ``` shell
-mkdir build && cd build
+mkdir server-build-gpu && cd server-build-gpu
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON -DWITH_GPU=ON ..
 make -j10
 ```
@@ -65,7 +65,7 @@ execute `make install` to put targets under directory `./output`
 ## Compile Client
 ``` shell
-mkdir build && cd build
+mkdir client-build && cd client-build
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DCLIENT=ON ..
 make -j10
 ```
@@ -75,7 +75,7 @@ execute `make install` to put targets under directory `./output`
 ## Compile the App
 ``` bash
-mkdir build && cd build
+mkdir app-build && cd app-build
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DAPP=ON ..
 make
 ```
````
doc/COMPILE_CN.md (headings translated from the Chinese original)

````diff
@@ -43,7 +43,7 @@ export PYTHONROOT=/usr/
 ### Integrate the CPU version Paddle Inference Library
 ``` shell
-mkdir build && cd build
+mkdir server-build-cpu && cd server-build-cpu
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON ..
 make -j10
 ```
@@ -53,7 +53,7 @@ make -j10
 ### Integrate the GPU version Paddle Inference Library
 ``` shell
-mkdir build && cd build
+mkdir server-build-gpu && cd server-build-gpu
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON -DWITH_GPU=ON ..
 make -j10
 ```
@@ -65,7 +65,7 @@ make -j10
 ## Compile the Client
 ``` shell
-mkdir build && cd build
+mkdir client-build && cd client-build
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DCLIENT=ON ..
 make -j10
 ```
@@ -75,7 +75,7 @@ make -j10
 ## Compile the App
 ``` bash
-mkdir build && cd build
+mkdir app-build && cd app-build
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DCMAKE_INSTALL_PREFIX=./output -DAPP=ON ..
 make
 ```
````
doc/LATEST_PACKAGES.md

````diff
@@ -3,45 +3,45 @@
 ## CPU server
 ### Python 3
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.0-py3-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.1-py3-none-any.whl
 ```
 ### Python 2
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.0-py2-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.1-py2-none-any.whl
 ```
 ## GPU server
 ### Python 3
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.0-py3-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.1-py3-none-any.whl
 ```
 ### Python 2
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.0-py2-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.1-py2-none-any.whl
 ```
 ## Client
 ### Python 3.7
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.0-cp37-none-manylinux1_x86_64.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.1-cp37-none-any.whl
 ```
 ### Python 3.6
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.0-cp36-none-manylinux1_x86_64.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.1-cp36-none-any.whl
 ```
 ### Python 2.7
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.0-cp27-none-manylinux1_x86_64.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.1-cp27-none-any.whl
 ```
 ## App
 ### Python 3
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.0-py3-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.1-py3-none-any.whl
 ```
 ### Python 2
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.0-py2-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.1-py2-none-any.whl
 ```
````
python/examples/yolov4/README.md (new file, mode 100644)

````markdown
# Yolov4 Detection Service

([简体中文](README_CN.md)|English)

## Get Model

```
python -m paddle_serving_app.package --get_model yolov4
tar -xzvf yolov4.tar.gz
```

## Start RPC Service

```
python -m paddle_serving_server_gpu.serve --model yolov4_model --port 9393 --gpu_ids 0
```

## Prediction

```
python test_client.py 000000570688.jpg
```

After the prediction is completed, a JSON file with the prediction results and an image annotated with the detection boxes will be generated in the `./output` folder.
````
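For reference, the client side of this flow is the example's test_client.py; a condensed sketch follows. The feed/fetch variable names ("image", "im_size", "save_infer_model/scale_0") and the exact preprocessing chain are assumptions of this sketch — the authoritative names live in `yolov4_client/serving_client_conf.prototxt`:

```python
import sys
import numpy as np
from paddle_serving_client import Client
from paddle_serving_app.reader import *

# Resize to the 608x608 network input, normalize, HWC -> CHW.
preprocess = Sequential([
    File2Image(), BGR2RGB(), Resize((608, 608)), Div(255.0),
    Transpose((2, 0, 1))
])
postprocess = RCNNPostprocess("label_list.txt", "output", [608, 608])

client = Client()
client.load_client_config("yolov4_client/serving_client_conf.prototxt")
client.connect(['127.0.0.1:9393'])

im = preprocess(sys.argv[1])
fetch_map = client.predict(
    feed={"image": im, "im_size": np.array(list(im.shape[1:]))},
    fetch=["save_infer_model/scale_0"])
fetch_map["image"] = sys.argv[1]  # tell the postprocessor which file to draw on
postprocess(fetch_map)
```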
python/examples/yolov4/README_CN.md (new file, mode 100644; translated from the Chinese original)

````markdown
# Yolov4 Detection Service

(简体中文|[English](README.md))

## Get Model

```
python -m paddle_serving_app.package --get_model yolov4
tar -xzvf yolov4.tar.gz
```

## Start RPC Service

```
python -m paddle_serving_server_gpu.serve --model yolov4_model --port 9393 --gpu_ids 0
```

## Prediction

```
python test_client.py 000000570688.jpg
```

After the prediction completes, a JSON file with the prediction results and an image with the detection boxes drawn on it are generated in the `./output` folder.
````
python/examples/yolov4/test_client.py

```diff
@@ -23,11 +23,11 @@ preprocess = Sequential([
         (2, 0, 1))
 ])
-postprocess = RCNNPostprocess("label_list.txt", "output")
+postprocess = RCNNPostprocess("label_list.txt", "output", [608, 608])
 client = Client()
 client.load_client_config("yolov4_client/serving_client_conf.prototxt")
-client.connect(['127.0.0.1:9300'])
+client.connect(['127.0.0.1:9393'])
 im = preprocess(sys.argv[1])
 print(im.shape)
```
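Two fixes here: the client now connects to port 9393, matching the `--port 9393` used to start the server in the README, and `RCNNPostprocess` now receives the network input resolution so output boxes can be mapped back onto the original image. The resize shape handed to the postprocessor must match the `Resize` target in the preprocessing chain, otherwise the drawn boxes are offset; deriving both from one constant avoids drift. A sketch under the example's 608x608 assumption:

```python
from paddle_serving_app.reader import *

INPUT_SHAPE = (608, 608)  # single source of truth for the network input size

preprocess = Sequential([
    File2Image(), BGR2RGB(), Resize(INPUT_SHAPE), Div(255.0),
    Transpose((2, 0, 1))
])
postprocess = RCNNPostprocess("label_list.txt", "output", list(INPUT_SHAPE))
```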
python/paddle_serving_app/models/model_list.py

```diff
@@ -24,7 +24,7 @@ class ServingModels(object):
         self.model_dict["SentimentAnalysis"] = ["senta_bilstm", "senta_bow", "senta_cnn"]
         self.model_dict["SemanticRepresentation"] = ["ernie"]
         self.model_dict["ChineseWordSegmentation"] = ["lac"]
-        self.model_dict["ObjectDetection"] = ["faster_rcnn"]
+        self.model_dict["ObjectDetection"] = ["faster_rcnn", "yolov4"]
         self.model_dict["ImageSegmentation"] = ["unet", "deeplabv3", "deeplabv3+cityscapes"]
```
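Registering "yolov4" under ObjectDetection is what lets `python -m paddle_serving_app.package --get_model yolov4` (used in the README above) resolve the model name. A quick sketch of inspecting the registry, assuming `ServingModels()` takes no constructor arguments as in the file above:

```python
from paddle_serving_app.models.model_list import ServingModels

models = ServingModels()
print(models.model_dict["ObjectDetection"])  # ['faster_rcnn', 'yolov4']
assert "yolov4" in models.model_dict["ObjectDetection"]
```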
python/paddle_serving_app/reader/image_reader.py

```diff
@@ -280,10 +280,11 @@ class SegPostprocess(object):
 class RCNNPostprocess(object):
-    def __init__(self, label_file, output_dir):
+    def __init__(self, label_file, output_dir, resize_shape=None):
         self.output_dir = output_dir
         self.label_file = label_file
         self.label_list = []
+        self.resize_shape = resize_shape
         with open(label_file) as fin:
             for line in fin:
                 self.label_list.append(line.strip())
@@ -378,6 +379,13 @@ class RCNNPostprocess(object):
                 xmax = xmin + w
                 ymax = ymin + h
+                img_w, img_h = image.size
+                if self.resize_shape is not None:
+                    xmin = xmin * img_w / self.resize_shape[0]
+                    xmax = xmax * img_w / self.resize_shape[0]
+                    ymin = ymin * img_h / self.resize_shape[1]
+                    ymax = ymax * img_h / self.resize_shape[1]
                 color = tuple(color_list[catid])
                 # draw bbox
```
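The new `resize_shape` branch maps boxes predicted in resized-input coordinates back onto the original image before drawing. A standalone sketch of the same arithmetic with a worked example (the function name is mine; the formulas are the ones added above):

```python
def rescale_box(box, img_w, img_h, resize_shape):
    """Map a box from resized-input coordinates back to the original image."""
    xmin, ymin, xmax, ymax = box
    if resize_shape is not None:
        xmin = xmin * img_w / resize_shape[0]
        xmax = xmax * img_w / resize_shape[0]
        ymin = ymin * img_h / resize_shape[1]
        ymax = ymax * img_h / resize_shape[1]
    return xmin, ymin, xmax, ymax

# A box at (152, 304, 456, 608) on the 608x608 network input maps to
# (320, 360, 960, 720) on a 1280x720 original image.
print(rescale_box((152, 304, 456, 608), 1280, 720, [608, 608]))
```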
python/paddle_serving_app/version.py

```diff
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving App version string """
-serving_app_version = "0.1.0"
+serving_app_version = "0.1.1"
```
python/paddle_serving_client/__init__.py

```diff
@@ -404,7 +404,13 @@ class MultiLangClient(object):
         self._parse_model_config(path)

     def connect(self, endpoint):
-        self.channel_ = grpc.insecure_channel(endpoint[0])  #TODO
+        # https://github.com/tensorflow/serving/issues/1382
+        options = [('grpc.max_receive_message_length', 512 * 1024 * 1024),
+                   ('grpc.max_send_message_length', 512 * 1024 * 1024),
+                   ('grpc.max_receive_message_length', 512 * 1024 * 1024)]
+        self.channel_ = grpc.insecure_channel(
+            endpoint[0], options=options)  #TODO
         self.stub_ = multi_lang_general_model_service_pb2_grpc.MultiLangGeneralModelServiceStub(
             self.channel_)
```
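gRPC channels cap inbound messages at 4 MB by default, which serialized image tensors easily exceed; the commit raises the send and receive limits to 512 MB (note the receive option is listed twice in the commit itself). A standalone sketch of the same channel setup, with a placeholder endpoint:

```python
import grpc

MAX_MSG = 512 * 1024 * 1024  # 512 MB, matching the commit above

options = [('grpc.max_send_message_length', MAX_MSG),
           ('grpc.max_receive_message_length', MAX_MSG)]
# '127.0.0.1:9393' is illustrative; use your server's endpoint.
channel = grpc.insecure_channel('127.0.0.1:9393', options=options)
```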
python/paddle_serving_client/version.py

```diff
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving Client version string """
-serving_client_version = "0.3.0"
-serving_server_version = "0.3.0"
-module_proto_version = "0.3.0"
+serving_client_version = "0.3.1"
+serving_server_version = "0.3.1"
+module_proto_version = "0.3.1"
```
python/paddle_serving_server/version.py

```diff
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving Client version string """
-serving_client_version = "0.3.0"
-serving_server_version = "0.3.0"
-module_proto_version = "0.3.0"
+serving_client_version = "0.3.1"
+serving_server_version = "0.3.1"
+module_proto_version = "0.3.1"
```
python/paddle_serving_server_gpu/version.py

```diff
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving Client version string """
-serving_client_version = "0.3.0"
-serving_server_version = "0.3.0"
-module_proto_version = "0.3.0"
+serving_client_version = "0.3.1"
+serving_server_version = "0.3.1"
+module_proto_version = "0.3.1"
```
tools/python_tag.py

```diff
@@ -15,6 +15,6 @@
 from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
 import re
 with open("setup.cfg", "w") as f:
-    line = "[bdist_wheel]\npython-tag={0}{1}\nplat-name=manylinux1_x86_64".format(
-        get_abbr_impl(), get_impl_ver())
+    line = "[bdist_wheel]\npython-tag={0}{1}".format(get_abbr_impl(),
+                                                     get_impl_ver())
     f.write(line)
```
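Dropping `plat-name=manylinux1_x86_64` lets bdist_wheel fall back to the `any` platform tag, which is why the client wheel URLs in doc/LATEST_PACKAGES.md change from `-cp27-none-manylinux1_x86_64.whl` to `-cp27-none-any.whl`. A quick check of the tag pieces (this requires an older `wheel` release that still ships `pep425tags`, as the script above assumes):

```python
from wheel.pep425tags import get_abbr_impl, get_impl_ver

# On CPython 2.7 this prints "cp27"; with no plat-name override, the built
# wheel is named e.g. paddle_serving_client-0.3.1-cp27-none-any.whl.
print("{0}{1}".format(get_abbr_impl(), get_impl_ver()))
```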