PaddlePaddle / Serving
Unverified commit e118dd70
Merge branch 'develop' into fix_bert_doc
Authored on Jan 22, 2021 by Jiawei Wang; committed by GitHub on Jan 22, 2021.
Parents: 8fbf0882, b7877d2c
Showing 12 changed files with 125 additions and 46 deletions (+125, -46):
cmake/paddlepaddle.cmake                                            +3   -3
doc/FAQ.md                                                         +36   -0
java/examples/src/main/java/PipelineClientExample.java              +1   -1
java/examples/src/main/java/StaticPipelineClient.java               +1   -1
paddle_inference/inferencer-fluid-arm/include/fluid_arm_engine.h   +18  -16
python/CMakeLists.txt                                              +21   -9
python/paddle_serving_app/local_predict.py                          +1   -0
python/paddle_serving_server/web_service.py                        +12   -3
python/paddle_serving_server_gpu/__init__.py                       +17  -11
python/paddle_serving_server_gpu/serve.py                           +1   -0
python/paddle_serving_server_gpu/web_service.py                    +12   -2
python/pipeline/local_service_handler.py                            +2   -0
cmake/paddlepaddle.cmake

```cmake
@@ -136,8 +136,8 @@ if (WITH_TRT)
endif()
if (WITH_LITE)
    ADD_LIBRARY(paddle_api_full_bundled STATIC IMPORTED GLOBAL)
    SET_PROPERTY(TARGET paddle_api_full_bundled PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/lite/cxx/lib/libpaddle_api_full_bundled.a)
    ADD_LIBRARY(paddle_full_api_shared STATIC IMPORTED GLOBAL)
    SET_PROPERTY(TARGET paddle_full_api_shared PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/lite/cxx/lib/libpaddle_full_api_shared.so)
    if (WITH_XPU)
        ADD_LIBRARY(xpuapi SHARED IMPORTED GLOBAL)

@@ -157,7 +157,7 @@ LIST(APPEND paddle_depend_libs
    xxhash)
if (WITH_LITE)
    LIST(APPEND paddle_depend_libs paddle_api_full_bundled)
    LIST(APPEND paddle_depend_libs paddle_full_api_shared)
    if (WITH_XPU)
        LIST(APPEND paddle_depend_libs xpuapi xpurt)
    endif()
```
...
doc/FAQ.md

@@ -34,6 +34,42 @@
**A:** http rpc

## Installation issues

#### Q: While pip-installing the whl package, the following error is reported:

```
Collecting opencv-python
  Using cached opencv-python-4.3.0.38.tar.gz (88.0 MB)
  Installing build dependencies ... done
  Getting requirements to build wheel ... error
  ERROR: Command errored out with exit status 1:
   command: /home/work/Python-2.7.17/build/bin/python /home/work/Python-2.7.17/build/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py get_requires_for_build_wheel /tmp/tmpLiweA9
       cwd: /tmp/pip-install-_w6AUI/opencv-python
  Complete output (22 lines):
  Traceback (most recent call last):
    File "/home/work/Python-2.7.17/build/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py", line 280, in <module>
      main()
    File "/home/work/Python-2.7.17/build/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py", line 263, in main
      json_out['return_val'] = hook(**hook_input['kwargs'])
    File "/home/work/Python-2.7.17/build/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py", line 114, in get_requires_for_build_wheel
      return hook(config_settings)
    File "/tmp/pip-build-env-AUCbP4/overlay/lib/python2.7/site-packages/setuptools/build_meta.py", line 146, in get_requires_for_build_wheel
      return self._get_build_requires(config_settings, requirements=['wheel'])
    File "/tmp/pip-build-env-AUCbP4/overlay/lib/python2.7/site-packages/setuptools/build_meta.py", line 127, in _get_build_requires
      self.run_setup()
    File "/tmp/pip-build-env-AUCbP4/overlay/lib/python2.7/site-packages/setuptools/build_meta.py", line 243, in run_setup
      self).run_setup(setup_script=setup_script)
    File "/tmp/pip-build-env-AUCbP4/overlay/lib/python2.7/site-packages/setuptools/build_meta.py", line 142, in run_setup
      exec(compile(code, __file__, 'exec'), locals())
    File "setup.py", line 448, in <module>
      main()
    File "setup.py", line 99, in main
      % {"ext": re.escape(sysconfig.get_config_var("EXT_SUFFIX"))}
    File "/home/work/Python-2.7.17/build/lib/python2.7/re.py", line 210, in escape
      s = list(pattern)
  TypeError: 'NoneType' object is not iterable
```

**A:** Install a pinned opencv-python version first (`pip install opencv-python==4.2.0.32`), then install the whl package.

## Compilation issues
java/examples/src/main/java/PipelineClientExample.java

```diff
@@ -62,7 +62,7 @@ public class PipelineClientExample {
                 return false;
             }
         }
-        PipelineFuture future = StaticPipelineClient.client.asyn_pr::qedict(feed_data, fetch, false, 0);
+        PipelineFuture future = StaticPipelineClient.client.asyn_predict(feed_data, fetch, false, 0);
         HashMap<String, String> result = future.get();
         if (result == null) {
             return false;
```
java/examples/src/main/java/StaticPipelineClient.java

```diff
@@ -37,7 +37,7 @@ public class StaticPipelineClient {
             System.out.println("already connect.");
             return true;
         }
-        succ = clieint.connect(target);
+        succ = client.connect(target);
         if (succ != true) {
             System.out.println("connect failed.");
             return false;
```
paddle_inference/inferencer-fluid-arm/include/fluid_arm_engine.h

```diff
@@ -128,20 +128,22 @@ class FluidArmAnalysisCore : public FluidFamilyCore {
     config.DisableGpu();
     config.SetCpuMathLibraryNumThreads(1);
 
-    if (params.enable_memory_optimization()) {
-      config.EnableMemoryOptim();
-    }
-
-    if (params.enable_memory_optimization()) {
-      config.EnableMemoryOptim();
-    }
-
-    if (params.use_lite()) {
-      config.EnableLiteEngine(PrecisionType::kFloat32, true);
-    }
-
-    if (params.use_xpu()) {
-      config.EnableXpu(100);
-    }
+    if (params.use_lite()) {
+      config.EnableLiteEngine(PrecisionType::kFloat32, true);
+    }
+
+    if (params.use_xpu()) {
+      config.EnableXpu(2 * 1024 * 1024);
+    }
+
+    if (params.enable_memory_optimization()) {
+      config.EnableMemoryOptim();
+    }
 
     if (params.enable_ir_optimization()) {
       config.SwitchIrOptim(true);
     } else {
       config.SwitchIrOptim(false);
     }
 
     config.SwitchSpecifyInputNames(true);
```
```diff
@@ -173,6 +175,14 @@ class FluidArmAnalysisDirCore : public FluidFamilyCore {
     config.SwitchSpecifyInputNames(true);
     config.SetCpuMathLibraryNumThreads(1);
 
+    if (params.use_lite()) {
+      config.EnableLiteEngine(PrecisionType::kFloat32, true);
+    }
+
+    if (params.use_xpu()) {
+      config.EnableXpu(2 * 1024 * 1024);
+    }
+
     if (params.enable_memory_optimization()) {
       config.EnableMemoryOptim();
     }

@@ -183,14 +193,6 @@ class FluidArmAnalysisDirCore : public FluidFamilyCore {
       config.SwitchIrOptim(false);
     }
 
-    if (params.use_lite()) {
-      config.EnableLiteEngine(PrecisionType::kFloat32, true);
-    }
-
-    if (params.use_xpu()) {
-      config.EnableXpu(100);
-    }
-
     AutoLock lock(GlobalPaddleCreateMutex::instance());
     _core = CreatePredictor(config);
     if (NULL == _core.get()) {
```
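Net effect of these hunks: the Lite engine and XPU settings are now configured before memory optimization is enabled, and the XPU L3 workspace argument grows from 100 to 2 * 1024 * 1024. For reference, here is a minimal sketch of the same configuration order through the paddle.inference Python API; it assumes a Paddle build with Lite/XPU support, and "./uci_housing_model" is a hypothetical model directory:

```python
# Sketch only: mirrors the reordered ARM engine setup in this header
# via the Python inference API; the model path is hypothetical.
from paddle.inference import Config, PrecisionType

config = Config("./uci_housing_model")
config.disable_gpu()
config.set_cpu_math_library_num_threads(1)
config.enable_lite_engine(PrecisionType.Float32, True)  # params.use_lite()
config.enable_xpu(2 * 1024 * 1024)   # params.use_xpu(): 2 MB L3 workspace
config.enable_memory_optim()         # applied after the Lite/XPU setup
config.switch_ir_optim(True)
config.switch_specify_input_names(True)
```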
python/CMakeLists.txt

```cmake
@@ -99,6 +99,17 @@ if (SERVER)
        DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
    add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
  elseif(WITH_LITE)
    if(WITH_XPU)
      add_custom_command(
          OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
          COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server_gpu/ ${PADDLE_SERVING_BINARY_DIR}/python/
          COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py "server_gpu" arm-xpu
          COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
          DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
      add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
    else()
      add_custom_command(
          OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
          COMMAND cp -r ...

@@ -108,6 +119,7 @@ if (SERVER)
          COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
          DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
      add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
    endif()
  else()
    add_custom_command(
        OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
        ...
```
python/paddle_serving_app/local_predict.py

```diff
@@ -132,6 +132,7 @@ class LocalPredictor(object):
                 ops_filter=[])
         if use_xpu:
+            # 2MB l3 cache
             config.enable_xpu(8 * 1024 * 1024)
         self.predictor = create_paddle_predictor(config)
```
python/paddle_serving_server/web_service.py

```diff
@@ -20,7 +20,7 @@ from paddle_serving_server import OpMaker, OpSeqMaker, Server
 from paddle_serving_client import Client
 from contextlib import closing
 import socket
+import numpy as np
 from paddle_serving_server import pipeline
 from paddle_serving_server.pipeline import Op

@@ -64,8 +64,8 @@ class WebService(object):
         f = open(client_config, 'r')
         model_conf = google.protobuf.text_format.Merge(str(f.read()), model_conf)
-        self.feed_names = [var.alias_name for var in model_conf.feed_var]
-        self.fetch_names = [var.alias_name for var in model_conf.fetch_var]
+        self.feed_vars = {var.name: var for var in model_conf.feed_var}
+        self.fetch_vars = {var.name: var for var in model_conf.fetch_var}
 
     def _launch_rpc_service(self):
         op_maker = OpMaker()

@@ -201,6 +201,15 @@
     def preprocess(self, feed=[], fetch=[]):
         print("This API will be deprecated later. Please do not use it")
+        is_batch = True
+        feed_dict = {}
+        for var_name in self.feed_vars.keys():
+            feed_dict[var_name] = []
+        for feed_ins in feed:
+            for key in feed_ins:
+                feed_dict[key].append(
+                    np.array(feed_ins[key]).reshape(
+                        list(self.feed_vars[key].shape))[np.newaxis, :])
+        feed = {}
+        for key in feed_dict:
+            feed[key] = np.concatenate(feed_dict[key], axis=0)
         return feed, fetch, is_batch
 
     def postprocess(self, feed=[], fetch=[], fetch_map=None):
```
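The new preprocess() body batches per-sample feeds into one array per input name before returning. A self-contained numpy sketch of that logic; the "image" input name and its shape are hypothetical stand-ins for entries of model_conf.feed_var:

```python
import numpy as np

# Two single-sample feeds, as the per-request HTTP API produces them.
feed = [{"image": np.zeros(784)}, {"image": np.ones(784)}]
var_shape = [784]  # hypothetical self.feed_vars["image"].shape

feed_dict = {"image": []}
for feed_ins in feed:
    for key in feed_ins:
        # Reshape each sample to its declared shape and add a batch axis.
        feed_dict[key].append(
            np.array(feed_ins[key]).reshape(var_shape)[np.newaxis, :])

# One batched ndarray per input name, with is_batch=True signalled.
batched = {k: np.concatenate(v, axis=0) for k, v in feed_dict.items()}
print(batched["image"].shape)  # (2, 784)
```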
python/paddle_serving_server_gpu/__init__.py

```diff
@@ -212,6 +212,7 @@ class Server(object):
         self.module_path = os.path.dirname(paddle_serving_server.__file__)
         self.cur_path = os.getcwd()
         self.use_local_bin = False
+        self.device = "cpu"
         self.gpuid = 0
         self.use_trt = False
         self.use_lite = False

@@ -279,6 +280,9 @@
                 "GPU not found, please check your environment or use cpu version by \"pip install paddle_serving_server\"")
 
+    def set_device(self, device="cpu"):
+        self.device = device
+
     def set_gpuid(self, gpuid=0):
         self.gpuid = gpuid

@@ -311,18 +315,19 @@
         engine.static_optimization = False
         engine.force_update_static_cache = False
         engine.use_trt = self.use_trt
+        if os.path.exists('{}/__params__'.format(model_config_path)):
+            suffix = ""
+        else:
+            suffix = "_DIR"
+        if device == "arm":
+            engine.use_lite = self.use_lite
+            engine.use_xpu = self.use_xpu
         if device == "cpu":
-            engine.type = "FLUID_CPU_ANALYSIS_DIR"
+            engine.type = "FLUID_CPU_ANALYSIS" + suffix
         elif device == "gpu":
-            engine.type = "FLUID_GPU_ANALYSIS_DIR"
+            engine.type = "FLUID_GPU_ANALYSIS" + suffix
         elif device == "arm":
-            engine.type = "FLUID_ARM_ANALYSIS_DIR"
+            engine.type = "FLUID_ARM_ANALYSIS" + suffix
         self.model_toolkit_conf.engines.extend([engine])
 
     def _prepare_infer_service(self, port):

@@ -425,7 +430,7 @@
                 cuda_version = line.split("\"")[1]
                 if cuda_version == "101" or cuda_version == "102" or cuda_version == "110":
                     device_version = "serving-gpu-" + cuda_version + "-"
-                elif cuda_version == "arm":
+                elif cuda_version == "arm" or cuda_version == "arm-xpu":
                     device_version = "serving-" + cuda_version + "-"
                 else:
                     device_version = "serving-gpu-cuda" + cuda_version + "-"

@@ -528,7 +533,8 @@
         else:
             print("Use local bin : {}".format(self.bin_path))
         #self.check_cuda()
-        if self.use_lite:
+        # Todo: merge CPU and GPU code, remove device to model_toolkit
+        if self.device == "cpu" or self.device == "arm":
             command = "{} " \
                       "-enable_model_toolkit " \
                       "-inferservice_path {} " \
```
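The new suffix logic keys off how the model was exported: a single combined __params__ file selects the plain *_ANALYSIS engine, while a directory of per-variable parameter files keeps the *_ANALYSIS_DIR engine. A minimal sketch of the check, with a hypothetical model path:

```python
import os

model_config_path = "./uci_housing_model"  # hypothetical model directory

# A combined-params export ships one "__params__" file alongside the
# program; per-variable exports do not, and keep the _DIR engine type.
if os.path.exists('{}/__params__'.format(model_config_path)):
    suffix = ""
else:
    suffix = "_DIR"

engine_type = "FLUID_ARM_ANALYSIS" + suffix
print(engine_type)  # FLUID_ARM_ANALYSIS_DIR when no __params__ exists
```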
python/paddle_serving_server_gpu/serve.py

```diff
@@ -73,6 +73,7 @@ def start_gpu_card_model(index, gpuid, args):  # pylint: disable=doc-string-missing
         server.set_lite()
         device = "arm"
+        server.set_device(device)
     if args.use_xpu:
         server.set_xpu()
```
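Together with the new Server.set_device() method added in __init__.py, the ARM startup path now records its device explicitly. A hedged sketch of the calls this path makes; it assumes paddle_serving_server_gpu is installed, and omits model loading and port setup:

```python
from paddle_serving_server_gpu import Server

server = Server()
server.set_lite()         # args.use_lite: run through the Paddle-Lite engine
server.set_device("arm")  # new in this change; consumed by model_toolkit setup
server.set_xpu()          # args.use_xpu: additionally target the XPU backend
```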
python/paddle_serving_server_gpu/web_service.py

```diff
@@ -70,8 +70,8 @@ class WebService(object):
         f = open(client_config, 'r')
         model_conf = google.protobuf.text_format.Merge(str(f.read()), model_conf)
-        self.feed_names = [var.alias_name for var in model_conf.feed_var]
-        self.fetch_names = [var.alias_name for var in model_conf.fetch_var]
+        self.feed_vars = {var.name: var for var in model_conf.feed_var}
+        self.fetch_vars = {var.name: var for var in model_conf.fetch_var}
 
     def set_gpus(self, gpus):
         print("This API will be deprecated later. Please do not use it")

@@ -107,6 +107,7 @@
         server.set_num_threads(thread_num)
         server.set_memory_optimize(mem_optim)
         server.set_ir_optimize(ir_optim)
+        server.set_device(device)
         if use_lite:
             server.set_lite()

@@ -278,6 +279,15 @@
     def preprocess(self, feed=[], fetch=[]):
         print("This API will be deprecated later. Please do not use it")
+        is_batch = True
+        feed_dict = {}
+        for var_name in self.feed_vars.keys():
+            feed_dict[var_name] = []
+        for feed_ins in feed:
+            for key in feed_ins:
+                feed_dict[key].append(
+                    np.array(feed_ins[key]).reshape(
+                        list(self.feed_vars[key].shape))[np.newaxis, :])
+        feed = {}
+        for key in feed_dict:
+            feed[key] = np.concatenate(feed_dict[key], axis=0)
         return feed, fetch, is_batch
 
     def postprocess(self, feed=[], fetch=[], fetch_map=None):
```
python/pipeline/local_service_handler.py

```diff
@@ -249,6 +249,8 @@ class LocalServiceHandler(object):
         server = Server()
         if gpuid >= 0:
             server.set_gpuid(gpuid)
+        # TODO: support arm or arm + xpu later
+        server.set_device(self._device_name)
         server.set_op_sequence(op_seq_maker.get_op_sequence())
         server.set_num_threads(thread_num)
```